Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes. See the raw diff.
- omnilmm/lib/python3.10/asyncio/__init__.py +43 -0
- omnilmm/lib/python3.10/asyncio/__main__.py +127 -0
- omnilmm/lib/python3.10/asyncio/base_events.py +1934 -0
- omnilmm/lib/python3.10/asyncio/base_futures.py +80 -0
- omnilmm/lib/python3.10/asyncio/base_tasks.py +85 -0
- omnilmm/lib/python3.10/asyncio/constants.py +27 -0
- omnilmm/lib/python3.10/asyncio/coroutines.py +269 -0
- omnilmm/lib/python3.10/asyncio/events.py +819 -0
- omnilmm/lib/python3.10/asyncio/exceptions.py +58 -0
- omnilmm/lib/python3.10/asyncio/format_helpers.py +76 -0
- omnilmm/lib/python3.10/asyncio/futures.py +426 -0
- omnilmm/lib/python3.10/asyncio/locks.py +438 -0
- omnilmm/lib/python3.10/asyncio/log.py +7 -0
- omnilmm/lib/python3.10/asyncio/mixins.py +31 -0
- omnilmm/lib/python3.10/asyncio/protocols.py +216 -0
- omnilmm/lib/python3.10/asyncio/runners.py +73 -0
- omnilmm/lib/python3.10/asyncio/selector_events.py +1105 -0
- omnilmm/lib/python3.10/asyncio/sslproto.py +739 -0
- omnilmm/lib/python3.10/asyncio/subprocess.py +223 -0
- omnilmm/lib/python3.10/asyncio/tasks.py +946 -0
- omnilmm/lib/python3.10/asyncio/threads.py +25 -0
- omnilmm/lib/python3.10/asyncio/transports.py +335 -0
- omnilmm/lib/python3.10/asyncio/trsock.py +206 -0
- omnilmm/lib/python3.10/asyncio/unix_events.py +1466 -0
- omnilmm/lib/python3.10/asyncio/windows_events.py +924 -0
- omnilmm/lib/python3.10/asyncio/windows_utils.py +173 -0
- omnilmm/lib/python3.10/site-packages/lxml/__pycache__/ElementInclude.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/__pycache__/__init__.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/__pycache__/builder.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/__pycache__/doctestcompare.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/__pycache__/pyclasslookup.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/__pycache__/sax.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/ElementSoup.py +10 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__init__.py +1923 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/ElementSoup.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/_diffcommand.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/_html5builder.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/_setmixin.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/builder.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/clean.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/defs.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/diff.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/formfill.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/html5parser.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/soupparser.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/usedoctest.cpython-310.pyc +0 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/_diffcommand.py +86 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/_html5builder.py +100 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/_setmixin.py +56 -0
- omnilmm/lib/python3.10/site-packages/lxml/html/builder.py +133 -0
omnilmm/lib/python3.10/asyncio/__init__.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""The asyncio package, tracking PEP 3156."""
|
| 2 |
+
|
| 3 |
+
# flake8: noqa
|
| 4 |
+
|
| 5 |
+
import sys
|
| 6 |
+
|
| 7 |
+
# This relies on each of the submodules having an __all__ variable.
|
| 8 |
+
from .base_events import *
|
| 9 |
+
from .coroutines import *
|
| 10 |
+
from .events import *
|
| 11 |
+
from .exceptions import *
|
| 12 |
+
from .futures import *
|
| 13 |
+
from .locks import *
|
| 14 |
+
from .protocols import *
|
| 15 |
+
from .runners import *
|
| 16 |
+
from .queues import *
|
| 17 |
+
from .streams import *
|
| 18 |
+
from .subprocess import *
|
| 19 |
+
from .tasks import *
|
| 20 |
+
from .threads import *
|
| 21 |
+
from .transports import *
|
| 22 |
+
|
| 23 |
+
__all__ = (base_events.__all__ +
|
| 24 |
+
coroutines.__all__ +
|
| 25 |
+
events.__all__ +
|
| 26 |
+
exceptions.__all__ +
|
| 27 |
+
futures.__all__ +
|
| 28 |
+
locks.__all__ +
|
| 29 |
+
protocols.__all__ +
|
| 30 |
+
runners.__all__ +
|
| 31 |
+
queues.__all__ +
|
| 32 |
+
streams.__all__ +
|
| 33 |
+
subprocess.__all__ +
|
| 34 |
+
tasks.__all__ +
|
| 35 |
+
threads.__all__ +
|
| 36 |
+
transports.__all__)
|
| 37 |
+
|
| 38 |
+
if sys.platform == 'win32': # pragma: no cover
|
| 39 |
+
from .windows_events import *
|
| 40 |
+
__all__ += windows_events.__all__
|
| 41 |
+
else:
|
| 42 |
+
from .unix_events import * # pragma: no cover
|
| 43 |
+
__all__ += unix_events.__all__
|
omnilmm/lib/python3.10/asyncio/__main__.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ast
|
| 2 |
+
import asyncio
|
| 3 |
+
import code
|
| 4 |
+
import concurrent.futures
|
| 5 |
+
import inspect
|
| 6 |
+
import sys
|
| 7 |
+
import threading
|
| 8 |
+
import types
|
| 9 |
+
import warnings
|
| 10 |
+
|
| 11 |
+
from . import futures
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class AsyncIOInteractiveConsole(code.InteractiveConsole):
    """Interactive console that supports top-level ``await``.

    The REPL runs in a worker thread while *loop* runs in the main
    thread; each compiled input is executed via a callback scheduled on
    the loop, and its result is ferried back through a
    concurrent.futures.Future.
    """

    def __init__(self, locals, loop):
        super().__init__(locals)
        # Allow "await" at the top level of interactively typed code.
        self.compile.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT

        self.loop = loop

    def runcode(self, code):
        # Thread-safe bridge: the loop thread fulfills this future, the
        # REPL thread blocks on it below.
        future = concurrent.futures.Future()

        def callback():
            # Module-level globals shared with the __main__ driver so
            # Ctrl-C there can cancel the in-flight task.
            global repl_future
            global repl_future_interrupted

            repl_future = None
            repl_future_interrupted = False

            # Wrap the compiled code object so it executes against the
            # REPL's namespace.
            func = types.FunctionType(code, self.locals)
            try:
                coro = func()
            except SystemExit:
                raise
            except KeyboardInterrupt as ex:
                repl_future_interrupted = True
                future.set_exception(ex)
                return
            except BaseException as ex:
                future.set_exception(ex)
                return

            if not inspect.iscoroutine(coro):
                # No top-level await: the value is already final.
                future.set_result(coro)
                return

            try:
                repl_future = self.loop.create_task(coro)
                futures._chain_future(repl_future, future)
            except BaseException as exc:
                future.set_exception(exc)

        # NOTE: uses the module-level `loop` (the same object as
        # self.loop in the __main__ driver below).
        loop.call_soon_threadsafe(callback)

        try:
            return future.result()
        except SystemExit:
            raise
        except BaseException:
            if repl_future_interrupted:
                self.write("\nKeyboardInterrupt\n")
            else:
                self.showtraceback()
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class REPLThread(threading.Thread):
    """Worker thread that hosts the interactive console.

    The event loop stays in the main thread; when the console exits
    this thread asks the loop (thread-safely) to stop.
    """

    def run(self):
        try:
            banner = (
                f'asyncio REPL {sys.version} on {sys.platform}\n'
                f'Use "await" directly instead of "asyncio.run()".\n'
                f'Type "help", "copyright", "credits" or "license" '
                f'for more information.\n'
                f'{getattr(sys, "ps1", ">>> ")}import asyncio'
            )

            # `console` is the module-level AsyncIOInteractiveConsole
            # created by the __main__ driver.
            console.interact(
                banner=banner,
                exitmsg='exiting asyncio REPL...')
        finally:
            # Silence warnings for coroutines the user created in the
            # REPL but never awaited.
            warnings.filterwarnings(
                'ignore',
                message=r'^coroutine .* was never awaited$',
                category=RuntimeWarning)

            loop.call_soon_threadsafe(loop.stop)
| 90 |
+
|
| 91 |
+
|
| 92 |
+
if __name__ == '__main__':
    sys.audit("cpython.run_stdin")

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    # Seed the REPL namespace: asyncio plus the usual module globals.
    repl_locals = {'asyncio': asyncio}
    for key in {'__name__', '__package__',
                '__loader__', '__spec__',
                '__builtins__', '__file__'}:
        repl_locals[key] = locals()[key]

    console = AsyncIOInteractiveConsole(repl_locals, loop)

    # Shared with AsyncIOInteractiveConsole.runcode() via `global`.
    repl_future = None
    repl_future_interrupted = False

    try:
        import readline  # NoQA
    except ImportError:
        pass

    repl_thread = REPLThread()
    repl_thread.daemon = True
    repl_thread.start()

    # Run the loop in the main thread so Ctrl-C cancels the task the
    # REPL is currently awaiting instead of killing the process.
    while True:
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            if repl_future and not repl_future.done():
                repl_future.cancel()
                repl_future_interrupted = True
            continue
        else:
            break
|
omnilmm/lib/python3.10/asyncio/base_events.py
ADDED
|
@@ -0,0 +1,1934 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Base implementation of event loop.
|
| 2 |
+
|
| 3 |
+
The event loop can be broken up into a multiplexer (the part
|
| 4 |
+
responsible for notifying us of I/O events) and the event loop proper,
|
| 5 |
+
which wraps a multiplexer with functionality for scheduling callbacks,
|
| 6 |
+
immediately or at a given time in the future.
|
| 7 |
+
|
| 8 |
+
Whenever a public API takes a callback, subsequent positional
|
| 9 |
+
arguments will be passed to the callback if/when it is called. This
|
| 10 |
+
avoids the proliferation of trivial lambdas implementing closures.
|
| 11 |
+
Keyword arguments for the callback are not supported; this is a
|
| 12 |
+
conscious design decision, leaving the door open for keyword arguments
|
| 13 |
+
to modify the meaning of the API call itself.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import collections
|
| 17 |
+
import collections.abc
|
| 18 |
+
import concurrent.futures
|
| 19 |
+
import functools
|
| 20 |
+
import heapq
|
| 21 |
+
import itertools
|
| 22 |
+
import os
|
| 23 |
+
import socket
|
| 24 |
+
import stat
|
| 25 |
+
import subprocess
|
| 26 |
+
import threading
|
| 27 |
+
import time
|
| 28 |
+
import traceback
|
| 29 |
+
import sys
|
| 30 |
+
import warnings
|
| 31 |
+
import weakref
|
| 32 |
+
|
| 33 |
+
try:
|
| 34 |
+
import ssl
|
| 35 |
+
except ImportError: # pragma: no cover
|
| 36 |
+
ssl = None
|
| 37 |
+
|
| 38 |
+
from . import constants
|
| 39 |
+
from . import coroutines
|
| 40 |
+
from . import events
|
| 41 |
+
from . import exceptions
|
| 42 |
+
from . import futures
|
| 43 |
+
from . import protocols
|
| 44 |
+
from . import sslproto
|
| 45 |
+
from . import staggered
|
| 46 |
+
from . import tasks
|
| 47 |
+
from . import transports
|
| 48 |
+
from . import trsock
|
| 49 |
+
from .log import logger
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
__all__ = 'BaseEventLoop','Server',
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# Minimum number of _scheduled timer handles before cleanup of
|
| 56 |
+
# cancelled handles is performed.
|
| 57 |
+
_MIN_SCHEDULED_TIMER_HANDLES = 100
|
| 58 |
+
|
| 59 |
+
# Minimum fraction of _scheduled timer handles that are cancelled
|
| 60 |
+
# before cleanup of cancelled handles is performed.
|
| 61 |
+
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
|
| 65 |
+
|
| 66 |
+
# Maximum timeout passed to select to avoid OS limitations
|
| 67 |
+
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
|
| 68 |
+
|
| 69 |
+
# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
|
| 70 |
+
# *reuse_address* parameter
|
| 71 |
+
_unset = object()
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def _format_handle(handle):
    """Render *handle* for debug output, preferring the owning Task's repr."""
    callback = handle._callback
    bound_to = getattr(callback, '__self__', None)
    if isinstance(bound_to, tasks.Task):
        # The callback is a Task method; the task repr is more useful.
        return repr(bound_to)
    return str(handle)
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def _format_pipe(fd):
    """Describe a subprocess pipe constant (or raw fd) for debug output."""
    if fd == subprocess.PIPE:
        return '<pipe>'
    if fd == subprocess.STDOUT:
        return '<stdout>'
    return repr(fd)
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _set_reuseport(sock):
    """Enable SO_REUSEPORT on *sock*; raise ValueError when unsupported."""
    if not hasattr(socket, 'SO_REUSEPORT'):
        raise ValueError('reuse_port not supported by socket module')
    try:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    except OSError:
        # The constant is defined but the kernel rejects it.
        raise ValueError('reuse_port not supported by socket module, '
                         'SO_REUSEPORT defined but not implemented.')
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
    """Return a getaddrinfo()-style 5-tuple if *host* is already a numeric
    IP address, else None (meaning a real getaddrinfo() call is needed).

    Users may have resolved names themselves and passed in IPs; this lets
    the event loop skip the (potentially blocking) resolver in that case.
    """
    if not hasattr(socket, 'inet_pton'):
        return

    if host is None or proto not in {0, socket.IPPROTO_TCP,
                                     socket.IPPROTO_UDP}:
        return None

    # Pin the protocol from the socket type; anything else can't be
    # short-circuited here.
    if type == socket.SOCK_STREAM:
        proto = socket.IPPROTO_TCP
    elif type == socket.SOCK_DGRAM:
        proto = socket.IPPROTO_UDP
    else:
        return None

    # Normalize the port: missing/empty means 0; a service name such as
    # "http" must go through getaddrinfo(), so bail out on non-integers.
    if port is None:
        port = 0
    elif isinstance(port, (bytes, str)) and not port:
        port = 0
    else:
        try:
            port = int(port)
        except (TypeError, ValueError):
            return None

    if family == socket.AF_UNSPEC:
        candidate_families = [socket.AF_INET]
        if _HAS_IPv6:
            candidate_families.append(socket.AF_INET6)
    else:
        candidate_families = [family]

    if isinstance(host, bytes):
        host = host.decode('idna')
    if '%' in host:
        # Linux's inet_pton doesn't accept an IPv6 zone index after the
        # host, like '::1%lo0'.
        return None

    for af in candidate_families:
        try:
            socket.inet_pton(af, host)
        except OSError:
            continue
        # *host* parsed as a numeric address of this family.
        if _HAS_IPv6 and af == socket.AF_INET6:
            return af, type, proto, '', (host, port, flowinfo, scopeid)
        return af, type, proto, '', (host, port)

    # "host" is not an IP address.
    return None
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
    """Interleave list of addrinfo tuples by family."""
    # Bucket the addresses per family, preserving first-seen family order.
    by_family = collections.OrderedDict()
    for info in addrinfos:
        by_family.setdefault(info[0], []).append(info)
    buckets = list(by_family.values())

    result = []
    if first_address_family_count > 1:
        # Let the preferred (first) family contribute extra leading entries
        # before round-robin interleaving starts.
        result.extend(buckets[0][:first_address_family_count - 1])
        del buckets[0][:first_address_family_count - 1]
    result.extend(
        info for info in itertools.chain.from_iterable(
            itertools.zip_longest(*buckets))
        if info is not None)
    return result
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def _run_until_complete_cb(fut):
    """Done-callback used by run_until_complete() to stop the owning loop."""
    if fut.cancelled():
        futures._get_loop(fut).stop()
        return
    if isinstance(fut.exception(), (SystemExit, KeyboardInterrupt)):
        # Issue #22429: run_forever() already finished (unwound by the
        # exception), no need to stop it.
        return
    futures._get_loop(fut).stop()
| 192 |
+
|
| 193 |
+
|
| 194 |
+
if hasattr(socket, 'TCP_NODELAY'):
    def _set_nodelay(sock):
        """Disable Nagle's algorithm on TCP/IP stream sockets."""
        inet_family = sock.family in {socket.AF_INET, socket.AF_INET6}
        if (inet_family
                and sock.type == socket.SOCK_STREAM
                and sock.proto == socket.IPPROTO_TCP):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
    def _set_nodelay(sock):
        """This platform has no TCP_NODELAY; nothing to do."""
        pass
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def _check_ssl_socket(sock):
    """Reject ssl.SSLSocket instances; the APIs using this take plain sockets."""
    if ssl is None:
        # No ssl support compiled in: nothing to check against.
        return
    if isinstance(sock, ssl.SSLSocket):
        raise TypeError("Socket cannot be of type SSLSocket")
| 208 |
+
|
| 209 |
+
|
| 210 |
+
class _SendfileFallbackProtocol(protocols.Protocol):
    """Temporary protocol swapped onto a transport during a sendfile
    fallback.

    It pauses reading, captures the transport's previous protocol and
    flow-control state on entry, exposes drain() for write back-pressure
    while data is being pushed manually, and restore() puts the original
    protocol back.
    """

    def __init__(self, transp, loop=None):
        if not isinstance(transp, transports._FlowControlMixin):
            raise TypeError("transport should be _FlowControlMixin instance")
        self._transport = transp
        # Previous protocol and state, to be reinstated by restore().
        self._proto = transp.get_protocol()
        self._should_resume_reading = transp.is_reading()
        self._should_resume_writing = transp._protocol_paused
        transp.pause_reading()
        transp.set_protocol(self)
        if self._should_resume_writing:
            # Writing is currently paused: drain() must wait until the
            # transport resumes us.
            self._write_ready_fut = self._transport._loop.create_future()
        else:
            self._write_ready_fut = None

    async def drain(self):
        # Wait until the transport is ready for more data (or raise if
        # the connection is going away).
        if self._transport.is_closing():
            raise ConnectionError("Connection closed by peer")
        fut = self._write_ready_fut
        if fut is None:
            return
        await fut

    def connection_made(self, transport):
        # This protocol is only ever installed on an existing connection.
        raise RuntimeError("Invalid state: "
                           "connection should have been established already.")

    def connection_lost(self, exc):
        if self._write_ready_fut is not None:
            # Never happens if peer disconnects after sending the whole content
            # Thus disconnection is always an exception from user perspective
            if exc is None:
                self._write_ready_fut.set_exception(
                    ConnectionError("Connection is closed by peer"))
            else:
                self._write_ready_fut.set_exception(exc)
        self._proto.connection_lost(exc)

    def pause_writing(self):
        if self._write_ready_fut is not None:
            return
        self._write_ready_fut = self._transport._loop.create_future()

    def resume_writing(self):
        if self._write_ready_fut is None:
            return
        self._write_ready_fut.set_result(False)
        self._write_ready_fut = None

    def data_received(self, data):
        # Reading was paused in __init__; receiving data here is a bug.
        raise RuntimeError("Invalid state: reading should be paused")

    def eof_received(self):
        raise RuntimeError("Invalid state: reading should be paused")

    async def restore(self):
        # Reinstall the original protocol and flow-control state.
        self._transport.set_protocol(self._proto)
        if self._should_resume_reading:
            self._transport.resume_reading()
        if self._write_ready_fut is not None:
            # Cancel the future.
            # Basically it has no effect because protocol is switched back,
            # no code should wait for it anymore.
            self._write_ready_fut.cancel()
        if self._should_resume_writing:
            self._proto.resume_writing()
| 276 |
+
|
| 277 |
+
|
| 278 |
+
class Server(events.AbstractServer):
|
| 279 |
+
|
| 280 |
+
def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
|
| 281 |
+
ssl_handshake_timeout):
|
| 282 |
+
self._loop = loop
|
| 283 |
+
self._sockets = sockets
|
| 284 |
+
self._active_count = 0
|
| 285 |
+
self._waiters = []
|
| 286 |
+
self._protocol_factory = protocol_factory
|
| 287 |
+
self._backlog = backlog
|
| 288 |
+
self._ssl_context = ssl_context
|
| 289 |
+
self._ssl_handshake_timeout = ssl_handshake_timeout
|
| 290 |
+
self._serving = False
|
| 291 |
+
self._serving_forever_fut = None
|
| 292 |
+
|
| 293 |
+
def __repr__(self):
|
| 294 |
+
return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
|
| 295 |
+
|
| 296 |
+
def _attach(self):
|
| 297 |
+
assert self._sockets is not None
|
| 298 |
+
self._active_count += 1
|
| 299 |
+
|
| 300 |
+
def _detach(self):
|
| 301 |
+
assert self._active_count > 0
|
| 302 |
+
self._active_count -= 1
|
| 303 |
+
if self._active_count == 0 and self._sockets is None:
|
| 304 |
+
self._wakeup()
|
| 305 |
+
|
| 306 |
+
def _wakeup(self):
|
| 307 |
+
waiters = self._waiters
|
| 308 |
+
self._waiters = None
|
| 309 |
+
for waiter in waiters:
|
| 310 |
+
if not waiter.done():
|
| 311 |
+
waiter.set_result(waiter)
|
| 312 |
+
|
| 313 |
+
def _start_serving(self):
|
| 314 |
+
if self._serving:
|
| 315 |
+
return
|
| 316 |
+
self._serving = True
|
| 317 |
+
for sock in self._sockets:
|
| 318 |
+
sock.listen(self._backlog)
|
| 319 |
+
self._loop._start_serving(
|
| 320 |
+
self._protocol_factory, sock, self._ssl_context,
|
| 321 |
+
self, self._backlog, self._ssl_handshake_timeout)
|
| 322 |
+
|
| 323 |
+
def get_loop(self):
|
| 324 |
+
return self._loop
|
| 325 |
+
|
| 326 |
+
def is_serving(self):
|
| 327 |
+
return self._serving
|
| 328 |
+
|
| 329 |
+
@property
|
| 330 |
+
def sockets(self):
|
| 331 |
+
if self._sockets is None:
|
| 332 |
+
return ()
|
| 333 |
+
return tuple(trsock.TransportSocket(s) for s in self._sockets)
|
| 334 |
+
|
| 335 |
+
def close(self):
|
| 336 |
+
sockets = self._sockets
|
| 337 |
+
if sockets is None:
|
| 338 |
+
return
|
| 339 |
+
self._sockets = None
|
| 340 |
+
|
| 341 |
+
for sock in sockets:
|
| 342 |
+
self._loop._stop_serving(sock)
|
| 343 |
+
|
| 344 |
+
self._serving = False
|
| 345 |
+
|
| 346 |
+
if (self._serving_forever_fut is not None and
|
| 347 |
+
not self._serving_forever_fut.done()):
|
| 348 |
+
self._serving_forever_fut.cancel()
|
| 349 |
+
self._serving_forever_fut = None
|
| 350 |
+
|
| 351 |
+
if self._active_count == 0:
|
| 352 |
+
self._wakeup()
|
| 353 |
+
|
| 354 |
+
async def start_serving(self):
|
| 355 |
+
self._start_serving()
|
| 356 |
+
# Skip one loop iteration so that all 'loop.add_reader'
|
| 357 |
+
# go through.
|
| 358 |
+
await tasks.sleep(0)
|
| 359 |
+
|
| 360 |
+
async def serve_forever(self):
|
| 361 |
+
if self._serving_forever_fut is not None:
|
| 362 |
+
raise RuntimeError(
|
| 363 |
+
f'server {self!r} is already being awaited on serve_forever()')
|
| 364 |
+
if self._sockets is None:
|
| 365 |
+
raise RuntimeError(f'server {self!r} is closed')
|
| 366 |
+
|
| 367 |
+
self._start_serving()
|
| 368 |
+
self._serving_forever_fut = self._loop.create_future()
|
| 369 |
+
|
| 370 |
+
try:
|
| 371 |
+
await self._serving_forever_fut
|
| 372 |
+
except exceptions.CancelledError:
|
| 373 |
+
try:
|
| 374 |
+
self.close()
|
| 375 |
+
await self.wait_closed()
|
| 376 |
+
finally:
|
| 377 |
+
raise
|
| 378 |
+
finally:
|
| 379 |
+
self._serving_forever_fut = None
|
| 380 |
+
|
| 381 |
+
async def wait_closed(self):
|
| 382 |
+
if self._sockets is None or self._waiters is None:
|
| 383 |
+
return
|
| 384 |
+
waiter = self._loop.create_future()
|
| 385 |
+
self._waiters.append(waiter)
|
| 386 |
+
await waiter
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
class BaseEventLoop(events.AbstractEventLoop):
|
| 390 |
+
|
| 391 |
+
def __init__(self):
|
| 392 |
+
self._timer_cancelled_count = 0
|
| 393 |
+
self._closed = False
|
| 394 |
+
self._stopping = False
|
| 395 |
+
self._ready = collections.deque()
|
| 396 |
+
self._scheduled = []
|
| 397 |
+
self._default_executor = None
|
| 398 |
+
self._internal_fds = 0
|
| 399 |
+
# Identifier of the thread running the event loop, or None if the
|
| 400 |
+
# event loop is not running
|
| 401 |
+
self._thread_id = None
|
| 402 |
+
self._clock_resolution = time.get_clock_info('monotonic').resolution
|
| 403 |
+
self._exception_handler = None
|
| 404 |
+
self.set_debug(coroutines._is_debug_mode())
|
| 405 |
+
# In debug mode, if the execution of a callback or a step of a task
|
| 406 |
+
# exceed this duration in seconds, the slow callback/task is logged.
|
| 407 |
+
self.slow_callback_duration = 0.1
|
| 408 |
+
self._current_handle = None
|
| 409 |
+
self._task_factory = None
|
| 410 |
+
self._coroutine_origin_tracking_enabled = False
|
| 411 |
+
self._coroutine_origin_tracking_saved_depth = None
|
| 412 |
+
|
| 413 |
+
# A weak set of all asynchronous generators that are
|
| 414 |
+
# being iterated by the loop.
|
| 415 |
+
self._asyncgens = weakref.WeakSet()
|
| 416 |
+
# Set to True when `loop.shutdown_asyncgens` is called.
|
| 417 |
+
self._asyncgens_shutdown_called = False
|
| 418 |
+
# Set to True when `loop.shutdown_default_executor` is called.
|
| 419 |
+
self._executor_shutdown_called = False
|
| 420 |
+
|
| 421 |
+
def __repr__(self):
|
| 422 |
+
return (
|
| 423 |
+
f'<{self.__class__.__name__} running={self.is_running()} '
|
| 424 |
+
f'closed={self.is_closed()} debug={self.get_debug()}>'
|
| 425 |
+
)
|
| 426 |
+
|
| 427 |
+
def create_future(self):
|
| 428 |
+
"""Create a Future object attached to the loop."""
|
| 429 |
+
return futures.Future(loop=self)
|
| 430 |
+
|
| 431 |
+
def create_task(self, coro, *, name=None):
|
| 432 |
+
"""Schedule a coroutine object.
|
| 433 |
+
|
| 434 |
+
Return a task object.
|
| 435 |
+
"""
|
| 436 |
+
self._check_closed()
|
| 437 |
+
if self._task_factory is None:
|
| 438 |
+
task = tasks.Task(coro, loop=self, name=name)
|
| 439 |
+
if task._source_traceback:
|
| 440 |
+
del task._source_traceback[-1]
|
| 441 |
+
else:
|
| 442 |
+
task = self._task_factory(self, coro)
|
| 443 |
+
tasks._set_task_name(task, name)
|
| 444 |
+
|
| 445 |
+
return task
|
| 446 |
+
|
| 447 |
+
def set_task_factory(self, factory):
|
| 448 |
+
"""Set a task factory that will be used by loop.create_task().
|
| 449 |
+
|
| 450 |
+
If factory is None the default task factory will be set.
|
| 451 |
+
|
| 452 |
+
If factory is a callable, it should have a signature matching
|
| 453 |
+
'(loop, coro)', where 'loop' will be a reference to the active
|
| 454 |
+
event loop, 'coro' will be a coroutine object. The callable
|
| 455 |
+
must return a Future.
|
| 456 |
+
"""
|
| 457 |
+
if factory is not None and not callable(factory):
|
| 458 |
+
raise TypeError('task factory must be a callable or None')
|
| 459 |
+
self._task_factory = factory
|
| 460 |
+
|
| 461 |
+
def get_task_factory(self):
|
| 462 |
+
"""Return a task factory, or None if the default one is in use."""
|
| 463 |
+
return self._task_factory
|
| 464 |
+
|
| 465 |
+
def _make_socket_transport(self, sock, protocol, waiter=None, *,
|
| 466 |
+
extra=None, server=None):
|
| 467 |
+
"""Create socket transport."""
|
| 468 |
+
raise NotImplementedError
|
| 469 |
+
|
| 470 |
+
def _make_ssl_transport(
|
| 471 |
+
self, rawsock, protocol, sslcontext, waiter=None,
|
| 472 |
+
*, server_side=False, server_hostname=None,
|
| 473 |
+
extra=None, server=None,
|
| 474 |
+
ssl_handshake_timeout=None,
|
| 475 |
+
call_connection_made=True):
|
| 476 |
+
"""Create SSL transport."""
|
| 477 |
+
raise NotImplementedError
|
| 478 |
+
|
| 479 |
+
def _make_datagram_transport(self, sock, protocol,
|
| 480 |
+
address=None, waiter=None, extra=None):
|
| 481 |
+
"""Create datagram transport."""
|
| 482 |
+
raise NotImplementedError
|
| 483 |
+
|
| 484 |
+
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
|
| 485 |
+
extra=None):
|
| 486 |
+
"""Create read pipe transport."""
|
| 487 |
+
raise NotImplementedError
|
| 488 |
+
|
| 489 |
+
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
|
| 490 |
+
extra=None):
|
| 491 |
+
"""Create write pipe transport."""
|
| 492 |
+
raise NotImplementedError
|
| 493 |
+
|
| 494 |
+
async def _make_subprocess_transport(self, protocol, args, shell,
|
| 495 |
+
stdin, stdout, stderr, bufsize,
|
| 496 |
+
extra=None, **kwargs):
|
| 497 |
+
"""Create subprocess transport."""
|
| 498 |
+
raise NotImplementedError
|
| 499 |
+
|
| 500 |
+
def _write_to_self(self):
|
| 501 |
+
"""Write a byte to self-pipe, to wake up the event loop.
|
| 502 |
+
|
| 503 |
+
This may be called from a different thread.
|
| 504 |
+
|
| 505 |
+
The subclass is responsible for implementing the self-pipe.
|
| 506 |
+
"""
|
| 507 |
+
raise NotImplementedError
|
| 508 |
+
|
| 509 |
+
def _process_events(self, event_list):
|
| 510 |
+
"""Process selector events."""
|
| 511 |
+
raise NotImplementedError
|
| 512 |
+
|
| 513 |
+
def _check_closed(self):
|
| 514 |
+
if self._closed:
|
| 515 |
+
raise RuntimeError('Event loop is closed')
|
| 516 |
+
|
| 517 |
+
def _check_default_executor(self):
|
| 518 |
+
if self._executor_shutdown_called:
|
| 519 |
+
raise RuntimeError('Executor shutdown has been called')
|
| 520 |
+
|
| 521 |
+
def _asyncgen_finalizer_hook(self, agen):
|
| 522 |
+
self._asyncgens.discard(agen)
|
| 523 |
+
if not self.is_closed():
|
| 524 |
+
self.call_soon_threadsafe(self.create_task, agen.aclose())
|
| 525 |
+
|
| 526 |
+
def _asyncgen_firstiter_hook(self, agen):
|
| 527 |
+
if self._asyncgens_shutdown_called:
|
| 528 |
+
warnings.warn(
|
| 529 |
+
f"asynchronous generator {agen!r} was scheduled after "
|
| 530 |
+
f"loop.shutdown_asyncgens() call",
|
| 531 |
+
ResourceWarning, source=self)
|
| 532 |
+
|
| 533 |
+
self._asyncgens.add(agen)
|
| 534 |
+
|
| 535 |
+
async def shutdown_asyncgens(self):
|
| 536 |
+
"""Shutdown all active asynchronous generators."""
|
| 537 |
+
self._asyncgens_shutdown_called = True
|
| 538 |
+
|
| 539 |
+
if not len(self._asyncgens):
|
| 540 |
+
# If Python version is <3.6 or we don't have any asynchronous
|
| 541 |
+
# generators alive.
|
| 542 |
+
return
|
| 543 |
+
|
| 544 |
+
closing_agens = list(self._asyncgens)
|
| 545 |
+
self._asyncgens.clear()
|
| 546 |
+
|
| 547 |
+
results = await tasks.gather(
|
| 548 |
+
*[ag.aclose() for ag in closing_agens],
|
| 549 |
+
return_exceptions=True)
|
| 550 |
+
|
| 551 |
+
for result, agen in zip(results, closing_agens):
|
| 552 |
+
if isinstance(result, Exception):
|
| 553 |
+
self.call_exception_handler({
|
| 554 |
+
'message': f'an error occurred during closing of '
|
| 555 |
+
f'asynchronous generator {agen!r}',
|
| 556 |
+
'exception': result,
|
| 557 |
+
'asyncgen': agen
|
| 558 |
+
})
|
| 559 |
+
|
| 560 |
+
async def shutdown_default_executor(self):
|
| 561 |
+
"""Schedule the shutdown of the default executor."""
|
| 562 |
+
self._executor_shutdown_called = True
|
| 563 |
+
if self._default_executor is None:
|
| 564 |
+
return
|
| 565 |
+
future = self.create_future()
|
| 566 |
+
thread = threading.Thread(target=self._do_shutdown, args=(future,))
|
| 567 |
+
thread.start()
|
| 568 |
+
try:
|
| 569 |
+
await future
|
| 570 |
+
finally:
|
| 571 |
+
thread.join()
|
| 572 |
+
|
| 573 |
+
def _do_shutdown(self, future):
|
| 574 |
+
try:
|
| 575 |
+
self._default_executor.shutdown(wait=True)
|
| 576 |
+
if not self.is_closed():
|
| 577 |
+
self.call_soon_threadsafe(future.set_result, None)
|
| 578 |
+
except Exception as ex:
|
| 579 |
+
if not self.is_closed():
|
| 580 |
+
self.call_soon_threadsafe(future.set_exception, ex)
|
| 581 |
+
|
| 582 |
+
def _check_running(self):
|
| 583 |
+
if self.is_running():
|
| 584 |
+
raise RuntimeError('This event loop is already running')
|
| 585 |
+
if events._get_running_loop() is not None:
|
| 586 |
+
raise RuntimeError(
|
| 587 |
+
'Cannot run the event loop while another loop is running')
|
| 588 |
+
|
| 589 |
+
def run_forever(self):
|
| 590 |
+
"""Run until stop() is called."""
|
| 591 |
+
self._check_closed()
|
| 592 |
+
self._check_running()
|
| 593 |
+
self._set_coroutine_origin_tracking(self._debug)
|
| 594 |
+
|
| 595 |
+
old_agen_hooks = sys.get_asyncgen_hooks()
|
| 596 |
+
try:
|
| 597 |
+
self._thread_id = threading.get_ident()
|
| 598 |
+
sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
|
| 599 |
+
finalizer=self._asyncgen_finalizer_hook)
|
| 600 |
+
|
| 601 |
+
events._set_running_loop(self)
|
| 602 |
+
while True:
|
| 603 |
+
self._run_once()
|
| 604 |
+
if self._stopping:
|
| 605 |
+
break
|
| 606 |
+
finally:
|
| 607 |
+
self._stopping = False
|
| 608 |
+
self._thread_id = None
|
| 609 |
+
events._set_running_loop(None)
|
| 610 |
+
self._set_coroutine_origin_tracking(False)
|
| 611 |
+
sys.set_asyncgen_hooks(*old_agen_hooks)
|
| 612 |
+
|
| 613 |
+
def run_until_complete(self, future):
|
| 614 |
+
"""Run until the Future is done.
|
| 615 |
+
|
| 616 |
+
If the argument is a coroutine, it is wrapped in a Task.
|
| 617 |
+
|
| 618 |
+
WARNING: It would be disastrous to call run_until_complete()
|
| 619 |
+
with the same coroutine twice -- it would wrap it in two
|
| 620 |
+
different Tasks and that can't be good.
|
| 621 |
+
|
| 622 |
+
Return the Future's result, or raise its exception.
|
| 623 |
+
"""
|
| 624 |
+
self._check_closed()
|
| 625 |
+
self._check_running()
|
| 626 |
+
|
| 627 |
+
new_task = not futures.isfuture(future)
|
| 628 |
+
future = tasks.ensure_future(future, loop=self)
|
| 629 |
+
if new_task:
|
| 630 |
+
# An exception is raised if the future didn't complete, so there
|
| 631 |
+
# is no need to log the "destroy pending task" message
|
| 632 |
+
future._log_destroy_pending = False
|
| 633 |
+
|
| 634 |
+
future.add_done_callback(_run_until_complete_cb)
|
| 635 |
+
try:
|
| 636 |
+
self.run_forever()
|
| 637 |
+
except:
|
| 638 |
+
if new_task and future.done() and not future.cancelled():
|
| 639 |
+
# The coroutine raised a BaseException. Consume the exception
|
| 640 |
+
# to not log a warning, the caller doesn't have access to the
|
| 641 |
+
# local task.
|
| 642 |
+
future.exception()
|
| 643 |
+
raise
|
| 644 |
+
finally:
|
| 645 |
+
future.remove_done_callback(_run_until_complete_cb)
|
| 646 |
+
if not future.done():
|
| 647 |
+
raise RuntimeError('Event loop stopped before Future completed.')
|
| 648 |
+
|
| 649 |
+
return future.result()
|
| 650 |
+
|
| 651 |
+
def stop(self):
|
| 652 |
+
"""Stop running the event loop.
|
| 653 |
+
|
| 654 |
+
Every callback already scheduled will still run. This simply informs
|
| 655 |
+
run_forever to stop looping after a complete iteration.
|
| 656 |
+
"""
|
| 657 |
+
self._stopping = True
|
| 658 |
+
|
| 659 |
+
def close(self):
|
| 660 |
+
"""Close the event loop.
|
| 661 |
+
|
| 662 |
+
This clears the queues and shuts down the executor,
|
| 663 |
+
but does not wait for the executor to finish.
|
| 664 |
+
|
| 665 |
+
The event loop must not be running.
|
| 666 |
+
"""
|
| 667 |
+
if self.is_running():
|
| 668 |
+
raise RuntimeError("Cannot close a running event loop")
|
| 669 |
+
if self._closed:
|
| 670 |
+
return
|
| 671 |
+
if self._debug:
|
| 672 |
+
logger.debug("Close %r", self)
|
| 673 |
+
self._closed = True
|
| 674 |
+
self._ready.clear()
|
| 675 |
+
self._scheduled.clear()
|
| 676 |
+
self._executor_shutdown_called = True
|
| 677 |
+
executor = self._default_executor
|
| 678 |
+
if executor is not None:
|
| 679 |
+
self._default_executor = None
|
| 680 |
+
executor.shutdown(wait=False)
|
| 681 |
+
|
| 682 |
+
def is_closed(self):
|
| 683 |
+
"""Returns True if the event loop was closed."""
|
| 684 |
+
return self._closed
|
| 685 |
+
|
| 686 |
+
def __del__(self, _warn=warnings.warn):
|
| 687 |
+
if not self.is_closed():
|
| 688 |
+
_warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
|
| 689 |
+
if not self.is_running():
|
| 690 |
+
self.close()
|
| 691 |
+
|
| 692 |
+
def is_running(self):
|
| 693 |
+
"""Returns True if the event loop is running."""
|
| 694 |
+
return (self._thread_id is not None)
|
| 695 |
+
|
| 696 |
+
def time(self):
|
| 697 |
+
"""Return the time according to the event loop's clock.
|
| 698 |
+
|
| 699 |
+
This is a float expressed in seconds since an epoch, but the
|
| 700 |
+
epoch, precision, accuracy and drift are unspecified and may
|
| 701 |
+
differ per event loop.
|
| 702 |
+
"""
|
| 703 |
+
return time.monotonic()
|
| 704 |
+
|
| 705 |
+
def call_later(self, delay, callback, *args, context=None):
|
| 706 |
+
"""Arrange for a callback to be called at a given time.
|
| 707 |
+
|
| 708 |
+
Return a Handle: an opaque object with a cancel() method that
|
| 709 |
+
can be used to cancel the call.
|
| 710 |
+
|
| 711 |
+
The delay can be an int or float, expressed in seconds. It is
|
| 712 |
+
always relative to the current time.
|
| 713 |
+
|
| 714 |
+
Each callback will be called exactly once. If two callbacks
|
| 715 |
+
are scheduled for exactly the same time, it undefined which
|
| 716 |
+
will be called first.
|
| 717 |
+
|
| 718 |
+
Any positional arguments after the callback will be passed to
|
| 719 |
+
the callback when it is called.
|
| 720 |
+
"""
|
| 721 |
+
timer = self.call_at(self.time() + delay, callback, *args,
|
| 722 |
+
context=context)
|
| 723 |
+
if timer._source_traceback:
|
| 724 |
+
del timer._source_traceback[-1]
|
| 725 |
+
return timer
|
| 726 |
+
|
| 727 |
+
def call_at(self, when, callback, *args, context=None):
|
| 728 |
+
"""Like call_later(), but uses an absolute time.
|
| 729 |
+
|
| 730 |
+
Absolute time corresponds to the event loop's time() method.
|
| 731 |
+
"""
|
| 732 |
+
self._check_closed()
|
| 733 |
+
if self._debug:
|
| 734 |
+
self._check_thread()
|
| 735 |
+
self._check_callback(callback, 'call_at')
|
| 736 |
+
timer = events.TimerHandle(when, callback, args, self, context)
|
| 737 |
+
if timer._source_traceback:
|
| 738 |
+
del timer._source_traceback[-1]
|
| 739 |
+
heapq.heappush(self._scheduled, timer)
|
| 740 |
+
timer._scheduled = True
|
| 741 |
+
return timer
|
| 742 |
+
|
| 743 |
+
def call_soon(self, callback, *args, context=None):
|
| 744 |
+
"""Arrange for a callback to be called as soon as possible.
|
| 745 |
+
|
| 746 |
+
This operates as a FIFO queue: callbacks are called in the
|
| 747 |
+
order in which they are registered. Each callback will be
|
| 748 |
+
called exactly once.
|
| 749 |
+
|
| 750 |
+
Any positional arguments after the callback will be passed to
|
| 751 |
+
the callback when it is called.
|
| 752 |
+
"""
|
| 753 |
+
self._check_closed()
|
| 754 |
+
if self._debug:
|
| 755 |
+
self._check_thread()
|
| 756 |
+
self._check_callback(callback, 'call_soon')
|
| 757 |
+
handle = self._call_soon(callback, args, context)
|
| 758 |
+
if handle._source_traceback:
|
| 759 |
+
del handle._source_traceback[-1]
|
| 760 |
+
return handle
|
| 761 |
+
|
| 762 |
+
def _check_callback(self, callback, method):
|
| 763 |
+
if (coroutines.iscoroutine(callback) or
|
| 764 |
+
coroutines.iscoroutinefunction(callback)):
|
| 765 |
+
raise TypeError(
|
| 766 |
+
f"coroutines cannot be used with {method}()")
|
| 767 |
+
if not callable(callback):
|
| 768 |
+
raise TypeError(
|
| 769 |
+
f'a callable object was expected by {method}(), '
|
| 770 |
+
f'got {callback!r}')
|
| 771 |
+
|
| 772 |
+
def _call_soon(self, callback, args, context):
|
| 773 |
+
handle = events.Handle(callback, args, self, context)
|
| 774 |
+
if handle._source_traceback:
|
| 775 |
+
del handle._source_traceback[-1]
|
| 776 |
+
self._ready.append(handle)
|
| 777 |
+
return handle
|
| 778 |
+
|
| 779 |
+
def _check_thread(self):
|
| 780 |
+
"""Check that the current thread is the thread running the event loop.
|
| 781 |
+
|
| 782 |
+
Non-thread-safe methods of this class make this assumption and will
|
| 783 |
+
likely behave incorrectly when the assumption is violated.
|
| 784 |
+
|
| 785 |
+
Should only be called when (self._debug == True). The caller is
|
| 786 |
+
responsible for checking this condition for performance reasons.
|
| 787 |
+
"""
|
| 788 |
+
if self._thread_id is None:
|
| 789 |
+
return
|
| 790 |
+
thread_id = threading.get_ident()
|
| 791 |
+
if thread_id != self._thread_id:
|
| 792 |
+
raise RuntimeError(
|
| 793 |
+
"Non-thread-safe operation invoked on an event loop other "
|
| 794 |
+
"than the current one")
|
| 795 |
+
|
| 796 |
+
def call_soon_threadsafe(self, callback, *args, context=None):
|
| 797 |
+
"""Like call_soon(), but thread-safe."""
|
| 798 |
+
self._check_closed()
|
| 799 |
+
if self._debug:
|
| 800 |
+
self._check_callback(callback, 'call_soon_threadsafe')
|
| 801 |
+
handle = self._call_soon(callback, args, context)
|
| 802 |
+
if handle._source_traceback:
|
| 803 |
+
del handle._source_traceback[-1]
|
| 804 |
+
self._write_to_self()
|
| 805 |
+
return handle
|
| 806 |
+
|
| 807 |
+
def run_in_executor(self, executor, func, *args):
|
| 808 |
+
self._check_closed()
|
| 809 |
+
if self._debug:
|
| 810 |
+
self._check_callback(func, 'run_in_executor')
|
| 811 |
+
if executor is None:
|
| 812 |
+
executor = self._default_executor
|
| 813 |
+
# Only check when the default executor is being used
|
| 814 |
+
self._check_default_executor()
|
| 815 |
+
if executor is None:
|
| 816 |
+
executor = concurrent.futures.ThreadPoolExecutor(
|
| 817 |
+
thread_name_prefix='asyncio'
|
| 818 |
+
)
|
| 819 |
+
self._default_executor = executor
|
| 820 |
+
return futures.wrap_future(
|
| 821 |
+
executor.submit(func, *args), loop=self)
|
| 822 |
+
|
| 823 |
+
def set_default_executor(self, executor):
|
| 824 |
+
if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
|
| 825 |
+
warnings.warn(
|
| 826 |
+
'Using the default executor that is not an instance of '
|
| 827 |
+
'ThreadPoolExecutor is deprecated and will be prohibited '
|
| 828 |
+
'in Python 3.9',
|
| 829 |
+
DeprecationWarning, 2)
|
| 830 |
+
self._default_executor = executor
|
| 831 |
+
|
| 832 |
+
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
|
| 833 |
+
msg = [f"{host}:{port!r}"]
|
| 834 |
+
if family:
|
| 835 |
+
msg.append(f'family={family!r}')
|
| 836 |
+
if type:
|
| 837 |
+
msg.append(f'type={type!r}')
|
| 838 |
+
if proto:
|
| 839 |
+
msg.append(f'proto={proto!r}')
|
| 840 |
+
if flags:
|
| 841 |
+
msg.append(f'flags={flags!r}')
|
| 842 |
+
msg = ', '.join(msg)
|
| 843 |
+
logger.debug('Get address info %s', msg)
|
| 844 |
+
|
| 845 |
+
t0 = self.time()
|
| 846 |
+
addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
|
| 847 |
+
dt = self.time() - t0
|
| 848 |
+
|
| 849 |
+
msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
|
| 850 |
+
if dt >= self.slow_callback_duration:
|
| 851 |
+
logger.info(msg)
|
| 852 |
+
else:
|
| 853 |
+
logger.debug(msg)
|
| 854 |
+
return addrinfo
|
| 855 |
+
|
| 856 |
+
async def getaddrinfo(self, host, port, *,
|
| 857 |
+
family=0, type=0, proto=0, flags=0):
|
| 858 |
+
if self._debug:
|
| 859 |
+
getaddr_func = self._getaddrinfo_debug
|
| 860 |
+
else:
|
| 861 |
+
getaddr_func = socket.getaddrinfo
|
| 862 |
+
|
| 863 |
+
return await self.run_in_executor(
|
| 864 |
+
None, getaddr_func, host, port, family, type, proto, flags)
|
| 865 |
+
|
| 866 |
+
async def getnameinfo(self, sockaddr, flags=0):
|
| 867 |
+
return await self.run_in_executor(
|
| 868 |
+
None, socket.getnameinfo, sockaddr, flags)
|
| 869 |
+
|
| 870 |
+
async def sock_sendfile(self, sock, file, offset=0, count=None,
|
| 871 |
+
*, fallback=True):
|
| 872 |
+
if self._debug and sock.gettimeout() != 0:
|
| 873 |
+
raise ValueError("the socket must be non-blocking")
|
| 874 |
+
_check_ssl_socket(sock)
|
| 875 |
+
self._check_sendfile_params(sock, file, offset, count)
|
| 876 |
+
try:
|
| 877 |
+
return await self._sock_sendfile_native(sock, file,
|
| 878 |
+
offset, count)
|
| 879 |
+
except exceptions.SendfileNotAvailableError as exc:
|
| 880 |
+
if not fallback:
|
| 881 |
+
raise
|
| 882 |
+
return await self._sock_sendfile_fallback(sock, file,
|
| 883 |
+
offset, count)
|
| 884 |
+
|
| 885 |
+
async def _sock_sendfile_native(self, sock, file, offset, count):
|
| 886 |
+
# NB: sendfile syscall is not supported for SSL sockets and
|
| 887 |
+
# non-mmap files even if sendfile is supported by OS
|
| 888 |
+
raise exceptions.SendfileNotAvailableError(
|
| 889 |
+
f"syscall sendfile is not available for socket {sock!r} "
|
| 890 |
+
f"and file {file!r} combination")
|
| 891 |
+
|
| 892 |
+
async def _sock_sendfile_fallback(self, sock, file, offset, count):
|
| 893 |
+
if offset:
|
| 894 |
+
file.seek(offset)
|
| 895 |
+
blocksize = (
|
| 896 |
+
min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
|
| 897 |
+
if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
|
| 898 |
+
)
|
| 899 |
+
buf = bytearray(blocksize)
|
| 900 |
+
total_sent = 0
|
| 901 |
+
try:
|
| 902 |
+
while True:
|
| 903 |
+
if count:
|
| 904 |
+
blocksize = min(count - total_sent, blocksize)
|
| 905 |
+
if blocksize <= 0:
|
| 906 |
+
break
|
| 907 |
+
view = memoryview(buf)[:blocksize]
|
| 908 |
+
read = await self.run_in_executor(None, file.readinto, view)
|
| 909 |
+
if not read:
|
| 910 |
+
break # EOF
|
| 911 |
+
await self.sock_sendall(sock, view[:read])
|
| 912 |
+
total_sent += read
|
| 913 |
+
return total_sent
|
| 914 |
+
finally:
|
| 915 |
+
if total_sent > 0 and hasattr(file, 'seek'):
|
| 916 |
+
file.seek(offset + total_sent)
|
| 917 |
+
|
| 918 |
+
def _check_sendfile_params(self, sock, file, offset, count):
|
| 919 |
+
if 'b' not in getattr(file, 'mode', 'b'):
|
| 920 |
+
raise ValueError("file should be opened in binary mode")
|
| 921 |
+
if not sock.type == socket.SOCK_STREAM:
|
| 922 |
+
raise ValueError("only SOCK_STREAM type sockets are supported")
|
| 923 |
+
if count is not None:
|
| 924 |
+
if not isinstance(count, int):
|
| 925 |
+
raise TypeError(
|
| 926 |
+
"count must be a positive integer (got {!r})".format(count))
|
| 927 |
+
if count <= 0:
|
| 928 |
+
raise ValueError(
|
| 929 |
+
"count must be a positive integer (got {!r})".format(count))
|
| 930 |
+
if not isinstance(offset, int):
|
| 931 |
+
raise TypeError(
|
| 932 |
+
"offset must be a non-negative integer (got {!r})".format(
|
| 933 |
+
offset))
|
| 934 |
+
if offset < 0:
|
| 935 |
+
raise ValueError(
|
| 936 |
+
"offset must be a non-negative integer (got {!r})".format(
|
| 937 |
+
offset))
|
| 938 |
+
|
| 939 |
+
async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
|
| 940 |
+
"""Create, bind and connect one socket."""
|
| 941 |
+
my_exceptions = []
|
| 942 |
+
exceptions.append(my_exceptions)
|
| 943 |
+
family, type_, proto, _, address = addr_info
|
| 944 |
+
sock = None
|
| 945 |
+
try:
|
| 946 |
+
sock = socket.socket(family=family, type=type_, proto=proto)
|
| 947 |
+
sock.setblocking(False)
|
| 948 |
+
if local_addr_infos is not None:
|
| 949 |
+
for lfamily, _, _, _, laddr in local_addr_infos:
|
| 950 |
+
# skip local addresses of different family
|
| 951 |
+
if lfamily != family:
|
| 952 |
+
continue
|
| 953 |
+
try:
|
| 954 |
+
sock.bind(laddr)
|
| 955 |
+
break
|
| 956 |
+
except OSError as exc:
|
| 957 |
+
msg = (
|
| 958 |
+
f'error while attempting to bind on '
|
| 959 |
+
f'address {laddr!r}: '
|
| 960 |
+
f'{exc.strerror.lower()}'
|
| 961 |
+
)
|
| 962 |
+
exc = OSError(exc.errno, msg)
|
| 963 |
+
my_exceptions.append(exc)
|
| 964 |
+
else: # all bind attempts failed
|
| 965 |
+
if my_exceptions:
|
| 966 |
+
raise my_exceptions.pop()
|
| 967 |
+
else:
|
| 968 |
+
raise OSError(f"no matching local address with {family=} found")
|
| 969 |
+
await self.sock_connect(sock, address)
|
| 970 |
+
return sock
|
| 971 |
+
except OSError as exc:
|
| 972 |
+
my_exceptions.append(exc)
|
| 973 |
+
if sock is not None:
|
| 974 |
+
sock.close()
|
| 975 |
+
raise
|
| 976 |
+
except:
|
| 977 |
+
if sock is not None:
|
| 978 |
+
sock.close()
|
| 979 |
+
raise
|
| 980 |
+
finally:
|
| 981 |
+
exceptions = my_exceptions = None
|
| 982 |
+
|
| 983 |
+
async def create_connection(
|
| 984 |
+
self, protocol_factory, host=None, port=None,
|
| 985 |
+
*, ssl=None, family=0,
|
| 986 |
+
proto=0, flags=0, sock=None,
|
| 987 |
+
local_addr=None, server_hostname=None,
|
| 988 |
+
ssl_handshake_timeout=None,
|
| 989 |
+
happy_eyeballs_delay=None, interleave=None):
|
| 990 |
+
"""Connect to a TCP server.
|
| 991 |
+
|
| 992 |
+
Create a streaming transport connection to a given internet host and
|
| 993 |
+
port: socket family AF_INET or socket.AF_INET6 depending on host (or
|
| 994 |
+
family if specified), socket type SOCK_STREAM. protocol_factory must be
|
| 995 |
+
a callable returning a protocol instance.
|
| 996 |
+
|
| 997 |
+
This method is a coroutine which will try to establish the connection
|
| 998 |
+
in the background. When successful, the coroutine returns a
|
| 999 |
+
(transport, protocol) pair.
|
| 1000 |
+
"""
|
| 1001 |
+
if server_hostname is not None and not ssl:
|
| 1002 |
+
raise ValueError('server_hostname is only meaningful with ssl')
|
| 1003 |
+
|
| 1004 |
+
if server_hostname is None and ssl:
|
| 1005 |
+
# Use host as default for server_hostname. It is an error
|
| 1006 |
+
# if host is empty or not set, e.g. when an
|
| 1007 |
+
# already-connected socket was passed or when only a port
|
| 1008 |
+
# is given. To avoid this error, you can pass
|
| 1009 |
+
# server_hostname='' -- this will bypass the hostname
|
| 1010 |
+
# check. (This also means that if host is a numeric
|
| 1011 |
+
# IP/IPv6 address, we will attempt to verify that exact
|
| 1012 |
+
# address; this will probably fail, but it is possible to
|
| 1013 |
+
# create a certificate for a specific IP address, so we
|
| 1014 |
+
# don't judge it here.)
|
| 1015 |
+
if not host:
|
| 1016 |
+
raise ValueError('You must set server_hostname '
|
| 1017 |
+
'when using ssl without a host')
|
| 1018 |
+
server_hostname = host
|
| 1019 |
+
|
| 1020 |
+
if ssl_handshake_timeout is not None and not ssl:
|
| 1021 |
+
raise ValueError(
|
| 1022 |
+
'ssl_handshake_timeout is only meaningful with ssl')
|
| 1023 |
+
|
| 1024 |
+
if sock is not None:
|
| 1025 |
+
_check_ssl_socket(sock)
|
| 1026 |
+
|
| 1027 |
+
if happy_eyeballs_delay is not None and interleave is None:
|
| 1028 |
+
# If using happy eyeballs, default to interleave addresses by family
|
| 1029 |
+
interleave = 1
|
| 1030 |
+
|
| 1031 |
+
if host is not None or port is not None:
|
| 1032 |
+
if sock is not None:
|
| 1033 |
+
raise ValueError(
|
| 1034 |
+
'host/port and sock can not be specified at the same time')
|
| 1035 |
+
|
| 1036 |
+
infos = await self._ensure_resolved(
|
| 1037 |
+
(host, port), family=family,
|
| 1038 |
+
type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
|
| 1039 |
+
if not infos:
|
| 1040 |
+
raise OSError('getaddrinfo() returned empty list')
|
| 1041 |
+
|
| 1042 |
+
if local_addr is not None:
|
| 1043 |
+
laddr_infos = await self._ensure_resolved(
|
| 1044 |
+
local_addr, family=family,
|
| 1045 |
+
type=socket.SOCK_STREAM, proto=proto,
|
| 1046 |
+
flags=flags, loop=self)
|
| 1047 |
+
if not laddr_infos:
|
| 1048 |
+
raise OSError('getaddrinfo() returned empty list')
|
| 1049 |
+
else:
|
| 1050 |
+
laddr_infos = None
|
| 1051 |
+
|
| 1052 |
+
if interleave:
|
| 1053 |
+
infos = _interleave_addrinfos(infos, interleave)
|
| 1054 |
+
|
| 1055 |
+
exceptions = []
|
| 1056 |
+
if happy_eyeballs_delay is None:
|
| 1057 |
+
# not using happy eyeballs
|
| 1058 |
+
for addrinfo in infos:
|
| 1059 |
+
try:
|
| 1060 |
+
sock = await self._connect_sock(
|
| 1061 |
+
exceptions, addrinfo, laddr_infos)
|
| 1062 |
+
break
|
| 1063 |
+
except OSError:
|
| 1064 |
+
continue
|
| 1065 |
+
else: # using happy eyeballs
|
| 1066 |
+
sock, _, _ = await staggered.staggered_race(
|
| 1067 |
+
(functools.partial(self._connect_sock,
|
| 1068 |
+
exceptions, addrinfo, laddr_infos)
|
| 1069 |
+
for addrinfo in infos),
|
| 1070 |
+
happy_eyeballs_delay, loop=self)
|
| 1071 |
+
|
| 1072 |
+
if sock is None:
|
| 1073 |
+
exceptions = [exc for sub in exceptions for exc in sub]
|
| 1074 |
+
try:
|
| 1075 |
+
if len(exceptions) == 1:
|
| 1076 |
+
raise exceptions[0]
|
| 1077 |
+
else:
|
| 1078 |
+
# If they all have the same str(), raise one.
|
| 1079 |
+
model = str(exceptions[0])
|
| 1080 |
+
if all(str(exc) == model for exc in exceptions):
|
| 1081 |
+
raise exceptions[0]
|
| 1082 |
+
# Raise a combined exception so the user can see all
|
| 1083 |
+
# the various error messages.
|
| 1084 |
+
raise OSError('Multiple exceptions: {}'.format(
|
| 1085 |
+
', '.join(str(exc) for exc in exceptions)))
|
| 1086 |
+
finally:
|
| 1087 |
+
exceptions = None
|
| 1088 |
+
|
| 1089 |
+
else:
|
| 1090 |
+
if sock is None:
|
| 1091 |
+
raise ValueError(
|
| 1092 |
+
'host and port was not specified and no sock specified')
|
| 1093 |
+
if sock.type != socket.SOCK_STREAM:
|
| 1094 |
+
# We allow AF_INET, AF_INET6, AF_UNIX as long as they
|
| 1095 |
+
# are SOCK_STREAM.
|
| 1096 |
+
# We support passing AF_UNIX sockets even though we have
|
| 1097 |
+
# a dedicated API for that: create_unix_connection.
|
| 1098 |
+
# Disallowing AF_UNIX in this method, breaks backwards
|
| 1099 |
+
# compatibility.
|
| 1100 |
+
raise ValueError(
|
| 1101 |
+
f'A Stream Socket was expected, got {sock!r}')
|
| 1102 |
+
|
| 1103 |
+
transport, protocol = await self._create_connection_transport(
|
| 1104 |
+
sock, protocol_factory, ssl, server_hostname,
|
| 1105 |
+
ssl_handshake_timeout=ssl_handshake_timeout)
|
| 1106 |
+
if self._debug:
|
| 1107 |
+
# Get the socket from the transport because SSL transport closes
|
| 1108 |
+
# the old socket and creates a new SSL socket
|
| 1109 |
+
sock = transport.get_extra_info('socket')
|
| 1110 |
+
logger.debug("%r connected to %s:%r: (%r, %r)",
|
| 1111 |
+
sock, host, port, transport, protocol)
|
| 1112 |
+
return transport, protocol
|
| 1113 |
+
|
| 1114 |
+
async def _create_connection_transport(
|
| 1115 |
+
self, sock, protocol_factory, ssl,
|
| 1116 |
+
server_hostname, server_side=False,
|
| 1117 |
+
ssl_handshake_timeout=None):
|
| 1118 |
+
|
| 1119 |
+
sock.setblocking(False)
|
| 1120 |
+
|
| 1121 |
+
protocol = protocol_factory()
|
| 1122 |
+
waiter = self.create_future()
|
| 1123 |
+
if ssl:
|
| 1124 |
+
sslcontext = None if isinstance(ssl, bool) else ssl
|
| 1125 |
+
transport = self._make_ssl_transport(
|
| 1126 |
+
sock, protocol, sslcontext, waiter,
|
| 1127 |
+
server_side=server_side, server_hostname=server_hostname,
|
| 1128 |
+
ssl_handshake_timeout=ssl_handshake_timeout)
|
| 1129 |
+
else:
|
| 1130 |
+
transport = self._make_socket_transport(sock, protocol, waiter)
|
| 1131 |
+
|
| 1132 |
+
try:
|
| 1133 |
+
await waiter
|
| 1134 |
+
except:
|
| 1135 |
+
transport.close()
|
| 1136 |
+
raise
|
| 1137 |
+
|
| 1138 |
+
return transport, protocol
|
| 1139 |
+
|
| 1140 |
+
async def sendfile(self, transport, file, offset=0, count=None,
|
| 1141 |
+
*, fallback=True):
|
| 1142 |
+
"""Send a file to transport.
|
| 1143 |
+
|
| 1144 |
+
Return the total number of bytes which were sent.
|
| 1145 |
+
|
| 1146 |
+
The method uses high-performance os.sendfile if available.
|
| 1147 |
+
|
| 1148 |
+
file must be a regular file object opened in binary mode.
|
| 1149 |
+
|
| 1150 |
+
offset tells from where to start reading the file. If specified,
|
| 1151 |
+
count is the total number of bytes to transmit as opposed to
|
| 1152 |
+
sending the file until EOF is reached. File position is updated on
|
| 1153 |
+
return or also in case of error in which case file.tell()
|
| 1154 |
+
can be used to figure out the number of bytes
|
| 1155 |
+
which were sent.
|
| 1156 |
+
|
| 1157 |
+
fallback set to True makes asyncio to manually read and send
|
| 1158 |
+
the file when the platform does not support the sendfile syscall
|
| 1159 |
+
(e.g. Windows or SSL socket on Unix).
|
| 1160 |
+
|
| 1161 |
+
Raise SendfileNotAvailableError if the system does not support
|
| 1162 |
+
sendfile syscall and fallback is False.
|
| 1163 |
+
"""
|
| 1164 |
+
if transport.is_closing():
|
| 1165 |
+
raise RuntimeError("Transport is closing")
|
| 1166 |
+
mode = getattr(transport, '_sendfile_compatible',
|
| 1167 |
+
constants._SendfileMode.UNSUPPORTED)
|
| 1168 |
+
if mode is constants._SendfileMode.UNSUPPORTED:
|
| 1169 |
+
raise RuntimeError(
|
| 1170 |
+
f"sendfile is not supported for transport {transport!r}")
|
| 1171 |
+
if mode is constants._SendfileMode.TRY_NATIVE:
|
| 1172 |
+
try:
|
| 1173 |
+
return await self._sendfile_native(transport, file,
|
| 1174 |
+
offset, count)
|
| 1175 |
+
except exceptions.SendfileNotAvailableError as exc:
|
| 1176 |
+
if not fallback:
|
| 1177 |
+
raise
|
| 1178 |
+
|
| 1179 |
+
if not fallback:
|
| 1180 |
+
raise RuntimeError(
|
| 1181 |
+
f"fallback is disabled and native sendfile is not "
|
| 1182 |
+
f"supported for transport {transport!r}")
|
| 1183 |
+
|
| 1184 |
+
return await self._sendfile_fallback(transport, file,
|
| 1185 |
+
offset, count)
|
| 1186 |
+
|
| 1187 |
+
async def _sendfile_native(self, transp, file, offset, count):
|
| 1188 |
+
raise exceptions.SendfileNotAvailableError(
|
| 1189 |
+
"sendfile syscall is not supported")
|
| 1190 |
+
|
| 1191 |
+
async def _sendfile_fallback(self, transp, file, offset, count):
|
| 1192 |
+
if offset:
|
| 1193 |
+
file.seek(offset)
|
| 1194 |
+
blocksize = min(count, 16384) if count else 16384
|
| 1195 |
+
buf = bytearray(blocksize)
|
| 1196 |
+
total_sent = 0
|
| 1197 |
+
proto = _SendfileFallbackProtocol(transp)
|
| 1198 |
+
try:
|
| 1199 |
+
while True:
|
| 1200 |
+
if count:
|
| 1201 |
+
blocksize = min(count - total_sent, blocksize)
|
| 1202 |
+
if blocksize <= 0:
|
| 1203 |
+
return total_sent
|
| 1204 |
+
view = memoryview(buf)[:blocksize]
|
| 1205 |
+
read = await self.run_in_executor(None, file.readinto, view)
|
| 1206 |
+
if not read:
|
| 1207 |
+
return total_sent # EOF
|
| 1208 |
+
await proto.drain()
|
| 1209 |
+
transp.write(view[:read])
|
| 1210 |
+
total_sent += read
|
| 1211 |
+
finally:
|
| 1212 |
+
if total_sent > 0 and hasattr(file, 'seek'):
|
| 1213 |
+
file.seek(offset + total_sent)
|
| 1214 |
+
await proto.restore()
|
| 1215 |
+
|
| 1216 |
+
async def start_tls(self, transport, protocol, sslcontext, *,
|
| 1217 |
+
server_side=False,
|
| 1218 |
+
server_hostname=None,
|
| 1219 |
+
ssl_handshake_timeout=None):
|
| 1220 |
+
"""Upgrade transport to TLS.
|
| 1221 |
+
|
| 1222 |
+
Return a new transport that *protocol* should start using
|
| 1223 |
+
immediately.
|
| 1224 |
+
"""
|
| 1225 |
+
if ssl is None:
|
| 1226 |
+
raise RuntimeError('Python ssl module is not available')
|
| 1227 |
+
|
| 1228 |
+
if not isinstance(sslcontext, ssl.SSLContext):
|
| 1229 |
+
raise TypeError(
|
| 1230 |
+
f'sslcontext is expected to be an instance of ssl.SSLContext, '
|
| 1231 |
+
f'got {sslcontext!r}')
|
| 1232 |
+
|
| 1233 |
+
if not getattr(transport, '_start_tls_compatible', False):
|
| 1234 |
+
raise TypeError(
|
| 1235 |
+
f'transport {transport!r} is not supported by start_tls()')
|
| 1236 |
+
|
| 1237 |
+
waiter = self.create_future()
|
| 1238 |
+
ssl_protocol = sslproto.SSLProtocol(
|
| 1239 |
+
self, protocol, sslcontext, waiter,
|
| 1240 |
+
server_side, server_hostname,
|
| 1241 |
+
ssl_handshake_timeout=ssl_handshake_timeout,
|
| 1242 |
+
call_connection_made=False)
|
| 1243 |
+
|
| 1244 |
+
# Pause early so that "ssl_protocol.data_received()" doesn't
|
| 1245 |
+
# have a chance to get called before "ssl_protocol.connection_made()".
|
| 1246 |
+
transport.pause_reading()
|
| 1247 |
+
|
| 1248 |
+
transport.set_protocol(ssl_protocol)
|
| 1249 |
+
conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
|
| 1250 |
+
resume_cb = self.call_soon(transport.resume_reading)
|
| 1251 |
+
|
| 1252 |
+
try:
|
| 1253 |
+
await waiter
|
| 1254 |
+
except BaseException:
|
| 1255 |
+
transport.close()
|
| 1256 |
+
conmade_cb.cancel()
|
| 1257 |
+
resume_cb.cancel()
|
| 1258 |
+
raise
|
| 1259 |
+
|
| 1260 |
+
return ssl_protocol._app_transport
|
| 1261 |
+
|
| 1262 |
+
async def create_datagram_endpoint(self, protocol_factory,
|
| 1263 |
+
local_addr=None, remote_addr=None, *,
|
| 1264 |
+
family=0, proto=0, flags=0,
|
| 1265 |
+
reuse_address=_unset, reuse_port=None,
|
| 1266 |
+
allow_broadcast=None, sock=None):
|
| 1267 |
+
"""Create datagram connection."""
|
| 1268 |
+
if sock is not None:
|
| 1269 |
+
if sock.type != socket.SOCK_DGRAM:
|
| 1270 |
+
raise ValueError(
|
| 1271 |
+
f'A UDP Socket was expected, got {sock!r}')
|
| 1272 |
+
if (local_addr or remote_addr or
|
| 1273 |
+
family or proto or flags or
|
| 1274 |
+
reuse_port or allow_broadcast):
|
| 1275 |
+
# show the problematic kwargs in exception msg
|
| 1276 |
+
opts = dict(local_addr=local_addr, remote_addr=remote_addr,
|
| 1277 |
+
family=family, proto=proto, flags=flags,
|
| 1278 |
+
reuse_address=reuse_address, reuse_port=reuse_port,
|
| 1279 |
+
allow_broadcast=allow_broadcast)
|
| 1280 |
+
problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
|
| 1281 |
+
raise ValueError(
|
| 1282 |
+
f'socket modifier keyword arguments can not be used '
|
| 1283 |
+
f'when sock is specified. ({problems})')
|
| 1284 |
+
sock.setblocking(False)
|
| 1285 |
+
r_addr = None
|
| 1286 |
+
else:
|
| 1287 |
+
if not (local_addr or remote_addr):
|
| 1288 |
+
if family == 0:
|
| 1289 |
+
raise ValueError('unexpected address family')
|
| 1290 |
+
addr_pairs_info = (((family, proto), (None, None)),)
|
| 1291 |
+
elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
|
| 1292 |
+
for addr in (local_addr, remote_addr):
|
| 1293 |
+
if addr is not None and not isinstance(addr, str):
|
| 1294 |
+
raise TypeError('string is expected')
|
| 1295 |
+
|
| 1296 |
+
if local_addr and local_addr[0] not in (0, '\x00'):
|
| 1297 |
+
try:
|
| 1298 |
+
if stat.S_ISSOCK(os.stat(local_addr).st_mode):
|
| 1299 |
+
os.remove(local_addr)
|
| 1300 |
+
except FileNotFoundError:
|
| 1301 |
+
pass
|
| 1302 |
+
except OSError as err:
|
| 1303 |
+
# Directory may have permissions only to create socket.
|
| 1304 |
+
logger.error('Unable to check or remove stale UNIX '
|
| 1305 |
+
'socket %r: %r',
|
| 1306 |
+
local_addr, err)
|
| 1307 |
+
|
| 1308 |
+
addr_pairs_info = (((family, proto),
|
| 1309 |
+
(local_addr, remote_addr)), )
|
| 1310 |
+
else:
|
| 1311 |
+
# join address by (family, protocol)
|
| 1312 |
+
addr_infos = {} # Using order preserving dict
|
| 1313 |
+
for idx, addr in ((0, local_addr), (1, remote_addr)):
|
| 1314 |
+
if addr is not None:
|
| 1315 |
+
assert isinstance(addr, tuple) and len(addr) == 2, (
|
| 1316 |
+
'2-tuple is expected')
|
| 1317 |
+
|
| 1318 |
+
infos = await self._ensure_resolved(
|
| 1319 |
+
addr, family=family, type=socket.SOCK_DGRAM,
|
| 1320 |
+
proto=proto, flags=flags, loop=self)
|
| 1321 |
+
if not infos:
|
| 1322 |
+
raise OSError('getaddrinfo() returned empty list')
|
| 1323 |
+
|
| 1324 |
+
for fam, _, pro, _, address in infos:
|
| 1325 |
+
key = (fam, pro)
|
| 1326 |
+
if key not in addr_infos:
|
| 1327 |
+
addr_infos[key] = [None, None]
|
| 1328 |
+
addr_infos[key][idx] = address
|
| 1329 |
+
|
| 1330 |
+
# each addr has to have info for each (family, proto) pair
|
| 1331 |
+
addr_pairs_info = [
|
| 1332 |
+
(key, addr_pair) for key, addr_pair in addr_infos.items()
|
| 1333 |
+
if not ((local_addr and addr_pair[0] is None) or
|
| 1334 |
+
(remote_addr and addr_pair[1] is None))]
|
| 1335 |
+
|
| 1336 |
+
if not addr_pairs_info:
|
| 1337 |
+
raise ValueError('can not get address information')
|
| 1338 |
+
|
| 1339 |
+
exceptions = []
|
| 1340 |
+
|
| 1341 |
+
# bpo-37228
|
| 1342 |
+
if reuse_address is not _unset:
|
| 1343 |
+
if reuse_address:
|
| 1344 |
+
raise ValueError("Passing `reuse_address=True` is no "
|
| 1345 |
+
"longer supported, as the usage of "
|
| 1346 |
+
"SO_REUSEPORT in UDP poses a significant "
|
| 1347 |
+
"security concern.")
|
| 1348 |
+
else:
|
| 1349 |
+
warnings.warn("The *reuse_address* parameter has been "
|
| 1350 |
+
"deprecated as of 3.5.10 and is scheduled "
|
| 1351 |
+
"for removal in 3.11.", DeprecationWarning,
|
| 1352 |
+
stacklevel=2)
|
| 1353 |
+
|
| 1354 |
+
for ((family, proto),
|
| 1355 |
+
(local_address, remote_address)) in addr_pairs_info:
|
| 1356 |
+
sock = None
|
| 1357 |
+
r_addr = None
|
| 1358 |
+
try:
|
| 1359 |
+
sock = socket.socket(
|
| 1360 |
+
family=family, type=socket.SOCK_DGRAM, proto=proto)
|
| 1361 |
+
if reuse_port:
|
| 1362 |
+
_set_reuseport(sock)
|
| 1363 |
+
if allow_broadcast:
|
| 1364 |
+
sock.setsockopt(
|
| 1365 |
+
socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
|
| 1366 |
+
sock.setblocking(False)
|
| 1367 |
+
|
| 1368 |
+
if local_addr:
|
| 1369 |
+
sock.bind(local_address)
|
| 1370 |
+
if remote_addr:
|
| 1371 |
+
if not allow_broadcast:
|
| 1372 |
+
await self.sock_connect(sock, remote_address)
|
| 1373 |
+
r_addr = remote_address
|
| 1374 |
+
except OSError as exc:
|
| 1375 |
+
if sock is not None:
|
| 1376 |
+
sock.close()
|
| 1377 |
+
exceptions.append(exc)
|
| 1378 |
+
except:
|
| 1379 |
+
if sock is not None:
|
| 1380 |
+
sock.close()
|
| 1381 |
+
raise
|
| 1382 |
+
else:
|
| 1383 |
+
break
|
| 1384 |
+
else:
|
| 1385 |
+
raise exceptions[0]
|
| 1386 |
+
|
| 1387 |
+
protocol = protocol_factory()
|
| 1388 |
+
waiter = self.create_future()
|
| 1389 |
+
transport = self._make_datagram_transport(
|
| 1390 |
+
sock, protocol, r_addr, waiter)
|
| 1391 |
+
if self._debug:
|
| 1392 |
+
if local_addr:
|
| 1393 |
+
logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
|
| 1394 |
+
"created: (%r, %r)",
|
| 1395 |
+
local_addr, remote_addr, transport, protocol)
|
| 1396 |
+
else:
|
| 1397 |
+
logger.debug("Datagram endpoint remote_addr=%r created: "
|
| 1398 |
+
"(%r, %r)",
|
| 1399 |
+
remote_addr, transport, protocol)
|
| 1400 |
+
|
| 1401 |
+
try:
|
| 1402 |
+
await waiter
|
| 1403 |
+
except:
|
| 1404 |
+
transport.close()
|
| 1405 |
+
raise
|
| 1406 |
+
|
| 1407 |
+
return transport, protocol
|
| 1408 |
+
|
| 1409 |
+
async def _ensure_resolved(self, address, *,
|
| 1410 |
+
family=0, type=socket.SOCK_STREAM,
|
| 1411 |
+
proto=0, flags=0, loop):
|
| 1412 |
+
host, port = address[:2]
|
| 1413 |
+
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
|
| 1414 |
+
if info is not None:
|
| 1415 |
+
# "host" is already a resolved IP.
|
| 1416 |
+
return [info]
|
| 1417 |
+
else:
|
| 1418 |
+
return await loop.getaddrinfo(host, port, family=family, type=type,
|
| 1419 |
+
proto=proto, flags=flags)
|
| 1420 |
+
|
| 1421 |
+
async def _create_server_getaddrinfo(self, host, port, family, flags):
|
| 1422 |
+
infos = await self._ensure_resolved((host, port), family=family,
|
| 1423 |
+
type=socket.SOCK_STREAM,
|
| 1424 |
+
flags=flags, loop=self)
|
| 1425 |
+
if not infos:
|
| 1426 |
+
raise OSError(f'getaddrinfo({host!r}) returned empty list')
|
| 1427 |
+
return infos
|
| 1428 |
+
|
| 1429 |
+
async def create_server(
|
| 1430 |
+
self, protocol_factory, host=None, port=None,
|
| 1431 |
+
*,
|
| 1432 |
+
family=socket.AF_UNSPEC,
|
| 1433 |
+
flags=socket.AI_PASSIVE,
|
| 1434 |
+
sock=None,
|
| 1435 |
+
backlog=100,
|
| 1436 |
+
ssl=None,
|
| 1437 |
+
reuse_address=None,
|
| 1438 |
+
reuse_port=None,
|
| 1439 |
+
ssl_handshake_timeout=None,
|
| 1440 |
+
start_serving=True):
|
| 1441 |
+
"""Create a TCP server.
|
| 1442 |
+
|
| 1443 |
+
The host parameter can be a string, in that case the TCP server is
|
| 1444 |
+
bound to host and port.
|
| 1445 |
+
|
| 1446 |
+
The host parameter can also be a sequence of strings and in that case
|
| 1447 |
+
the TCP server is bound to all hosts of the sequence. If a host
|
| 1448 |
+
appears multiple times (possibly indirectly e.g. when hostnames
|
| 1449 |
+
resolve to the same IP address), the server is only bound once to that
|
| 1450 |
+
host.
|
| 1451 |
+
|
| 1452 |
+
Return a Server object which can be used to stop the service.
|
| 1453 |
+
|
| 1454 |
+
This method is a coroutine.
|
| 1455 |
+
"""
|
| 1456 |
+
if isinstance(ssl, bool):
|
| 1457 |
+
raise TypeError('ssl argument must be an SSLContext or None')
|
| 1458 |
+
|
| 1459 |
+
if ssl_handshake_timeout is not None and ssl is None:
|
| 1460 |
+
raise ValueError(
|
| 1461 |
+
'ssl_handshake_timeout is only meaningful with ssl')
|
| 1462 |
+
|
| 1463 |
+
if sock is not None:
|
| 1464 |
+
_check_ssl_socket(sock)
|
| 1465 |
+
|
| 1466 |
+
if host is not None or port is not None:
|
| 1467 |
+
if sock is not None:
|
| 1468 |
+
raise ValueError(
|
| 1469 |
+
'host/port and sock can not be specified at the same time')
|
| 1470 |
+
|
| 1471 |
+
if reuse_address is None:
|
| 1472 |
+
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
|
| 1473 |
+
sockets = []
|
| 1474 |
+
if host == '':
|
| 1475 |
+
hosts = [None]
|
| 1476 |
+
elif (isinstance(host, str) or
|
| 1477 |
+
not isinstance(host, collections.abc.Iterable)):
|
| 1478 |
+
hosts = [host]
|
| 1479 |
+
else:
|
| 1480 |
+
hosts = host
|
| 1481 |
+
|
| 1482 |
+
fs = [self._create_server_getaddrinfo(host, port, family=family,
|
| 1483 |
+
flags=flags)
|
| 1484 |
+
for host in hosts]
|
| 1485 |
+
infos = await tasks.gather(*fs)
|
| 1486 |
+
infos = set(itertools.chain.from_iterable(infos))
|
| 1487 |
+
|
| 1488 |
+
completed = False
|
| 1489 |
+
try:
|
| 1490 |
+
for res in infos:
|
| 1491 |
+
af, socktype, proto, canonname, sa = res
|
| 1492 |
+
try:
|
| 1493 |
+
sock = socket.socket(af, socktype, proto)
|
| 1494 |
+
except socket.error:
|
| 1495 |
+
# Assume it's a bad family/type/protocol combination.
|
| 1496 |
+
if self._debug:
|
| 1497 |
+
logger.warning('create_server() failed to create '
|
| 1498 |
+
'socket.socket(%r, %r, %r)',
|
| 1499 |
+
af, socktype, proto, exc_info=True)
|
| 1500 |
+
continue
|
| 1501 |
+
sockets.append(sock)
|
| 1502 |
+
if reuse_address:
|
| 1503 |
+
sock.setsockopt(
|
| 1504 |
+
socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
|
| 1505 |
+
if reuse_port:
|
| 1506 |
+
_set_reuseport(sock)
|
| 1507 |
+
# Disable IPv4/IPv6 dual stack support (enabled by
|
| 1508 |
+
# default on Linux) which makes a single socket
|
| 1509 |
+
# listen on both address families.
|
| 1510 |
+
if (_HAS_IPv6 and
|
| 1511 |
+
af == socket.AF_INET6 and
|
| 1512 |
+
hasattr(socket, 'IPPROTO_IPV6')):
|
| 1513 |
+
sock.setsockopt(socket.IPPROTO_IPV6,
|
| 1514 |
+
socket.IPV6_V6ONLY,
|
| 1515 |
+
True)
|
| 1516 |
+
try:
|
| 1517 |
+
sock.bind(sa)
|
| 1518 |
+
except OSError as err:
|
| 1519 |
+
raise OSError(err.errno, 'error while attempting '
|
| 1520 |
+
'to bind on address %r: %s'
|
| 1521 |
+
% (sa, err.strerror.lower())) from None
|
| 1522 |
+
completed = True
|
| 1523 |
+
finally:
|
| 1524 |
+
if not completed:
|
| 1525 |
+
for sock in sockets:
|
| 1526 |
+
sock.close()
|
| 1527 |
+
else:
|
| 1528 |
+
if sock is None:
|
| 1529 |
+
raise ValueError('Neither host/port nor sock were specified')
|
| 1530 |
+
if sock.type != socket.SOCK_STREAM:
|
| 1531 |
+
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
|
| 1532 |
+
sockets = [sock]
|
| 1533 |
+
|
| 1534 |
+
for sock in sockets:
|
| 1535 |
+
sock.setblocking(False)
|
| 1536 |
+
|
| 1537 |
+
server = Server(self, sockets, protocol_factory,
|
| 1538 |
+
ssl, backlog, ssl_handshake_timeout)
|
| 1539 |
+
if start_serving:
|
| 1540 |
+
server._start_serving()
|
| 1541 |
+
# Skip one loop iteration so that all 'loop.add_reader'
|
| 1542 |
+
# go through.
|
| 1543 |
+
await tasks.sleep(0)
|
| 1544 |
+
|
| 1545 |
+
if self._debug:
|
| 1546 |
+
logger.info("%r is serving", server)
|
| 1547 |
+
return server
|
| 1548 |
+
|
| 1549 |
+
async def connect_accepted_socket(
|
| 1550 |
+
self, protocol_factory, sock,
|
| 1551 |
+
*, ssl=None,
|
| 1552 |
+
ssl_handshake_timeout=None):
|
| 1553 |
+
if sock.type != socket.SOCK_STREAM:
|
| 1554 |
+
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
|
| 1555 |
+
|
| 1556 |
+
if ssl_handshake_timeout is not None and not ssl:
|
| 1557 |
+
raise ValueError(
|
| 1558 |
+
'ssl_handshake_timeout is only meaningful with ssl')
|
| 1559 |
+
|
| 1560 |
+
if sock is not None:
|
| 1561 |
+
_check_ssl_socket(sock)
|
| 1562 |
+
|
| 1563 |
+
transport, protocol = await self._create_connection_transport(
|
| 1564 |
+
sock, protocol_factory, ssl, '', server_side=True,
|
| 1565 |
+
ssl_handshake_timeout=ssl_handshake_timeout)
|
| 1566 |
+
if self._debug:
|
| 1567 |
+
# Get the socket from the transport because SSL transport closes
|
| 1568 |
+
# the old socket and creates a new SSL socket
|
| 1569 |
+
sock = transport.get_extra_info('socket')
|
| 1570 |
+
logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
|
| 1571 |
+
return transport, protocol
|
| 1572 |
+
|
| 1573 |
+
async def connect_read_pipe(self, protocol_factory, pipe):
|
| 1574 |
+
protocol = protocol_factory()
|
| 1575 |
+
waiter = self.create_future()
|
| 1576 |
+
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
|
| 1577 |
+
|
| 1578 |
+
try:
|
| 1579 |
+
await waiter
|
| 1580 |
+
except:
|
| 1581 |
+
transport.close()
|
| 1582 |
+
raise
|
| 1583 |
+
|
| 1584 |
+
if self._debug:
|
| 1585 |
+
logger.debug('Read pipe %r connected: (%r, %r)',
|
| 1586 |
+
pipe.fileno(), transport, protocol)
|
| 1587 |
+
return transport, protocol
|
| 1588 |
+
|
| 1589 |
+
async def connect_write_pipe(self, protocol_factory, pipe):
|
| 1590 |
+
protocol = protocol_factory()
|
| 1591 |
+
waiter = self.create_future()
|
| 1592 |
+
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
|
| 1593 |
+
|
| 1594 |
+
try:
|
| 1595 |
+
await waiter
|
| 1596 |
+
except:
|
| 1597 |
+
transport.close()
|
| 1598 |
+
raise
|
| 1599 |
+
|
| 1600 |
+
if self._debug:
|
| 1601 |
+
logger.debug('Write pipe %r connected: (%r, %r)',
|
| 1602 |
+
pipe.fileno(), transport, protocol)
|
| 1603 |
+
return transport, protocol
|
| 1604 |
+
|
| 1605 |
+
def _log_subprocess(self, msg, stdin, stdout, stderr):
|
| 1606 |
+
info = [msg]
|
| 1607 |
+
if stdin is not None:
|
| 1608 |
+
info.append(f'stdin={_format_pipe(stdin)}')
|
| 1609 |
+
if stdout is not None and stderr == subprocess.STDOUT:
|
| 1610 |
+
info.append(f'stdout=stderr={_format_pipe(stdout)}')
|
| 1611 |
+
else:
|
| 1612 |
+
if stdout is not None:
|
| 1613 |
+
info.append(f'stdout={_format_pipe(stdout)}')
|
| 1614 |
+
if stderr is not None:
|
| 1615 |
+
info.append(f'stderr={_format_pipe(stderr)}')
|
| 1616 |
+
logger.debug(' '.join(info))
|
| 1617 |
+
|
| 1618 |
+
async def subprocess_shell(self, protocol_factory, cmd, *,
|
| 1619 |
+
stdin=subprocess.PIPE,
|
| 1620 |
+
stdout=subprocess.PIPE,
|
| 1621 |
+
stderr=subprocess.PIPE,
|
| 1622 |
+
universal_newlines=False,
|
| 1623 |
+
shell=True, bufsize=0,
|
| 1624 |
+
encoding=None, errors=None, text=None,
|
| 1625 |
+
**kwargs):
|
| 1626 |
+
if not isinstance(cmd, (bytes, str)):
|
| 1627 |
+
raise ValueError("cmd must be a string")
|
| 1628 |
+
if universal_newlines:
|
| 1629 |
+
raise ValueError("universal_newlines must be False")
|
| 1630 |
+
if not shell:
|
| 1631 |
+
raise ValueError("shell must be True")
|
| 1632 |
+
if bufsize != 0:
|
| 1633 |
+
raise ValueError("bufsize must be 0")
|
| 1634 |
+
if text:
|
| 1635 |
+
raise ValueError("text must be False")
|
| 1636 |
+
if encoding is not None:
|
| 1637 |
+
raise ValueError("encoding must be None")
|
| 1638 |
+
if errors is not None:
|
| 1639 |
+
raise ValueError("errors must be None")
|
| 1640 |
+
|
| 1641 |
+
protocol = protocol_factory()
|
| 1642 |
+
debug_log = None
|
| 1643 |
+
if self._debug:
|
| 1644 |
+
# don't log parameters: they may contain sensitive information
|
| 1645 |
+
# (password) and may be too long
|
| 1646 |
+
debug_log = 'run shell command %r' % cmd
|
| 1647 |
+
self._log_subprocess(debug_log, stdin, stdout, stderr)
|
| 1648 |
+
transport = await self._make_subprocess_transport(
|
| 1649 |
+
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
|
| 1650 |
+
if self._debug and debug_log is not None:
|
| 1651 |
+
logger.info('%s: %r', debug_log, transport)
|
| 1652 |
+
return transport, protocol
|
| 1653 |
+
|
| 1654 |
+
async def subprocess_exec(self, protocol_factory, program, *args,
|
| 1655 |
+
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
|
| 1656 |
+
stderr=subprocess.PIPE, universal_newlines=False,
|
| 1657 |
+
shell=False, bufsize=0,
|
| 1658 |
+
encoding=None, errors=None, text=None,
|
| 1659 |
+
**kwargs):
|
| 1660 |
+
if universal_newlines:
|
| 1661 |
+
raise ValueError("universal_newlines must be False")
|
| 1662 |
+
if shell:
|
| 1663 |
+
raise ValueError("shell must be False")
|
| 1664 |
+
if bufsize != 0:
|
| 1665 |
+
raise ValueError("bufsize must be 0")
|
| 1666 |
+
if text:
|
| 1667 |
+
raise ValueError("text must be False")
|
| 1668 |
+
if encoding is not None:
|
| 1669 |
+
raise ValueError("encoding must be None")
|
| 1670 |
+
if errors is not None:
|
| 1671 |
+
raise ValueError("errors must be None")
|
| 1672 |
+
|
| 1673 |
+
popen_args = (program,) + args
|
| 1674 |
+
protocol = protocol_factory()
|
| 1675 |
+
debug_log = None
|
| 1676 |
+
if self._debug:
|
| 1677 |
+
# don't log parameters: they may contain sensitive information
|
| 1678 |
+
# (password) and may be too long
|
| 1679 |
+
debug_log = f'execute program {program!r}'
|
| 1680 |
+
self._log_subprocess(debug_log, stdin, stdout, stderr)
|
| 1681 |
+
transport = await self._make_subprocess_transport(
|
| 1682 |
+
protocol, popen_args, False, stdin, stdout, stderr,
|
| 1683 |
+
bufsize, **kwargs)
|
| 1684 |
+
if self._debug and debug_log is not None:
|
| 1685 |
+
logger.info('%s: %r', debug_log, transport)
|
| 1686 |
+
return transport, protocol
|
| 1687 |
+
|
| 1688 |
+
def get_exception_handler(self):
|
| 1689 |
+
"""Return an exception handler, or None if the default one is in use.
|
| 1690 |
+
"""
|
| 1691 |
+
return self._exception_handler
|
| 1692 |
+
|
| 1693 |
+
def set_exception_handler(self, handler):
|
| 1694 |
+
"""Set handler as the new event loop exception handler.
|
| 1695 |
+
|
| 1696 |
+
If handler is None, the default exception handler will
|
| 1697 |
+
be set.
|
| 1698 |
+
|
| 1699 |
+
If handler is a callable object, it should have a
|
| 1700 |
+
signature matching '(loop, context)', where 'loop'
|
| 1701 |
+
will be a reference to the active event loop, 'context'
|
| 1702 |
+
will be a dict object (see `call_exception_handler()`
|
| 1703 |
+
documentation for details about context).
|
| 1704 |
+
"""
|
| 1705 |
+
if handler is not None and not callable(handler):
|
| 1706 |
+
raise TypeError(f'A callable object or None is expected, '
|
| 1707 |
+
f'got {handler!r}')
|
| 1708 |
+
self._exception_handler = handler
|
| 1709 |
+
|
| 1710 |
+
def default_exception_handler(self, context):
|
| 1711 |
+
"""Default exception handler.
|
| 1712 |
+
|
| 1713 |
+
This is called when an exception occurs and no exception
|
| 1714 |
+
handler is set, and can be called by a custom exception
|
| 1715 |
+
handler that wants to defer to the default behavior.
|
| 1716 |
+
|
| 1717 |
+
This default handler logs the error message and other
|
| 1718 |
+
context-dependent information. In debug mode, a truncated
|
| 1719 |
+
stack trace is also appended showing where the given object
|
| 1720 |
+
(e.g. a handle or future or task) was created, if any.
|
| 1721 |
+
|
| 1722 |
+
The context parameter has the same meaning as in
|
| 1723 |
+
`call_exception_handler()`.
|
| 1724 |
+
"""
|
| 1725 |
+
message = context.get('message')
|
| 1726 |
+
if not message:
|
| 1727 |
+
message = 'Unhandled exception in event loop'
|
| 1728 |
+
|
| 1729 |
+
exception = context.get('exception')
|
| 1730 |
+
if exception is not None:
|
| 1731 |
+
exc_info = (type(exception), exception, exception.__traceback__)
|
| 1732 |
+
else:
|
| 1733 |
+
exc_info = False
|
| 1734 |
+
|
| 1735 |
+
if ('source_traceback' not in context and
|
| 1736 |
+
self._current_handle is not None and
|
| 1737 |
+
self._current_handle._source_traceback):
|
| 1738 |
+
context['handle_traceback'] = \
|
| 1739 |
+
self._current_handle._source_traceback
|
| 1740 |
+
|
| 1741 |
+
log_lines = [message]
|
| 1742 |
+
for key in sorted(context):
|
| 1743 |
+
if key in {'message', 'exception'}:
|
| 1744 |
+
continue
|
| 1745 |
+
value = context[key]
|
| 1746 |
+
if key == 'source_traceback':
|
| 1747 |
+
tb = ''.join(traceback.format_list(value))
|
| 1748 |
+
value = 'Object created at (most recent call last):\n'
|
| 1749 |
+
value += tb.rstrip()
|
| 1750 |
+
elif key == 'handle_traceback':
|
| 1751 |
+
tb = ''.join(traceback.format_list(value))
|
| 1752 |
+
value = 'Handle created at (most recent call last):\n'
|
| 1753 |
+
value += tb.rstrip()
|
| 1754 |
+
else:
|
| 1755 |
+
value = repr(value)
|
| 1756 |
+
log_lines.append(f'{key}: {value}')
|
| 1757 |
+
|
| 1758 |
+
logger.error('\n'.join(log_lines), exc_info=exc_info)
|
| 1759 |
+
|
| 1760 |
+
def call_exception_handler(self, context):
|
| 1761 |
+
"""Call the current event loop's exception handler.
|
| 1762 |
+
|
| 1763 |
+
The context argument is a dict containing the following keys:
|
| 1764 |
+
|
| 1765 |
+
- 'message': Error message;
|
| 1766 |
+
- 'exception' (optional): Exception object;
|
| 1767 |
+
- 'future' (optional): Future instance;
|
| 1768 |
+
- 'task' (optional): Task instance;
|
| 1769 |
+
- 'handle' (optional): Handle instance;
|
| 1770 |
+
- 'protocol' (optional): Protocol instance;
|
| 1771 |
+
- 'transport' (optional): Transport instance;
|
| 1772 |
+
- 'socket' (optional): Socket instance;
|
| 1773 |
+
- 'asyncgen' (optional): Asynchronous generator that caused
|
| 1774 |
+
the exception.
|
| 1775 |
+
|
| 1776 |
+
New keys maybe introduced in the future.
|
| 1777 |
+
|
| 1778 |
+
Note: do not overload this method in an event loop subclass.
|
| 1779 |
+
For custom exception handling, use the
|
| 1780 |
+
`set_exception_handler()` method.
|
| 1781 |
+
"""
|
| 1782 |
+
if self._exception_handler is None:
|
| 1783 |
+
try:
|
| 1784 |
+
self.default_exception_handler(context)
|
| 1785 |
+
except (SystemExit, KeyboardInterrupt):
|
| 1786 |
+
raise
|
| 1787 |
+
except BaseException:
|
| 1788 |
+
# Second protection layer for unexpected errors
|
| 1789 |
+
# in the default implementation, as well as for subclassed
|
| 1790 |
+
# event loops with overloaded "default_exception_handler".
|
| 1791 |
+
logger.error('Exception in default exception handler',
|
| 1792 |
+
exc_info=True)
|
| 1793 |
+
else:
|
| 1794 |
+
try:
|
| 1795 |
+
self._exception_handler(self, context)
|
| 1796 |
+
except (SystemExit, KeyboardInterrupt):
|
| 1797 |
+
raise
|
| 1798 |
+
except BaseException as exc:
|
| 1799 |
+
# Exception in the user set custom exception handler.
|
| 1800 |
+
try:
|
| 1801 |
+
# Let's try default handler.
|
| 1802 |
+
self.default_exception_handler({
|
| 1803 |
+
'message': 'Unhandled error in exception handler',
|
| 1804 |
+
'exception': exc,
|
| 1805 |
+
'context': context,
|
| 1806 |
+
})
|
| 1807 |
+
except (SystemExit, KeyboardInterrupt):
|
| 1808 |
+
raise
|
| 1809 |
+
except BaseException:
|
| 1810 |
+
# Guard 'default_exception_handler' in case it is
|
| 1811 |
+
# overloaded.
|
| 1812 |
+
logger.error('Exception in default exception handler '
|
| 1813 |
+
'while handling an unexpected error '
|
| 1814 |
+
'in custom exception handler',
|
| 1815 |
+
exc_info=True)
|
| 1816 |
+
|
| 1817 |
+
def _add_callback(self, handle):
|
| 1818 |
+
"""Add a Handle to _ready."""
|
| 1819 |
+
if not handle._cancelled:
|
| 1820 |
+
self._ready.append(handle)
|
| 1821 |
+
|
| 1822 |
+
def _add_callback_signalsafe(self, handle):
|
| 1823 |
+
"""Like _add_callback() but called from a signal handler."""
|
| 1824 |
+
self._add_callback(handle)
|
| 1825 |
+
self._write_to_self()
|
| 1826 |
+
|
| 1827 |
+
def _timer_handle_cancelled(self, handle):
|
| 1828 |
+
"""Notification that a TimerHandle has been cancelled."""
|
| 1829 |
+
if handle._scheduled:
|
| 1830 |
+
self._timer_cancelled_count += 1
|
| 1831 |
+
|
| 1832 |
+
def _run_once(self):
|
| 1833 |
+
"""Run one full iteration of the event loop.
|
| 1834 |
+
|
| 1835 |
+
This calls all currently ready callbacks, polls for I/O,
|
| 1836 |
+
schedules the resulting callbacks, and finally schedules
|
| 1837 |
+
'call_later' callbacks.
|
| 1838 |
+
"""
|
| 1839 |
+
|
| 1840 |
+
sched_count = len(self._scheduled)
|
| 1841 |
+
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
|
| 1842 |
+
self._timer_cancelled_count / sched_count >
|
| 1843 |
+
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
|
| 1844 |
+
# Remove delayed calls that were cancelled if their number
|
| 1845 |
+
# is too high
|
| 1846 |
+
new_scheduled = []
|
| 1847 |
+
for handle in self._scheduled:
|
| 1848 |
+
if handle._cancelled:
|
| 1849 |
+
handle._scheduled = False
|
| 1850 |
+
else:
|
| 1851 |
+
new_scheduled.append(handle)
|
| 1852 |
+
|
| 1853 |
+
heapq.heapify(new_scheduled)
|
| 1854 |
+
self._scheduled = new_scheduled
|
| 1855 |
+
self._timer_cancelled_count = 0
|
| 1856 |
+
else:
|
| 1857 |
+
# Remove delayed calls that were cancelled from head of queue.
|
| 1858 |
+
while self._scheduled and self._scheduled[0]._cancelled:
|
| 1859 |
+
self._timer_cancelled_count -= 1
|
| 1860 |
+
handle = heapq.heappop(self._scheduled)
|
| 1861 |
+
handle._scheduled = False
|
| 1862 |
+
|
| 1863 |
+
timeout = None
|
| 1864 |
+
if self._ready or self._stopping:
|
| 1865 |
+
timeout = 0
|
| 1866 |
+
elif self._scheduled:
|
| 1867 |
+
# Compute the desired timeout.
|
| 1868 |
+
when = self._scheduled[0]._when
|
| 1869 |
+
timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
|
| 1870 |
+
|
| 1871 |
+
event_list = self._selector.select(timeout)
|
| 1872 |
+
self._process_events(event_list)
|
| 1873 |
+
# Needed to break cycles when an exception occurs.
|
| 1874 |
+
event_list = None
|
| 1875 |
+
|
| 1876 |
+
# Handle 'later' callbacks that are ready.
|
| 1877 |
+
end_time = self.time() + self._clock_resolution
|
| 1878 |
+
while self._scheduled:
|
| 1879 |
+
handle = self._scheduled[0]
|
| 1880 |
+
if handle._when >= end_time:
|
| 1881 |
+
break
|
| 1882 |
+
handle = heapq.heappop(self._scheduled)
|
| 1883 |
+
handle._scheduled = False
|
| 1884 |
+
self._ready.append(handle)
|
| 1885 |
+
|
| 1886 |
+
# This is the only place where callbacks are actually *called*.
|
| 1887 |
+
# All other places just add them to ready.
|
| 1888 |
+
# Note: We run all currently scheduled callbacks, but not any
|
| 1889 |
+
# callbacks scheduled by callbacks run this time around --
|
| 1890 |
+
# they will be run the next time (after another I/O poll).
|
| 1891 |
+
# Use an idiom that is thread-safe without using locks.
|
| 1892 |
+
ntodo = len(self._ready)
|
| 1893 |
+
for i in range(ntodo):
|
| 1894 |
+
handle = self._ready.popleft()
|
| 1895 |
+
if handle._cancelled:
|
| 1896 |
+
continue
|
| 1897 |
+
if self._debug:
|
| 1898 |
+
try:
|
| 1899 |
+
self._current_handle = handle
|
| 1900 |
+
t0 = self.time()
|
| 1901 |
+
handle._run()
|
| 1902 |
+
dt = self.time() - t0
|
| 1903 |
+
if dt >= self.slow_callback_duration:
|
| 1904 |
+
logger.warning('Executing %s took %.3f seconds',
|
| 1905 |
+
_format_handle(handle), dt)
|
| 1906 |
+
finally:
|
| 1907 |
+
self._current_handle = None
|
| 1908 |
+
else:
|
| 1909 |
+
handle._run()
|
| 1910 |
+
handle = None # Needed to break cycles when an exception occurs.
|
| 1911 |
+
|
| 1912 |
+
    def _set_coroutine_origin_tracking(self, enabled):
        """Turn coroutine origin tracking on or off to match *enabled*.

        The previous interpreter-wide tracking depth is saved when
        enabling so that disabling restores it.
        """
        # No-op when the requested state already matches the current one.
        if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
            return

        if enabled:
            # Save the current depth so it can be restored on disable.
            self._coroutine_origin_tracking_saved_depth = (
                sys.get_coroutine_origin_tracking_depth())
            sys.set_coroutine_origin_tracking_depth(
                constants.DEBUG_STACK_DEPTH)
        else:
            sys.set_coroutine_origin_tracking_depth(
                self._coroutine_origin_tracking_saved_depth)

        self._coroutine_origin_tracking_enabled = enabled
|
| 1926 |
+
|
| 1927 |
+
    def get_debug(self):
        """Return True if the event loop runs in debug mode."""
        return self._debug
|
| 1929 |
+
|
| 1930 |
+
    def set_debug(self, enabled):
        """Enable or disable debug mode for the event loop.

        If the loop is already running, the coroutine origin tracking
        change is applied from within the loop itself.
        """
        self._debug = enabled

        if self.is_running():
            # Apply the tracking change from inside the running loop.
            self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
|
omnilmm/lib/python3.10/asyncio/base_futures.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = ()
|
| 2 |
+
|
| 3 |
+
import reprlib
|
| 4 |
+
from _thread import get_ident
|
| 5 |
+
|
| 6 |
+
from . import format_helpers
|
| 7 |
+
|
| 8 |
+
# States for Future.  A future starts PENDING and moves exactly once to
# either CANCELLED or FINISHED.
_PENDING = 'PENDING'
_CANCELLED = 'CANCELLED'
_FINISHED = 'FINISHED'
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def isfuture(obj):
    """Check for a Future.

    Returns True when obj is a Future instance, or when it advertises
    duck-type compatibility by defining the class-level attribute
    _asyncio_future_blocking with a non-None value on the instance.
    See comment in Future for more details.
    """
    if not hasattr(obj.__class__, '_asyncio_future_blocking'):
        return False
    return obj._asyncio_future_blocking is not None
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _format_callbacks(cb):
|
| 26 |
+
"""helper function for Future.__repr__"""
|
| 27 |
+
size = len(cb)
|
| 28 |
+
if not size:
|
| 29 |
+
cb = ''
|
| 30 |
+
|
| 31 |
+
def format_cb(callback):
|
| 32 |
+
return format_helpers._format_callback_source(callback, ())
|
| 33 |
+
|
| 34 |
+
if size == 1:
|
| 35 |
+
cb = format_cb(cb[0][0])
|
| 36 |
+
elif size == 2:
|
| 37 |
+
cb = '{}, {}'.format(format_cb(cb[0][0]), format_cb(cb[1][0]))
|
| 38 |
+
elif size > 2:
|
| 39 |
+
cb = '{}, <{} more>, {}'.format(format_cb(cb[0][0]),
|
| 40 |
+
size - 2,
|
| 41 |
+
format_cb(cb[-1][0]))
|
| 42 |
+
return f'cb=[{cb}]'
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# bpo-42183: _repr_running is needed for repr protection
# when a Future or Task result contains itself directly or indirectly.
# The logic is borrowed from @reprlib.recursive_repr decorator.
# Unfortunately, the direct decorator usage is impossible because of
# AttributeError: '_asyncio.Task' object has no attribute '__module__' error.
#
# After fixing this thing we can return to the decorator based approach.
# Keys are (id(future), thread_ident) tuples for futures whose repr is
# currently being computed on that thread.
_repr_running = set()
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _future_repr_info(future):
    # (Future) -> list of str fragments
    """Build the list of repr fragments for Future.__repr__."""

    def result_fragment():
        # Guard against results that contain the future itself
        # (directly or indirectly), which would otherwise recurse
        # forever -- same idea as @reprlib.recursive_repr.
        key = id(future), get_ident()
        if key in _repr_running:
            return '...'
        _repr_running.add(key)
        try:
            # reprlib bounds the output, e.g. for very long strings.
            return reprlib.repr(future._result)
        finally:
            _repr_running.discard(key)

    info = [future._state.lower()]
    if future._state == _FINISHED:
        exc = future._exception
        if exc is not None:
            info.append(f'exception={exc!r}')
        else:
            info.append(f'result={result_fragment()}')
    if future._callbacks:
        info.append(_format_callbacks(future._callbacks))
    if future._source_traceback:
        frame = future._source_traceback[-1]
        info.append(f'created at {frame[0]}:{frame[1]}')
    return info
|
omnilmm/lib/python3.10/asyncio/base_tasks.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import linecache
|
| 2 |
+
import traceback
|
| 3 |
+
|
| 4 |
+
from . import base_futures
|
| 5 |
+
from . import coroutines
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def _task_repr_info(task):
    """Build the list of repr fragments for Task.__repr__."""
    info = base_futures._future_repr_info(task)

    if task._must_cancel:
        # A pending cancellation overrides the base status word.
        info[0] = 'cancelling'

    extra = ['name=%r' % task.get_name(),
             f'coro=<{coroutines._format_coroutine(task._coro)}>']
    if task._fut_waiter is not None:
        extra.append(f'wait_for={task._fut_waiter!r}')
    # Splice the task-specific fragments right after the status word.
    info[1:1] = extra
    return info
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _task_get_stack(task, limit):
|
| 26 |
+
frames = []
|
| 27 |
+
if hasattr(task._coro, 'cr_frame'):
|
| 28 |
+
# case 1: 'async def' coroutines
|
| 29 |
+
f = task._coro.cr_frame
|
| 30 |
+
elif hasattr(task._coro, 'gi_frame'):
|
| 31 |
+
# case 2: legacy coroutines
|
| 32 |
+
f = task._coro.gi_frame
|
| 33 |
+
elif hasattr(task._coro, 'ag_frame'):
|
| 34 |
+
# case 3: async generators
|
| 35 |
+
f = task._coro.ag_frame
|
| 36 |
+
else:
|
| 37 |
+
# case 4: unknown objects
|
| 38 |
+
f = None
|
| 39 |
+
if f is not None:
|
| 40 |
+
while f is not None:
|
| 41 |
+
if limit is not None:
|
| 42 |
+
if limit <= 0:
|
| 43 |
+
break
|
| 44 |
+
limit -= 1
|
| 45 |
+
frames.append(f)
|
| 46 |
+
f = f.f_back
|
| 47 |
+
frames.reverse()
|
| 48 |
+
elif task._exception is not None:
|
| 49 |
+
tb = task._exception.__traceback__
|
| 50 |
+
while tb is not None:
|
| 51 |
+
if limit is not None:
|
| 52 |
+
if limit <= 0:
|
| 53 |
+
break
|
| 54 |
+
limit -= 1
|
| 55 |
+
frames.append(tb.tb_frame)
|
| 56 |
+
tb = tb.tb_next
|
| 57 |
+
return frames
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def _task_print_stack(task, limit, file):
|
| 61 |
+
extracted_list = []
|
| 62 |
+
checked = set()
|
| 63 |
+
for f in task.get_stack(limit=limit):
|
| 64 |
+
lineno = f.f_lineno
|
| 65 |
+
co = f.f_code
|
| 66 |
+
filename = co.co_filename
|
| 67 |
+
name = co.co_name
|
| 68 |
+
if filename not in checked:
|
| 69 |
+
checked.add(filename)
|
| 70 |
+
linecache.checkcache(filename)
|
| 71 |
+
line = linecache.getline(filename, lineno, f.f_globals)
|
| 72 |
+
extracted_list.append((filename, lineno, name, line))
|
| 73 |
+
|
| 74 |
+
exc = task._exception
|
| 75 |
+
if not extracted_list:
|
| 76 |
+
print(f'No stack for {task!r}', file=file)
|
| 77 |
+
elif exc is not None:
|
| 78 |
+
print(f'Traceback for {task!r} (most recent call last):', file=file)
|
| 79 |
+
else:
|
| 80 |
+
print(f'Stack for {task!r} (most recent call last):', file=file)
|
| 81 |
+
|
| 82 |
+
traceback.print_list(extracted_list, file=file)
|
| 83 |
+
if exc is not None:
|
| 84 |
+
for line in traceback.format_exception_only(exc.__class__, exc):
|
| 85 |
+
print(line, file=file, end='')
|
omnilmm/lib/python3.10/asyncio/constants.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import enum

# After the connection is lost, log warnings after this many write()s.
LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5

# Seconds to wait before retrying accept().
ACCEPT_RETRY_DELAY = 1

# Number of stack entries to capture in debug mode.
# The larger the number, the slower the operation in debug mode
# (see extract_stack() in format_helpers.py).
DEBUG_STACK_DEPTH = 10

# Number of seconds to wait for SSL handshake to complete.
# The default timeout matches that of Nginx.
SSL_HANDSHAKE_TIMEOUT = 60.0

# Used in sendfile fallback code.  We use fallback for platforms
# that don't support sendfile, or for TLS connections.
SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 256

# The enum should be here to break circular dependencies between
# base_events and sslproto
class _SendfileMode(enum.Enum):
    # Strategy for transmitting a file over a transport.
    UNSUPPORTED = enum.auto()   # sendfile not possible for this transport
    TRY_NATIVE = enum.auto()    # presumably tries OS-native sendfile first -- see base_events
    FALLBACK = enum.auto()      # userspace read/write loop (buffer size above)
|
omnilmm/lib/python3.10/asyncio/coroutines.py
ADDED
|
@@ -0,0 +1,269 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = 'coroutine', 'iscoroutinefunction', 'iscoroutine'
|
| 2 |
+
|
| 3 |
+
import collections.abc
|
| 4 |
+
import functools
|
| 5 |
+
import inspect
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
import traceback
|
| 9 |
+
import types
|
| 10 |
+
import warnings
|
| 11 |
+
|
| 12 |
+
from . import base_futures
|
| 13 |
+
from . import constants
|
| 14 |
+
from . import format_helpers
|
| 15 |
+
from .log import logger
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _is_debug_mode():
|
| 19 |
+
# If you set _DEBUG to true, @coroutine will wrap the resulting
|
| 20 |
+
# generator objects in a CoroWrapper instance (defined below). That
|
| 21 |
+
# instance will log a message when the generator is never iterated
|
| 22 |
+
# over, which may happen when you forget to use "await" or "yield from"
|
| 23 |
+
# with a coroutine call.
|
| 24 |
+
# Note that the value of the _DEBUG flag is taken
|
| 25 |
+
# when the decorator is used, so to be of any use it must be set
|
| 26 |
+
# before you define your coroutines. A downside of using this feature
|
| 27 |
+
# is that tracebacks show entries for the CoroWrapper.__next__ method
|
| 28 |
+
# when _DEBUG is true.
|
| 29 |
+
return sys.flags.dev_mode or (not sys.flags.ignore_environment and
|
| 30 |
+
bool(os.environ.get('PYTHONASYNCIODEBUG')))
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# Sampled once at import time; set PYTHONASYNCIODEBUG (or -X dev)
# before importing asyncio for it to take effect.
_DEBUG = _is_debug_mode()
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class CoroWrapper:
    # Wrapper for coroutine object in _DEBUG mode.
    #
    # Delegates the generator/coroutine protocol to the wrapped object
    # and, from __del__, logs an error when the coroutine was created
    # but never started.

    def __init__(self, gen, func=None):
        """Wrap *gen* (a generator or coroutine object).

        *func* is the original callable, kept only so repr formatting
        can unwrap the @coroutine decorator.
        """
        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
        self.gen = gen
        self.func = func  # Used to unwrap @coroutine decorator
        # Creation stack, used by __del__ to point at the offending call.
        self._source_traceback = format_helpers.extract_stack(sys._getframe(1))
        self.__name__ = getattr(gen, '__name__', None)
        self.__qualname__ = getattr(gen, '__qualname__', None)

    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            frame = self._source_traceback[-1]
            coro_repr += f', created at {frame[0]}:{frame[1]}'

        return f'<{self.__class__.__name__} {coro_repr}>'

    def __iter__(self):
        return self

    def __next__(self):
        # Advance the wrapped generator, as the iterator protocol expects.
        return self.gen.send(None)

    def send(self, value):
        return self.gen.send(value)

    def throw(self, type, value=None, traceback=None):
        return self.gen.throw(type, value, traceback)

    def close(self):
        return self.gen.close()

    @property
    def gi_frame(self):
        return self.gen.gi_frame

    @property
    def gi_running(self):
        return self.gen.gi_running

    @property
    def gi_code(self):
        return self.gen.gi_code

    def __await__(self):
        # The wrapper itself acts as the iterator driving the coroutine.
        return self

    @property
    def gi_yieldfrom(self):
        return self.gen.gi_yieldfrom

    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        # f_lasti == -1 means the generator was never started, i.e. the
        # coroutine was created but never awaited / yielded from.
        if frame is not None and frame.f_lasti == -1:
            msg = f'{self!r} was never yielded from'
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += (f'\nCoroutine object created at '
                        f'(most recent call last, truncated to '
                        f'{constants.DEBUG_STACK_DEPTH} last lines):\n')
                msg += tb.rstrip()
            logger.error(msg)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def coroutine(func):
    """Decorator to mark coroutines.

    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.

    Deprecated since Python 3.8 in favor of ``async def``; a
    DeprecationWarning is emitted at decoration time.
    """
    warnings.warn('"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead',
                  DeprecationWarning,
                  stacklevel=2)
    if inspect.iscoroutinefunction(func):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        return func

    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        # Plain function: wrap it in a generator that resolves a
        # future/generator/awaitable result before returning it.
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if (base_futures.isfuture(res) or inspect.isgenerator(res) or
                    isinstance(res, CoroWrapper)):
                res = yield from res
            else:
                # If 'res' is an awaitable, run it.
                try:
                    await_meth = res.__await__
                except AttributeError:
                    pass
                else:
                    if isinstance(res, collections.abc.Awaitable):
                        res = yield from await_meth()
            return res

    coro = types.coroutine(coro)
    if not _DEBUG:
        wrapper = coro
    else:
        # Debug mode: wrap every produced generator in CoroWrapper so
        # never-awaited coroutines get reported from its __del__.
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func=func)
            if w._source_traceback:
                # Drop the frame for this wrapper itself.
                del w._source_traceback[-1]
            # Python < 3.5 does not implement __qualname__
            # on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial may lack __qualname__).
            w.__name__ = getattr(func, '__name__', None)
            w.__qualname__ = getattr(func, '__qualname__', None)
            return w

    wrapper._is_coroutine = _is_coroutine  # For iscoroutinefunction().
    return wrapper
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
# A marker for iscoroutinefunction.
# Stored as wrapper._is_coroutine by @coroutine above so that
# iscoroutinefunction() can recognize decorated generator functions.
_is_coroutine = object()
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    # Native "async def" functions first; fall back to the marker set
    # by the (deprecated) @coroutine decorator.
    if inspect.iscoroutinefunction(func):
        return True
    return getattr(func, '_is_coroutine', None) is _is_coroutine
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
# Prioritize native coroutine check to speed-up
# asyncio.iscoroutine.
_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType,
                    collections.abc.Coroutine, CoroWrapper)
# Cache of concrete types already proven to be coroutines; growth is
# bounded inside iscoroutine().
_iscoroutine_typecache = set()
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    obj_type = type(obj)
    if obj_type in _iscoroutine_typecache:
        # Fast path: this exact type was already recognized.
        return True

    if not isinstance(obj, _COROUTINE_TYPES):
        return False

    # Just in case we don't want to cache more than 100
    # positive types.  That shouldn't ever happen, unless
    # someone stressing the system on purpose.
    if len(_iscoroutine_typecache) < 100:
        _iscoroutine_typecache.add(obj_type)
    return True
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def _format_coroutine(coro):
    """Return a human-readable description of *coro* for use in reprs.

    The description includes the coroutine's name, whether it is
    running or done, and -- when determinable -- the source location.
    """
    assert iscoroutine(coro)

    is_corowrapper = isinstance(coro, CoroWrapper)

    def get_name(coro):
        # Coroutines compiled with Cython sometimes don't have
        # proper __qualname__ or __name__.  While that is a bug
        # in Cython, asyncio shouldn't crash with an AttributeError
        # in its __repr__ functions.
        if is_corowrapper:
            return format_helpers._format_callback(coro.func, (), {})

        if hasattr(coro, '__qualname__') and coro.__qualname__:
            coro_name = coro.__qualname__
        elif hasattr(coro, '__name__') and coro.__name__:
            coro_name = coro.__name__
        else:
            # Stop masking Cython bugs, expose them in a friendly way.
            coro_name = f'<{type(coro).__name__} without __name__>'
        return f'{coro_name}()'

    def is_running(coro):
        try:
            return coro.cr_running
        except AttributeError:
            try:
                return coro.gi_running
            except AttributeError:
                return False

    coro_code = None
    if hasattr(coro, 'cr_code') and coro.cr_code:
        coro_code = coro.cr_code
    elif hasattr(coro, 'gi_code') and coro.gi_code:
        coro_code = coro.gi_code

    coro_name = get_name(coro)

    if not coro_code:
        # Built-in types might not have __qualname__ or __name__.
        if is_running(coro):
            return f'{coro_name} running'
        else:
            return coro_name

    coro_frame = None
    if hasattr(coro, 'gi_frame') and coro.gi_frame:
        coro_frame = coro.gi_frame
    elif hasattr(coro, 'cr_frame') and coro.cr_frame:
        coro_frame = coro.cr_frame

    # If Cython's coroutine has a fake code object without proper
    # co_filename -- expose that.
    filename = coro_code.co_filename or '<empty co_filename>'

    lineno = 0

    # BUGFIX: the reprs below previously hard-coded the literal string
    # "(unknown)" even though *filename* had just been computed above,
    # leaving the variable dead and losing the source location.  Use the
    # real filename, as upstream CPython does.
    if (is_corowrapper and
            coro.func is not None and
            not inspect.isgeneratorfunction(coro.func)):
        source = format_helpers._get_function_source(coro.func)
        if source is not None:
            filename, lineno = source
        if coro_frame is None:
            coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'
        else:
            coro_repr = f'{coro_name} running, defined at {filename}:{lineno}'
    elif coro_frame is not None:
        lineno = coro_frame.f_lineno
        coro_repr = f'{coro_name} running at {filename}:{lineno}'
    else:
        lineno = coro_code.co_firstlineno
        coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'

    return coro_repr
|
omnilmm/lib/python3.10/asyncio/events.py
ADDED
|
@@ -0,0 +1,819 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Event loop and event loop policy."""
|
| 2 |
+
|
| 3 |
+
__all__ = (
|
| 4 |
+
'AbstractEventLoopPolicy',
|
| 5 |
+
'AbstractEventLoop', 'AbstractServer',
|
| 6 |
+
'Handle', 'TimerHandle',
|
| 7 |
+
'get_event_loop_policy', 'set_event_loop_policy',
|
| 8 |
+
'get_event_loop', 'set_event_loop', 'new_event_loop',
|
| 9 |
+
'get_child_watcher', 'set_child_watcher',
|
| 10 |
+
'_set_running_loop', 'get_running_loop',
|
| 11 |
+
'_get_running_loop',
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
import contextvars
|
| 15 |
+
import os
|
| 16 |
+
import socket
|
| 17 |
+
import subprocess
|
| 18 |
+
import sys
|
| 19 |
+
import threading
|
| 20 |
+
|
| 21 |
+
from . import format_helpers
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class Handle:
    """Object returned by callback registration methods."""

    __slots__ = ('_callback', '_args', '_cancelled', '_loop',
                 '_source_traceback', '_repr', '__weakref__',
                 '_context')

    def __init__(self, callback, args, loop, context=None):
        # Capture the current contextvars context (unless one was
        # supplied); the callback later runs inside it.
        if context is None:
            context = contextvars.copy_context()
        self._context = context
        self._loop = loop
        self._callback = callback
        self._args = args
        self._cancelled = False
        self._repr = None
        if self._loop.get_debug():
            # Creation stack, reported via call_exception_handler().
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))
        else:
            self._source_traceback = None

    def _repr_info(self):
        """Return the list of fragments joined by __repr__."""
        info = [self.__class__.__name__]
        if self._cancelled:
            info.append('cancelled')
        if self._callback is not None:
            info.append(format_helpers._format_callback_source(
                self._callback, self._args))
        if self._source_traceback:
            frame = self._source_traceback[-1]
            info.append(f'created at {frame[0]}:{frame[1]}')
        return info

    def __repr__(self):
        if self._repr is not None:
            # A handle cancelled in debug mode froze its repr (see
            # cancel() below).
            return self._repr
        info = self._repr_info()
        return '<{}>'.format(' '.join(info))

    def cancel(self):
        """Prevent the callback from being run; idempotent."""
        if not self._cancelled:
            self._cancelled = True
            if self._loop.get_debug():
                # Keep a representation in debug mode to keep callback and
                # parameters. For example, to log the warning
                # "Executing <Handle...> took 2.5 second"
                self._repr = repr(self)
            # Drop references so a cancelled handle doesn't keep its
            # callback and arguments alive.
            self._callback = None
            self._args = None

    def cancelled(self):
        """Return True if cancel() was called."""
        return self._cancelled

    def _run(self):
        """Invoke the callback inside its saved context.

        SystemExit and KeyboardInterrupt propagate; any other exception
        is routed to the loop's exception handler.
        """
        try:
            self._context.run(self._callback, *self._args)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            cb = format_helpers._format_callback_source(
                self._callback, self._args)
            msg = f'Exception in callback {cb}'
            context = {
                'message': msg,
                'exception': exc,
                'handle': self,
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        self = None  # Needed to break cycles when an exception occurs.
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class TimerHandle(Handle):
    """Object returned by timed callback registration methods."""

    __slots__ = ['_scheduled', '_when']

    def __init__(self, when, callback, args, loop, context=None):
        # *when* is an absolute timestamp on the loop.time() clock.
        assert when is not None
        super().__init__(callback, args, loop, context)
        if self._source_traceback:
            # Drop the frame for this __init__ so the recorded stack
            # points at the real caller.
            del self._source_traceback[-1]
        self._when = when
        self._scheduled = False  # set by the loop when heap-scheduled

    def _repr_info(self):
        info = super()._repr_info()
        # Insert the deadline after 'cancelled' when present, otherwise
        # right after the class name.
        pos = 2 if self._cancelled else 1
        info.insert(pos, f'when={self._when}')
        return info

    def __hash__(self):
        # Hash on the deadline only; equal handles (see __eq__) share
        # the same _when, so the hash invariant holds.
        return hash(self._when)

    # Rich comparisons order timer handles by deadline so they can be
    # kept in the loop's heap of scheduled callbacks.

    def __lt__(self, other):
        if isinstance(other, TimerHandle):
            return self._when < other._when
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, TimerHandle):
            return self._when < other._when or self.__eq__(other)
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, TimerHandle):
            return self._when > other._when
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, TimerHandle):
            return self._when > other._when or self.__eq__(other)
        return NotImplemented

    def __eq__(self, other):
        if isinstance(other, TimerHandle):
            return (self._when == other._when and
                    self._callback == other._callback and
                    self._args == other._args and
                    self._cancelled == other._cancelled)
        return NotImplemented

    def cancel(self):
        if not self._cancelled:
            # Let the loop account for the cancellation before the base
            # class drops the callback reference.
            self._loop._timer_handle_cancelled(self)
        super().cancel()

    def when(self):
        """Return a scheduled callback time.

        The time is an absolute timestamp, using the same time
        reference as loop.time().
        """
        return self._when
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class AbstractServer:
    """Abstract server returned by create_server().

    This is a pure interface: concrete event loops supply the
    implementation, and every method here raises NotImplementedError.
    """

    def close(self):
        """Stop serving. This leaves existing connections open."""
        raise NotImplementedError

    def get_loop(self):
        """Get the event loop the Server object is attached to."""
        raise NotImplementedError

    def is_serving(self):
        """Return True if the server is accepting connections."""
        raise NotImplementedError

    async def start_serving(self):
        """Start accepting connections.

        This method is idempotent, so it can be called when
        the server is already serving.
        """
        raise NotImplementedError

    async def serve_forever(self):
        """Start accepting connections until the coroutine is cancelled.

        The server is closed when the coroutine is cancelled.
        """
        raise NotImplementedError

    async def wait_closed(self):
        """Coroutine to wait until service is closed."""
        raise NotImplementedError

    async def __aenter__(self):
        # Support "async with server:" -- exit closes the server and
        # waits for it to finish closing.
        return self

    async def __aexit__(self, *exc):
        self.close()
        await self.wait_closed()
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
class AbstractEventLoop:
|
| 205 |
+
"""Abstract event loop."""
|
| 206 |
+
|
| 207 |
+
# Running and stopping the event loop.
|
| 208 |
+
|
| 209 |
+
def run_forever(self):
|
| 210 |
+
"""Run the event loop until stop() is called."""
|
| 211 |
+
raise NotImplementedError
|
| 212 |
+
|
| 213 |
+
def run_until_complete(self, future):
|
| 214 |
+
"""Run the event loop until a Future is done.
|
| 215 |
+
|
| 216 |
+
Return the Future's result, or raise its exception.
|
| 217 |
+
"""
|
| 218 |
+
raise NotImplementedError
|
| 219 |
+
|
| 220 |
+
def stop(self):
|
| 221 |
+
"""Stop the event loop as soon as reasonable.
|
| 222 |
+
|
| 223 |
+
Exactly how soon that is may depend on the implementation, but
|
| 224 |
+
no more I/O callbacks should be scheduled.
|
| 225 |
+
"""
|
| 226 |
+
raise NotImplementedError
|
| 227 |
+
|
| 228 |
+
def is_running(self):
|
| 229 |
+
"""Return whether the event loop is currently running."""
|
| 230 |
+
raise NotImplementedError
|
| 231 |
+
|
| 232 |
+
def is_closed(self):
|
| 233 |
+
"""Returns True if the event loop was closed."""
|
| 234 |
+
raise NotImplementedError
|
| 235 |
+
|
| 236 |
+
def close(self):
|
| 237 |
+
"""Close the loop.
|
| 238 |
+
|
| 239 |
+
The loop should not be running.
|
| 240 |
+
|
| 241 |
+
This is idempotent and irreversible.
|
| 242 |
+
|
| 243 |
+
No other methods should be called after this one.
|
| 244 |
+
"""
|
| 245 |
+
raise NotImplementedError
|
| 246 |
+
|
| 247 |
+
async def shutdown_asyncgens(self):
|
| 248 |
+
"""Shutdown all active asynchronous generators."""
|
| 249 |
+
raise NotImplementedError
|
| 250 |
+
|
| 251 |
+
async def shutdown_default_executor(self):
|
| 252 |
+
"""Schedule the shutdown of the default executor."""
|
| 253 |
+
raise NotImplementedError
|
| 254 |
+
|
| 255 |
+
# Methods scheduling callbacks. All these return Handles.
|
| 256 |
+
|
| 257 |
+
def _timer_handle_cancelled(self, handle):
|
| 258 |
+
"""Notification that a TimerHandle has been cancelled."""
|
| 259 |
+
raise NotImplementedError
|
| 260 |
+
|
| 261 |
+
def call_soon(self, callback, *args, context=None):
|
| 262 |
+
return self.call_later(0, callback, *args, context=context)
|
| 263 |
+
|
| 264 |
+
def call_later(self, delay, callback, *args, context=None):
|
| 265 |
+
raise NotImplementedError
|
| 266 |
+
|
| 267 |
+
def call_at(self, when, callback, *args, context=None):
|
| 268 |
+
raise NotImplementedError
|
| 269 |
+
|
| 270 |
+
def time(self):
|
| 271 |
+
raise NotImplementedError
|
| 272 |
+
|
| 273 |
+
def create_future(self):
|
| 274 |
+
raise NotImplementedError
|
| 275 |
+
|
| 276 |
+
# Method scheduling a coroutine object: create a task.
|
| 277 |
+
|
| 278 |
+
def create_task(self, coro, *, name=None):
|
| 279 |
+
raise NotImplementedError
|
| 280 |
+
|
| 281 |
+
# Methods for interacting with threads.
|
| 282 |
+
|
| 283 |
+
def call_soon_threadsafe(self, callback, *args, context=None):
|
| 284 |
+
raise NotImplementedError
|
| 285 |
+
|
| 286 |
+
def run_in_executor(self, executor, func, *args):
|
| 287 |
+
raise NotImplementedError
|
| 288 |
+
|
| 289 |
+
def set_default_executor(self, executor):
|
| 290 |
+
raise NotImplementedError
|
| 291 |
+
|
| 292 |
+
# Network I/O methods returning Futures.
|
| 293 |
+
|
| 294 |
+
async def getaddrinfo(self, host, port, *,
|
| 295 |
+
family=0, type=0, proto=0, flags=0):
|
| 296 |
+
raise NotImplementedError
|
| 297 |
+
|
| 298 |
+
async def getnameinfo(self, sockaddr, flags=0):
|
| 299 |
+
raise NotImplementedError
|
| 300 |
+
|
| 301 |
+
async def create_connection(
|
| 302 |
+
self, protocol_factory, host=None, port=None,
|
| 303 |
+
*, ssl=None, family=0, proto=0,
|
| 304 |
+
flags=0, sock=None, local_addr=None,
|
| 305 |
+
server_hostname=None,
|
| 306 |
+
ssl_handshake_timeout=None,
|
| 307 |
+
happy_eyeballs_delay=None, interleave=None):
|
| 308 |
+
raise NotImplementedError
|
| 309 |
+
|
| 310 |
+
async def create_server(
|
| 311 |
+
self, protocol_factory, host=None, port=None,
|
| 312 |
+
*, family=socket.AF_UNSPEC,
|
| 313 |
+
flags=socket.AI_PASSIVE, sock=None, backlog=100,
|
| 314 |
+
ssl=None, reuse_address=None, reuse_port=None,
|
| 315 |
+
ssl_handshake_timeout=None,
|
| 316 |
+
start_serving=True):
|
| 317 |
+
"""A coroutine which creates a TCP server bound to host and port.
|
| 318 |
+
|
| 319 |
+
The return value is a Server object which can be used to stop
|
| 320 |
+
the service.
|
| 321 |
+
|
| 322 |
+
If host is an empty string or None all interfaces are assumed
|
| 323 |
+
and a list of multiple sockets will be returned (most likely
|
| 324 |
+
one for IPv4 and another one for IPv6). The host parameter can also be
|
| 325 |
+
a sequence (e.g. list) of hosts to bind to.
|
| 326 |
+
|
| 327 |
+
family can be set to either AF_INET or AF_INET6 to force the
|
| 328 |
+
socket to use IPv4 or IPv6. If not set it will be determined
|
| 329 |
+
from host (defaults to AF_UNSPEC).
|
| 330 |
+
|
| 331 |
+
flags is a bitmask for getaddrinfo().
|
| 332 |
+
|
| 333 |
+
sock can optionally be specified in order to use a preexisting
|
| 334 |
+
socket object.
|
| 335 |
+
|
| 336 |
+
backlog is the maximum number of queued connections passed to
|
| 337 |
+
listen() (defaults to 100).
|
| 338 |
+
|
| 339 |
+
ssl can be set to an SSLContext to enable SSL over the
|
| 340 |
+
accepted connections.
|
| 341 |
+
|
| 342 |
+
reuse_address tells the kernel to reuse a local socket in
|
| 343 |
+
TIME_WAIT state, without waiting for its natural timeout to
|
| 344 |
+
expire. If not specified will automatically be set to True on
|
| 345 |
+
UNIX.
|
| 346 |
+
|
| 347 |
+
reuse_port tells the kernel to allow this endpoint to be bound to
|
| 348 |
+
the same port as other existing endpoints are bound to, so long as
|
| 349 |
+
they all set this flag when being created. This option is not
|
| 350 |
+
supported on Windows.
|
| 351 |
+
|
| 352 |
+
ssl_handshake_timeout is the time in seconds that an SSL server
|
| 353 |
+
will wait for completion of the SSL handshake before aborting the
|
| 354 |
+
connection. Default is 60s.
|
| 355 |
+
|
| 356 |
+
start_serving set to True (default) causes the created server
|
| 357 |
+
to start accepting connections immediately. When set to False,
|
| 358 |
+
the user should await Server.start_serving() or Server.serve_forever()
|
| 359 |
+
to make the server to start accepting connections.
|
| 360 |
+
"""
|
| 361 |
+
raise NotImplementedError
|
| 362 |
+
|
| 363 |
+
async def sendfile(self, transport, file, offset=0, count=None,
|
| 364 |
+
*, fallback=True):
|
| 365 |
+
"""Send a file through a transport.
|
| 366 |
+
|
| 367 |
+
Return an amount of sent bytes.
|
| 368 |
+
"""
|
| 369 |
+
raise NotImplementedError
|
| 370 |
+
|
| 371 |
+
async def start_tls(self, transport, protocol, sslcontext, *,
|
| 372 |
+
server_side=False,
|
| 373 |
+
server_hostname=None,
|
| 374 |
+
ssl_handshake_timeout=None):
|
| 375 |
+
"""Upgrade a transport to TLS.
|
| 376 |
+
|
| 377 |
+
Return a new transport that *protocol* should start using
|
| 378 |
+
immediately.
|
| 379 |
+
"""
|
| 380 |
+
raise NotImplementedError
|
| 381 |
+
|
| 382 |
+
async def create_unix_connection(
|
| 383 |
+
self, protocol_factory, path=None, *,
|
| 384 |
+
ssl=None, sock=None,
|
| 385 |
+
server_hostname=None,
|
| 386 |
+
ssl_handshake_timeout=None):
|
| 387 |
+
raise NotImplementedError
|
| 388 |
+
|
| 389 |
+
async def create_unix_server(
|
| 390 |
+
self, protocol_factory, path=None, *,
|
| 391 |
+
sock=None, backlog=100, ssl=None,
|
| 392 |
+
ssl_handshake_timeout=None,
|
| 393 |
+
start_serving=True):
|
| 394 |
+
"""A coroutine which creates a UNIX Domain Socket server.
|
| 395 |
+
|
| 396 |
+
The return value is a Server object, which can be used to stop
|
| 397 |
+
the service.
|
| 398 |
+
|
| 399 |
+
path is a str, representing a file system path to bind the
|
| 400 |
+
server socket to.
|
| 401 |
+
|
| 402 |
+
sock can optionally be specified in order to use a preexisting
|
| 403 |
+
socket object.
|
| 404 |
+
|
| 405 |
+
backlog is the maximum number of queued connections passed to
|
| 406 |
+
listen() (defaults to 100).
|
| 407 |
+
|
| 408 |
+
ssl can be set to an SSLContext to enable SSL over the
|
| 409 |
+
accepted connections.
|
| 410 |
+
|
| 411 |
+
ssl_handshake_timeout is the time in seconds that an SSL server
|
| 412 |
+
will wait for the SSL handshake to complete (defaults to 60s).
|
| 413 |
+
|
| 414 |
+
start_serving set to True (default) causes the created server
|
| 415 |
+
to start accepting connections immediately. When set to False,
|
| 416 |
+
the user should await Server.start_serving() or Server.serve_forever()
|
| 417 |
+
to make the server to start accepting connections.
|
| 418 |
+
"""
|
| 419 |
+
raise NotImplementedError
|
| 420 |
+
|
| 421 |
+
async def connect_accepted_socket(
|
| 422 |
+
self, protocol_factory, sock,
|
| 423 |
+
*, ssl=None,
|
| 424 |
+
ssl_handshake_timeout=None):
|
| 425 |
+
"""Handle an accepted connection.
|
| 426 |
+
|
| 427 |
+
This is used by servers that accept connections outside of
|
| 428 |
+
asyncio, but use asyncio to handle connections.
|
| 429 |
+
|
| 430 |
+
This method is a coroutine. When completed, the coroutine
|
| 431 |
+
returns a (transport, protocol) pair.
|
| 432 |
+
"""
|
| 433 |
+
raise NotImplementedError
|
| 434 |
+
|
| 435 |
+
async def create_datagram_endpoint(self, protocol_factory,
|
| 436 |
+
local_addr=None, remote_addr=None, *,
|
| 437 |
+
family=0, proto=0, flags=0,
|
| 438 |
+
reuse_address=None, reuse_port=None,
|
| 439 |
+
allow_broadcast=None, sock=None):
|
| 440 |
+
"""A coroutine which creates a datagram endpoint.
|
| 441 |
+
|
| 442 |
+
This method will try to establish the endpoint in the background.
|
| 443 |
+
When successful, the coroutine returns a (transport, protocol) pair.
|
| 444 |
+
|
| 445 |
+
protocol_factory must be a callable returning a protocol instance.
|
| 446 |
+
|
| 447 |
+
socket family AF_INET, socket.AF_INET6 or socket.AF_UNIX depending on
|
| 448 |
+
host (or family if specified), socket type SOCK_DGRAM.
|
| 449 |
+
|
| 450 |
+
reuse_address tells the kernel to reuse a local socket in
|
| 451 |
+
TIME_WAIT state, without waiting for its natural timeout to
|
| 452 |
+
expire. If not specified it will automatically be set to True on
|
| 453 |
+
UNIX.
|
| 454 |
+
|
| 455 |
+
reuse_port tells the kernel to allow this endpoint to be bound to
|
| 456 |
+
the same port as other existing endpoints are bound to, so long as
|
| 457 |
+
they all set this flag when being created. This option is not
|
| 458 |
+
supported on Windows and some UNIX's. If the
|
| 459 |
+
:py:data:`~socket.SO_REUSEPORT` constant is not defined then this
|
| 460 |
+
capability is unsupported.
|
| 461 |
+
|
| 462 |
+
allow_broadcast tells the kernel to allow this endpoint to send
|
| 463 |
+
messages to the broadcast address.
|
| 464 |
+
|
| 465 |
+
sock can optionally be specified in order to use a preexisting
|
| 466 |
+
socket object.
|
| 467 |
+
"""
|
| 468 |
+
raise NotImplementedError
|
| 469 |
+
|
| 470 |
+
# Pipes and subprocesses.
|
| 471 |
+
|
| 472 |
+
async def connect_read_pipe(self, protocol_factory, pipe):
|
| 473 |
+
"""Register read pipe in event loop. Set the pipe to non-blocking mode.
|
| 474 |
+
|
| 475 |
+
protocol_factory should instantiate object with Protocol interface.
|
| 476 |
+
pipe is a file-like object.
|
| 477 |
+
Return pair (transport, protocol), where transport supports the
|
| 478 |
+
ReadTransport interface."""
|
| 479 |
+
# The reason to accept file-like object instead of just file descriptor
|
| 480 |
+
# is: we need to own pipe and close it at transport finishing
|
| 481 |
+
# Can got complicated errors if pass f.fileno(),
|
| 482 |
+
# close fd in pipe transport then close f and vice versa.
|
| 483 |
+
raise NotImplementedError
|
| 484 |
+
|
| 485 |
+
async def connect_write_pipe(self, protocol_factory, pipe):
|
| 486 |
+
"""Register write pipe in event loop.
|
| 487 |
+
|
| 488 |
+
protocol_factory should instantiate object with BaseProtocol interface.
|
| 489 |
+
Pipe is file-like object already switched to nonblocking.
|
| 490 |
+
Return pair (transport, protocol), where transport support
|
| 491 |
+
WriteTransport interface."""
|
| 492 |
+
# The reason to accept file-like object instead of just file descriptor
|
| 493 |
+
# is: we need to own pipe and close it at transport finishing
|
| 494 |
+
# Can got complicated errors if pass f.fileno(),
|
| 495 |
+
# close fd in pipe transport then close f and vice versa.
|
| 496 |
+
raise NotImplementedError
|
| 497 |
+
|
| 498 |
+
async def subprocess_shell(self, protocol_factory, cmd, *,
|
| 499 |
+
stdin=subprocess.PIPE,
|
| 500 |
+
stdout=subprocess.PIPE,
|
| 501 |
+
stderr=subprocess.PIPE,
|
| 502 |
+
**kwargs):
|
| 503 |
+
raise NotImplementedError
|
| 504 |
+
|
| 505 |
+
async def subprocess_exec(self, protocol_factory, *args,
|
| 506 |
+
stdin=subprocess.PIPE,
|
| 507 |
+
stdout=subprocess.PIPE,
|
| 508 |
+
stderr=subprocess.PIPE,
|
| 509 |
+
**kwargs):
|
| 510 |
+
raise NotImplementedError
|
| 511 |
+
|
| 512 |
+
# Ready-based callback registration methods.
|
| 513 |
+
# The add_*() methods return None.
|
| 514 |
+
# The remove_*() methods return True if something was removed,
|
| 515 |
+
# False if there was nothing to delete.
|
| 516 |
+
|
| 517 |
+
def add_reader(self, fd, callback, *args):
|
| 518 |
+
raise NotImplementedError
|
| 519 |
+
|
| 520 |
+
def remove_reader(self, fd):
|
| 521 |
+
raise NotImplementedError
|
| 522 |
+
|
| 523 |
+
def add_writer(self, fd, callback, *args):
|
| 524 |
+
raise NotImplementedError
|
| 525 |
+
|
| 526 |
+
def remove_writer(self, fd):
|
| 527 |
+
raise NotImplementedError
|
| 528 |
+
|
| 529 |
+
# Completion based I/O methods returning Futures.
|
| 530 |
+
|
| 531 |
+
async def sock_recv(self, sock, nbytes):
|
| 532 |
+
raise NotImplementedError
|
| 533 |
+
|
| 534 |
+
async def sock_recv_into(self, sock, buf):
|
| 535 |
+
raise NotImplementedError
|
| 536 |
+
|
| 537 |
+
async def sock_sendall(self, sock, data):
|
| 538 |
+
raise NotImplementedError
|
| 539 |
+
|
| 540 |
+
async def sock_connect(self, sock, address):
|
| 541 |
+
raise NotImplementedError
|
| 542 |
+
|
| 543 |
+
async def sock_accept(self, sock):
|
| 544 |
+
raise NotImplementedError
|
| 545 |
+
|
| 546 |
+
async def sock_sendfile(self, sock, file, offset=0, count=None,
|
| 547 |
+
*, fallback=None):
|
| 548 |
+
raise NotImplementedError
|
| 549 |
+
|
| 550 |
+
# Signal handling.
|
| 551 |
+
|
| 552 |
+
def add_signal_handler(self, sig, callback, *args):
|
| 553 |
+
raise NotImplementedError
|
| 554 |
+
|
| 555 |
+
def remove_signal_handler(self, sig):
|
| 556 |
+
raise NotImplementedError
|
| 557 |
+
|
| 558 |
+
# Task factory.
|
| 559 |
+
|
| 560 |
+
def set_task_factory(self, factory):
|
| 561 |
+
raise NotImplementedError
|
| 562 |
+
|
| 563 |
+
def get_task_factory(self):
|
| 564 |
+
raise NotImplementedError
|
| 565 |
+
|
| 566 |
+
# Error handlers.
|
| 567 |
+
|
| 568 |
+
def get_exception_handler(self):
|
| 569 |
+
raise NotImplementedError
|
| 570 |
+
|
| 571 |
+
def set_exception_handler(self, handler):
|
| 572 |
+
raise NotImplementedError
|
| 573 |
+
|
| 574 |
+
def default_exception_handler(self, context):
|
| 575 |
+
raise NotImplementedError
|
| 576 |
+
|
| 577 |
+
def call_exception_handler(self, context):
|
| 578 |
+
raise NotImplementedError
|
| 579 |
+
|
| 580 |
+
# Debug flag management.
|
| 581 |
+
|
| 582 |
+
def get_debug(self):
|
| 583 |
+
raise NotImplementedError
|
| 584 |
+
|
| 585 |
+
def set_debug(self, enabled):
|
| 586 |
+
raise NotImplementedError
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
class AbstractEventLoopPolicy:
|
| 590 |
+
"""Abstract policy for accessing the event loop."""
|
| 591 |
+
|
| 592 |
+
def get_event_loop(self):
|
| 593 |
+
"""Get the event loop for the current context.
|
| 594 |
+
|
| 595 |
+
Returns an event loop object implementing the BaseEventLoop interface,
|
| 596 |
+
or raises an exception in case no event loop has been set for the
|
| 597 |
+
current context and the current policy does not specify to create one.
|
| 598 |
+
|
| 599 |
+
It should never return None."""
|
| 600 |
+
raise NotImplementedError
|
| 601 |
+
|
| 602 |
+
def set_event_loop(self, loop):
|
| 603 |
+
"""Set the event loop for the current context to loop."""
|
| 604 |
+
raise NotImplementedError
|
| 605 |
+
|
| 606 |
+
def new_event_loop(self):
|
| 607 |
+
"""Create and return a new event loop object according to this
|
| 608 |
+
policy's rules. If there's need to set this loop as the event loop for
|
| 609 |
+
the current context, set_event_loop must be called explicitly."""
|
| 610 |
+
raise NotImplementedError
|
| 611 |
+
|
| 612 |
+
# Child processes handling (Unix only).
|
| 613 |
+
|
| 614 |
+
def get_child_watcher(self):
|
| 615 |
+
"Get the watcher for child processes."
|
| 616 |
+
raise NotImplementedError
|
| 617 |
+
|
| 618 |
+
def set_child_watcher(self, watcher):
|
| 619 |
+
"""Set the watcher for child processes."""
|
| 620 |
+
raise NotImplementedError
|
| 621 |
+
|
| 622 |
+
|
| 623 |
+
class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
|
| 624 |
+
"""Default policy implementation for accessing the event loop.
|
| 625 |
+
|
| 626 |
+
In this policy, each thread has its own event loop. However, we
|
| 627 |
+
only automatically create an event loop by default for the main
|
| 628 |
+
thread; other threads by default have no event loop.
|
| 629 |
+
|
| 630 |
+
Other policies may have different rules (e.g. a single global
|
| 631 |
+
event loop, or automatically creating an event loop per thread, or
|
| 632 |
+
using some other notion of context to which an event loop is
|
| 633 |
+
associated).
|
| 634 |
+
"""
|
| 635 |
+
|
| 636 |
+
_loop_factory = None
|
| 637 |
+
|
| 638 |
+
class _Local(threading.local):
|
| 639 |
+
_loop = None
|
| 640 |
+
_set_called = False
|
| 641 |
+
|
| 642 |
+
def __init__(self):
|
| 643 |
+
self._local = self._Local()
|
| 644 |
+
|
| 645 |
+
def get_event_loop(self):
|
| 646 |
+
"""Get the event loop for the current context.
|
| 647 |
+
|
| 648 |
+
Returns an instance of EventLoop or raises an exception.
|
| 649 |
+
"""
|
| 650 |
+
if (self._local._loop is None and
|
| 651 |
+
not self._local._set_called and
|
| 652 |
+
threading.current_thread() is threading.main_thread()):
|
| 653 |
+
self.set_event_loop(self.new_event_loop())
|
| 654 |
+
|
| 655 |
+
if self._local._loop is None:
|
| 656 |
+
raise RuntimeError('There is no current event loop in thread %r.'
|
| 657 |
+
% threading.current_thread().name)
|
| 658 |
+
|
| 659 |
+
return self._local._loop
|
| 660 |
+
|
| 661 |
+
def set_event_loop(self, loop):
|
| 662 |
+
"""Set the event loop."""
|
| 663 |
+
self._local._set_called = True
|
| 664 |
+
assert loop is None or isinstance(loop, AbstractEventLoop)
|
| 665 |
+
self._local._loop = loop
|
| 666 |
+
|
| 667 |
+
def new_event_loop(self):
|
| 668 |
+
"""Create a new event loop.
|
| 669 |
+
|
| 670 |
+
You must call set_event_loop() to make this the current event
|
| 671 |
+
loop.
|
| 672 |
+
"""
|
| 673 |
+
return self._loop_factory()
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
# Event loop policy. The policy itself is always global, even if the
|
| 677 |
+
# policy's rules say that there is an event loop per thread (or other
|
| 678 |
+
# notion of context). The default policy is installed by the first
|
| 679 |
+
# call to get_event_loop_policy().
|
| 680 |
+
_event_loop_policy = None
|
| 681 |
+
|
| 682 |
+
# Lock for protecting the on-the-fly creation of the event loop policy.
|
| 683 |
+
_lock = threading.Lock()
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
# A TLS for the running event loop, used by _get_running_loop.
|
| 687 |
+
class _RunningLoop(threading.local):
|
| 688 |
+
loop_pid = (None, None)
|
| 689 |
+
|
| 690 |
+
|
| 691 |
+
_running_loop = _RunningLoop()
|
| 692 |
+
|
| 693 |
+
|
| 694 |
+
def get_running_loop():
|
| 695 |
+
"""Return the running event loop. Raise a RuntimeError if there is none.
|
| 696 |
+
|
| 697 |
+
This function is thread-specific.
|
| 698 |
+
"""
|
| 699 |
+
# NOTE: this function is implemented in C (see _asynciomodule.c)
|
| 700 |
+
loop = _get_running_loop()
|
| 701 |
+
if loop is None:
|
| 702 |
+
raise RuntimeError('no running event loop')
|
| 703 |
+
return loop
|
| 704 |
+
|
| 705 |
+
|
| 706 |
+
def _get_running_loop():
|
| 707 |
+
"""Return the running event loop or None.
|
| 708 |
+
|
| 709 |
+
This is a low-level function intended to be used by event loops.
|
| 710 |
+
This function is thread-specific.
|
| 711 |
+
"""
|
| 712 |
+
# NOTE: this function is implemented in C (see _asynciomodule.c)
|
| 713 |
+
running_loop, pid = _running_loop.loop_pid
|
| 714 |
+
if running_loop is not None and pid == os.getpid():
|
| 715 |
+
return running_loop
|
| 716 |
+
|
| 717 |
+
|
| 718 |
+
def _set_running_loop(loop):
|
| 719 |
+
"""Set the running event loop.
|
| 720 |
+
|
| 721 |
+
This is a low-level function intended to be used by event loops.
|
| 722 |
+
This function is thread-specific.
|
| 723 |
+
"""
|
| 724 |
+
# NOTE: this function is implemented in C (see _asynciomodule.c)
|
| 725 |
+
_running_loop.loop_pid = (loop, os.getpid())
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
def _init_event_loop_policy():
|
| 729 |
+
global _event_loop_policy
|
| 730 |
+
with _lock:
|
| 731 |
+
if _event_loop_policy is None: # pragma: no branch
|
| 732 |
+
from . import DefaultEventLoopPolicy
|
| 733 |
+
_event_loop_policy = DefaultEventLoopPolicy()
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
def get_event_loop_policy():
|
| 737 |
+
"""Get the current event loop policy."""
|
| 738 |
+
if _event_loop_policy is None:
|
| 739 |
+
_init_event_loop_policy()
|
| 740 |
+
return _event_loop_policy
|
| 741 |
+
|
| 742 |
+
|
| 743 |
+
def set_event_loop_policy(policy):
|
| 744 |
+
"""Set the current event loop policy.
|
| 745 |
+
|
| 746 |
+
If policy is None, the default policy is restored."""
|
| 747 |
+
global _event_loop_policy
|
| 748 |
+
assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
|
| 749 |
+
_event_loop_policy = policy
|
| 750 |
+
|
| 751 |
+
|
| 752 |
+
def get_event_loop():
|
| 753 |
+
"""Return an asyncio event loop.
|
| 754 |
+
|
| 755 |
+
When called from a coroutine or a callback (e.g. scheduled with call_soon
|
| 756 |
+
or similar API), this function will always return the running event loop.
|
| 757 |
+
|
| 758 |
+
If there is no running event loop set, the function will return
|
| 759 |
+
the result of `get_event_loop_policy().get_event_loop()` call.
|
| 760 |
+
"""
|
| 761 |
+
# NOTE: this function is implemented in C (see _asynciomodule.c)
|
| 762 |
+
return _py__get_event_loop()
|
| 763 |
+
|
| 764 |
+
|
| 765 |
+
def _get_event_loop(stacklevel=3):
|
| 766 |
+
# This internal method is going away in Python 3.12, left here only for
|
| 767 |
+
# backwards compatibility with 3.10.0 - 3.10.8 and 3.11.0.
|
| 768 |
+
# Similarly, this method's C equivalent in _asyncio is going away as well.
|
| 769 |
+
# See GH-99949 for more details.
|
| 770 |
+
current_loop = _get_running_loop()
|
| 771 |
+
if current_loop is not None:
|
| 772 |
+
return current_loop
|
| 773 |
+
return get_event_loop_policy().get_event_loop()
|
| 774 |
+
|
| 775 |
+
|
| 776 |
+
def set_event_loop(loop):
|
| 777 |
+
"""Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
|
| 778 |
+
get_event_loop_policy().set_event_loop(loop)
|
| 779 |
+
|
| 780 |
+
|
| 781 |
+
def new_event_loop():
|
| 782 |
+
"""Equivalent to calling get_event_loop_policy().new_event_loop()."""
|
| 783 |
+
return get_event_loop_policy().new_event_loop()
|
| 784 |
+
|
| 785 |
+
|
| 786 |
+
def get_child_watcher():
|
| 787 |
+
"""Equivalent to calling get_event_loop_policy().get_child_watcher()."""
|
| 788 |
+
return get_event_loop_policy().get_child_watcher()
|
| 789 |
+
|
| 790 |
+
|
| 791 |
+
def set_child_watcher(watcher):
|
| 792 |
+
"""Equivalent to calling
|
| 793 |
+
get_event_loop_policy().set_child_watcher(watcher)."""
|
| 794 |
+
return get_event_loop_policy().set_child_watcher(watcher)
|
| 795 |
+
|
| 796 |
+
|
| 797 |
+
# Alias pure-Python implementations for testing purposes.
|
| 798 |
+
_py__get_running_loop = _get_running_loop
|
| 799 |
+
_py__set_running_loop = _set_running_loop
|
| 800 |
+
_py_get_running_loop = get_running_loop
|
| 801 |
+
_py_get_event_loop = get_event_loop
|
| 802 |
+
_py__get_event_loop = _get_event_loop
|
| 803 |
+
|
| 804 |
+
|
| 805 |
+
try:
|
| 806 |
+
# get_event_loop() is one of the most frequently called
|
| 807 |
+
# functions in asyncio. Pure Python implementation is
|
| 808 |
+
# about 4 times slower than C-accelerated.
|
| 809 |
+
from _asyncio import (_get_running_loop, _set_running_loop,
|
| 810 |
+
get_running_loop, get_event_loop, _get_event_loop)
|
| 811 |
+
except ImportError:
|
| 812 |
+
pass
|
| 813 |
+
else:
|
| 814 |
+
# Alias C implementations for testing purposes.
|
| 815 |
+
_c__get_running_loop = _get_running_loop
|
| 816 |
+
_c__set_running_loop = _set_running_loop
|
| 817 |
+
_c_get_running_loop = get_running_loop
|
| 818 |
+
_c_get_event_loop = get_event_loop
|
| 819 |
+
_c__get_event_loop = _get_event_loop
|
omnilmm/lib/python3.10/asyncio/exceptions.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""asyncio exceptions."""
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
__all__ = ('CancelledError', 'InvalidStateError', 'TimeoutError',
|
| 5 |
+
'IncompleteReadError', 'LimitOverrunError',
|
| 6 |
+
'SendfileNotAvailableError')
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class CancelledError(BaseException):
|
| 10 |
+
"""The Future or Task was cancelled."""
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class TimeoutError(Exception):
|
| 14 |
+
"""The operation exceeded the given deadline."""
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class InvalidStateError(Exception):
|
| 18 |
+
"""The operation is not allowed in this state."""
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class SendfileNotAvailableError(RuntimeError):
|
| 22 |
+
"""Sendfile syscall is not available.
|
| 23 |
+
|
| 24 |
+
Raised if OS does not support sendfile syscall for given socket or
|
| 25 |
+
file type.
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class IncompleteReadError(EOFError):
|
| 30 |
+
"""
|
| 31 |
+
Incomplete read error. Attributes:
|
| 32 |
+
|
| 33 |
+
- partial: read bytes string before the end of stream was reached
|
| 34 |
+
- expected: total number of expected bytes (or None if unknown)
|
| 35 |
+
"""
|
| 36 |
+
def __init__(self, partial, expected):
|
| 37 |
+
r_expected = 'undefined' if expected is None else repr(expected)
|
| 38 |
+
super().__init__(f'{len(partial)} bytes read on a total of '
|
| 39 |
+
f'{r_expected} expected bytes')
|
| 40 |
+
self.partial = partial
|
| 41 |
+
self.expected = expected
|
| 42 |
+
|
| 43 |
+
def __reduce__(self):
|
| 44 |
+
return type(self), (self.partial, self.expected)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class LimitOverrunError(Exception):
|
| 48 |
+
"""Reached the buffer limit while looking for a separator.
|
| 49 |
+
|
| 50 |
+
Attributes:
|
| 51 |
+
- consumed: total number of to be consumed bytes.
|
| 52 |
+
"""
|
| 53 |
+
def __init__(self, message, consumed):
|
| 54 |
+
super().__init__(message)
|
| 55 |
+
self.consumed = consumed
|
| 56 |
+
|
| 57 |
+
def __reduce__(self):
|
| 58 |
+
return type(self), (self.args[0], self.consumed)
|
omnilmm/lib/python3.10/asyncio/format_helpers.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import inspect
|
| 3 |
+
import reprlib
|
| 4 |
+
import sys
|
| 5 |
+
import traceback
|
| 6 |
+
|
| 7 |
+
from . import constants
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def _get_function_source(func):
|
| 11 |
+
func = inspect.unwrap(func)
|
| 12 |
+
if inspect.isfunction(func):
|
| 13 |
+
code = func.__code__
|
| 14 |
+
return (code.co_filename, code.co_firstlineno)
|
| 15 |
+
if isinstance(func, functools.partial):
|
| 16 |
+
return _get_function_source(func.func)
|
| 17 |
+
if isinstance(func, functools.partialmethod):
|
| 18 |
+
return _get_function_source(func.func)
|
| 19 |
+
return None
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def _format_callback_source(func, args):
|
| 23 |
+
func_repr = _format_callback(func, args, None)
|
| 24 |
+
source = _get_function_source(func)
|
| 25 |
+
if source:
|
| 26 |
+
func_repr += f' at {source[0]}:{source[1]}'
|
| 27 |
+
return func_repr
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _format_args_and_kwargs(args, kwargs):
|
| 31 |
+
"""Format function arguments and keyword arguments.
|
| 32 |
+
|
| 33 |
+
Special case for a single parameter: ('hello',) is formatted as ('hello').
|
| 34 |
+
"""
|
| 35 |
+
# use reprlib to limit the length of the output
|
| 36 |
+
items = []
|
| 37 |
+
if args:
|
| 38 |
+
items.extend(reprlib.repr(arg) for arg in args)
|
| 39 |
+
if kwargs:
|
| 40 |
+
items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items())
|
| 41 |
+
return '({})'.format(', '.join(items))
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _format_callback(func, args, kwargs, suffix=''):
|
| 45 |
+
if isinstance(func, functools.partial):
|
| 46 |
+
suffix = _format_args_and_kwargs(args, kwargs) + suffix
|
| 47 |
+
return _format_callback(func.func, func.args, func.keywords, suffix)
|
| 48 |
+
|
| 49 |
+
if hasattr(func, '__qualname__') and func.__qualname__:
|
| 50 |
+
func_repr = func.__qualname__
|
| 51 |
+
elif hasattr(func, '__name__') and func.__name__:
|
| 52 |
+
func_repr = func.__name__
|
| 53 |
+
else:
|
| 54 |
+
func_repr = repr(func)
|
| 55 |
+
|
| 56 |
+
func_repr += _format_args_and_kwargs(args, kwargs)
|
| 57 |
+
if suffix:
|
| 58 |
+
func_repr += suffix
|
| 59 |
+
return func_repr
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def extract_stack(f=None, limit=None):
    """Replacement for traceback.extract_stack() that only does the
    necessary work for asyncio debug mode.

    *f* defaults to the caller's frame; *limit* defaults to
    constants.DEBUG_STACK_DEPTH. Returns a StackSummary ordered
    oldest frame first (hence the reverse() below).
    """
    if f is None:
        # Skip this function's own frame.
        f = sys._getframe().f_back
    if limit is None:
        # Limit the amount of work to a reasonable amount, as extract_stack()
        # can be called for each coroutine and future in debug mode.
        limit = constants.DEBUG_STACK_DEPTH
    # lookup_lines=False defers reading source lines until actually needed.
    stack = traceback.StackSummary.extract(traceback.walk_stack(f),
                                           limit=limit,
                                           lookup_lines=False)
    stack.reverse()
    return stack
|
omnilmm/lib/python3.10/asyncio/futures.py
ADDED
|
@@ -0,0 +1,426 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""A Future class similar to the one in PEP 3148."""
|
| 2 |
+
|
| 3 |
+
__all__ = (
|
| 4 |
+
'Future', 'wrap_future', 'isfuture',
|
| 5 |
+
)
|
| 6 |
+
|
| 7 |
+
import concurrent.futures
|
| 8 |
+
import contextvars
|
| 9 |
+
import logging
|
| 10 |
+
import sys
|
| 11 |
+
from types import GenericAlias
|
| 12 |
+
|
| 13 |
+
from . import base_futures
|
| 14 |
+
from . import events
|
| 15 |
+
from . import exceptions
|
| 16 |
+
from . import format_helpers
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
isfuture = base_futures.isfuture
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
_PENDING = base_futures._PENDING
|
| 23 |
+
_CANCELLED = base_futures._CANCELLED
|
| 24 |
+
_FINISHED = base_futures._FINISHED
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class Future:
    """This class is *almost* compatible with concurrent.futures.Future.

    Differences:

    - This class is not thread-safe.

    - result() and exception() do not take a timeout argument and
      raise an exception when the future isn't done yet.

    - Callbacks registered with add_done_callback() are always called
      via the event loop's call_soon().

    - This class is not compatible with the wait() and as_completed()
      methods in the concurrent.futures package.

    (In Python 3.4 or later we may be able to unify the implementations.)
    """

    # Class variables serving as defaults for instance variables.
    _state = _PENDING
    _result = None
    _exception = None
    _loop = None
    _source_traceback = None
    _cancel_message = None
    # A saved CancelledError for later chaining as an exception context.
    _cancelled_exc = None

    # This field is used for a dual purpose:
    # - Its presence is a marker to declare that a class implements
    #   the Future protocol (i.e. is intended to be duck-type compatible).
    #   The value must also be not-None, to enable a subclass to declare
    #   that it is not compatible by setting this to None.
    # - It is set by __iter__() below so that Task._step() can tell
    #   the difference between
    #   `await Future()` or`yield from Future()` (correct) vs.
    #   `yield Future()` (incorrect).
    _asyncio_future_blocking = False

    # Name-mangled: tracks whether an unretrieved exception should be
    # reported from __del__().
    __log_traceback = False

    def __init__(self, *, loop=None):
        """Initialize the future.

        The optional event_loop argument allows explicitly setting the event
        loop object used by the future. If it's not provided, the future uses
        the default event loop.
        """
        if loop is None:
            self._loop = events._get_event_loop()
        else:
            self._loop = loop
        self._callbacks = []
        if self._loop.get_debug():
            # Remember where the future was created for debug reports.
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))

    _repr_info = base_futures._future_repr_info

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__,
                                ' '.join(self._repr_info()))

    def __del__(self):
        if not self.__log_traceback:
            # set_exception() was not called, or result() or exception()
            # has consumed the exception
            return
        exc = self._exception
        context = {
            'message':
                f'{self.__class__.__name__} exception was never retrieved',
            'exception': exc,
            'future': self,
        }
        if self._source_traceback:
            context['source_traceback'] = self._source_traceback
        self._loop.call_exception_handler(context)

    __class_getitem__ = classmethod(GenericAlias)

    @property
    def _log_traceback(self):
        return self.__log_traceback

    @_log_traceback.setter
    def _log_traceback(self, val):
        # Only clearing is allowed; setting happens internally in
        # set_exception().
        if val:
            raise ValueError('_log_traceback can only be set to False')
        self.__log_traceback = False

    def get_loop(self):
        """Return the event loop the Future is bound to."""
        loop = self._loop
        if loop is None:
            raise RuntimeError("Future object is not initialized.")
        return loop

    def _make_cancelled_error(self):
        """Create the CancelledError to raise if the Future is cancelled.

        This should only be called once when handling a cancellation since
        it erases the saved context exception value.
        """
        if self._cancel_message is None:
            exc = exceptions.CancelledError()
        else:
            exc = exceptions.CancelledError(self._cancel_message)
        exc.__context__ = self._cancelled_exc
        # Remove the reference since we don't need this anymore.
        self._cancelled_exc = None
        return exc

    def cancel(self, msg=None):
        """Cancel the future and schedule callbacks.

        If the future is already done or cancelled, return False. Otherwise,
        change the future's state to cancelled, schedule the callbacks and
        return True.
        """
        self.__log_traceback = False
        if self._state != _PENDING:
            return False
        self._state = _CANCELLED
        self._cancel_message = msg
        self.__schedule_callbacks()
        return True

    def __schedule_callbacks(self):
        """Internal: Ask the event loop to call all callbacks.

        The callbacks are scheduled to be called as soon as possible. Also
        clears the callback list.
        """
        callbacks = self._callbacks[:]
        if not callbacks:
            return

        self._callbacks[:] = []
        for callback, ctx in callbacks:
            self._loop.call_soon(callback, self, context=ctx)

    def cancelled(self):
        """Return True if the future was cancelled."""
        return self._state == _CANCELLED

    # Don't implement running(); see http://bugs.python.org/issue18699

    def done(self):
        """Return True if the future is done.

        Done means either that a result / exception are available, or that the
        future was cancelled.
        """
        return self._state != _PENDING

    def result(self):
        """Return the result this future represents.

        If the future has been cancelled, raises CancelledError. If the
        future's result isn't yet available, raises InvalidStateError. If
        the future is done and has an exception set, this exception is raised.
        """
        if self._state == _CANCELLED:
            exc = self._make_cancelled_error()
            raise exc
        if self._state != _FINISHED:
            raise exceptions.InvalidStateError('Result is not ready.')
        # Retrieving the result counts as consuming the exception.
        self.__log_traceback = False
        if self._exception is not None:
            # Re-raise with the traceback captured in set_exception().
            raise self._exception.with_traceback(self._exception_tb)
        return self._result

    def exception(self):
        """Return the exception that was set on this future.

        The exception (or None if no exception was set) is returned only if
        the future is done. If the future has been cancelled, raises
        CancelledError. If the future isn't done yet, raises
        InvalidStateError.
        """
        if self._state == _CANCELLED:
            exc = self._make_cancelled_error()
            raise exc
        if self._state != _FINISHED:
            raise exceptions.InvalidStateError('Exception is not set.')
        self.__log_traceback = False
        return self._exception

    def add_done_callback(self, fn, *, context=None):
        """Add a callback to be run when the future becomes done.

        The callback is called with a single argument - the future object. If
        the future is already done when this is called, the callback is
        scheduled with call_soon.
        """
        if self._state != _PENDING:
            self._loop.call_soon(fn, self, context=context)
        else:
            if context is None:
                context = contextvars.copy_context()
            self._callbacks.append((fn, context))

    # New method not in PEP 3148.

    def remove_done_callback(self, fn):
        """Remove all instances of a callback from the "call when done" list.

        Returns the number of callbacks removed.
        """
        filtered_callbacks = [(f, ctx)
                              for (f, ctx) in self._callbacks
                              if f != fn]
        removed_count = len(self._callbacks) - len(filtered_callbacks)
        if removed_count:
            self._callbacks[:] = filtered_callbacks
        return removed_count

    # So-called internal methods (note: no set_running_or_notify_cancel()).

    def set_result(self, result):
        """Mark the future done and set its result.

        If the future is already done when this method is called, raises
        InvalidStateError.
        """
        if self._state != _PENDING:
            raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
        self._result = result
        self._state = _FINISHED
        self.__schedule_callbacks()

    def set_exception(self, exception):
        """Mark the future done and set an exception.

        If the future is already done when this method is called, raises
        InvalidStateError.
        """
        if self._state != _PENDING:
            raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
        if isinstance(exception, type):
            # Accept an exception class and instantiate it.
            exception = exception()
        if type(exception) is StopIteration:
            raise TypeError("StopIteration interacts badly with generators "
                            "and cannot be raised into a Future")
        self._exception = exception
        # Save the traceback separately so result() can re-raise with it.
        self._exception_tb = exception.__traceback__
        self._state = _FINISHED
        self.__schedule_callbacks()
        self.__log_traceback = True

    def __await__(self):
        if not self.done():
            self._asyncio_future_blocking = True
            yield self  # This tells Task to wait for completion.
        if not self.done():
            raise RuntimeError("await wasn't used with future")
        return self.result()  # May raise too.

    __iter__ = __await__  # make compatible with 'yield from'.


# Needed for testing purposes.
_PyFuture = Future
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
def _get_loop(fut):
|
| 298 |
+
# Tries to call Future.get_loop() if it's available.
|
| 299 |
+
# Otherwise fallbacks to using the old '_loop' property.
|
| 300 |
+
try:
|
| 301 |
+
get_loop = fut.get_loop
|
| 302 |
+
except AttributeError:
|
| 303 |
+
pass
|
| 304 |
+
else:
|
| 305 |
+
return get_loop()
|
| 306 |
+
return fut._loop
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
def _set_result_unless_cancelled(fut, result):
|
| 310 |
+
"""Helper setting the result only if the future was not cancelled."""
|
| 311 |
+
if fut.cancelled():
|
| 312 |
+
return
|
| 313 |
+
fut.set_result(result)
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
def _convert_future_exc(exc):
    """Translate a concurrent.futures exception into its asyncio twin.

    Exceptions of any other (exact) type are returned unchanged; the
    original arguments are preserved on the replacement instance.
    """
    translations = {
        concurrent.futures.CancelledError: exceptions.CancelledError,
        concurrent.futures.TimeoutError: exceptions.TimeoutError,
        concurrent.futures.InvalidStateError: exceptions.InvalidStateError,
    }
    # Exact-type dispatch (dict lookup on a class is an identity match),
    # mirroring the original `type(exc) is ...` chain.
    asyncio_cls = translations.get(type(exc))
    if asyncio_cls is None:
        return exc
    return asyncio_cls(*exc.args)
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def _set_concurrent_future_state(concurrent, source):
    """Copy state from a future to a concurrent.futures.Future."""
    # *source* must already hold a final state (result/exception/cancelled).
    assert source.done()
    if source.cancelled():
        concurrent.cancel()
    if not concurrent.set_running_or_notify_cancel():
        # The concurrent future was cancelled on its side; nothing to copy.
        return
    exception = source.exception()
    if exception is not None:
        # Translate concurrent.futures exception types to asyncio ones.
        concurrent.set_exception(_convert_future_exc(exception))
    else:
        result = source.result()
        concurrent.set_result(result)
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def _copy_future_state(source, dest):
    """Internal helper to copy state from another Future.

    The other Future may be a concurrent.futures.Future.
    """
    assert source.done()
    if dest.cancelled():
        # Destination was cancelled independently; drop the state.
        return
    assert not dest.done()
    if source.cancelled():
        dest.cancel()
    else:
        exception = source.exception()
        if exception is not None:
            # Translate concurrent.futures exception types to asyncio ones.
            dest.set_exception(_convert_future_exc(exception))
        else:
            result = source.result()
            dest.set_result(result)
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def _chain_future(source, destination):
    """Chain two futures so that when one completes, so does the other.

    The result (or exception) of source will be copied to destination.
    If destination is cancelled, source gets cancelled too.
    Compatible with both asyncio.Future and concurrent.futures.Future.
    """
    if not isfuture(source) and not isinstance(source,
                                               concurrent.futures.Future):
        raise TypeError('A future is required for source argument')
    if not isfuture(destination) and not isinstance(destination,
                                                    concurrent.futures.Future):
        raise TypeError('A future is required for destination argument')
    # Loops are None for concurrent.futures.Future objects; they are used
    # below to decide whether a cross-thread call_soon_threadsafe is needed.
    source_loop = _get_loop(source) if isfuture(source) else None
    dest_loop = _get_loop(destination) if isfuture(destination) else None

    def _set_state(future, other):
        # Dispatch on the destination's flavor (asyncio vs concurrent).
        if isfuture(future):
            _copy_future_state(other, future)
        else:
            _set_concurrent_future_state(future, other)

    def _call_check_cancel(destination):
        # Runs when *destination* finishes: propagate cancellation upstream.
        if destination.cancelled():
            if source_loop is None or source_loop is dest_loop:
                source.cancel()
            else:
                # Source lives on another loop: hop threads safely.
                source_loop.call_soon_threadsafe(source.cancel)

    def _call_set_state(source):
        # Runs when *source* finishes: copy its state downstream.
        if (destination.cancelled() and
                dest_loop is not None and dest_loop.is_closed()):
            return
        if dest_loop is None or dest_loop is source_loop:
            _set_state(destination, source)
        else:
            if dest_loop.is_closed():
                # Nothing to deliver to; avoid calling into a closed loop.
                return
            dest_loop.call_soon_threadsafe(_set_state, destination, source)

    destination.add_done_callback(_call_check_cancel)
    source.add_done_callback(_call_set_state)
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def wrap_future(future, *, loop=None):
    """Wrap concurrent.futures.Future object."""
    if isfuture(future):
        # Already asyncio-compatible; return it unchanged.
        return future
    assert isinstance(future, concurrent.futures.Future), \
        f'concurrent.futures.Future is expected, got {future!r}'
    if loop is None:
        loop = events._get_event_loop()
    # Mirror the concurrent future's eventual state onto a fresh asyncio
    # future bound to *loop*.
    new_future = loop.create_future()
    _chain_future(future, new_future)
    return new_future
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
try:
|
| 421 |
+
import _asyncio
|
| 422 |
+
except ImportError:
|
| 423 |
+
pass
|
| 424 |
+
else:
|
| 425 |
+
# _CFuture is needed for tests.
|
| 426 |
+
Future = _CFuture = _asyncio.Future
|
omnilmm/lib/python3.10/asyncio/locks.py
ADDED
|
@@ -0,0 +1,438 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Synchronization primitives."""
|
| 2 |
+
|
| 3 |
+
__all__ = ('Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore')
|
| 4 |
+
|
| 5 |
+
import collections
|
| 6 |
+
|
| 7 |
+
from . import exceptions
|
| 8 |
+
from . import mixins
|
| 9 |
+
from . import tasks
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class _ContextManagerMixin:
|
| 13 |
+
async def __aenter__(self):
|
| 14 |
+
await self.acquire()
|
| 15 |
+
# We have no use for the "as ..." clause in the with
|
| 16 |
+
# statement for locks.
|
| 17 |
+
return None
|
| 18 |
+
|
| 19 |
+
async def __aexit__(self, exc_type, exc, tb):
|
| 20 |
+
self.release()
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class Lock(_ContextManagerMixin, mixins._LoopBoundMixin):
    """Primitive lock objects.

    A primitive lock is a synchronization primitive that is not owned
    by a particular coroutine when locked. A primitive lock is in one
    of two states, 'locked' or 'unlocked'.

    It is created in the unlocked state. It has two basic methods,
    acquire() and release(). When the state is unlocked, acquire()
    changes the state to locked and returns immediately. When the
    state is locked, acquire() blocks until a call to release() in
    another coroutine changes it to unlocked, then the acquire() call
    resets it to locked and returns. The release() method should only
    be called in the locked state; it changes the state to unlocked
    and returns immediately. If an attempt is made to release an
    unlocked lock, a RuntimeError will be raised.

    When more than one coroutine is blocked in acquire() waiting for
    the state to turn to unlocked, only one coroutine proceeds when a
    release() call resets the state to unlocked; first coroutine which
    is blocked in acquire() is being processed.

    acquire() is a coroutine and should be called with 'await'.

    Locks also support the asynchronous context management protocol.
    'async with lock' statement should be used.

    Usage:

        lock = Lock()
        ...
        await lock.acquire()
        try:
            ...
        finally:
            lock.release()

    Context manager usage:

        lock = Lock()
        ...
        async with lock:
             ...

    Lock objects can be tested for locking state:

        if not lock.locked():
           await lock.acquire()
        else:
           # lock is acquired
           ...

    """

    def __init__(self, *, loop=mixins._marker):
        super().__init__(loop=loop)
        # _waiters is created lazily on first contended acquire().
        self._waiters = None
        self._locked = False

    def __repr__(self):
        res = super().__repr__()
        extra = 'locked' if self._locked else 'unlocked'
        if self._waiters:
            extra = f'{extra}, waiters:{len(self._waiters)}'
        return f'<{res[1:-1]} [{extra}]>'

    def locked(self):
        """Return True if lock is acquired."""
        return self._locked

    async def acquire(self):
        """Acquire a lock.

        This method blocks until the lock is unlocked, then sets it to
        locked and returns True.
        """
        # Fast path: free, and no live waiter that should go first.
        if (not self._locked and (self._waiters is None or
                all(w.cancelled() for w in self._waiters))):
            self._locked = True
            return True

        if self._waiters is None:
            self._waiters = collections.deque()
        fut = self._get_loop().create_future()
        self._waiters.append(fut)

        # Finally block should be called before the CancelledError
        # handling as we don't want CancelledError to call
        # _wake_up_first() and attempt to wake up itself.
        try:
            try:
                await fut
            finally:
                self._waiters.remove(fut)
        except exceptions.CancelledError:
            # If we were cancelled after being woken but before taking the
            # lock, hand the wakeup on to the next waiter.
            if not self._locked:
                self._wake_up_first()
            raise

        self._locked = True
        return True

    def release(self):
        """Release a lock.

        When the lock is locked, reset it to unlocked, and return.
        If any other coroutines are blocked waiting for the lock to become
        unlocked, allow exactly one of them to proceed.

        When invoked on an unlocked lock, a RuntimeError is raised.

        There is no return value.
        """
        if self._locked:
            self._locked = False
            self._wake_up_first()
        else:
            raise RuntimeError('Lock is not acquired.')

    def _wake_up_first(self):
        """Wake up the first waiter if it isn't done."""
        if not self._waiters:
            return
        try:
            fut = next(iter(self._waiters))
        except StopIteration:
            return

        # .done() necessarily means that a waiter will wake up later on and
        # either take the lock, or, if it was cancelled and lock wasn't
        # taken already, will hit this again and wake up a new waiter.
        if not fut.done():
            fut.set_result(True)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
class Event(mixins._LoopBoundMixin):
    """Asynchronous equivalent to threading.Event.

    Class implementing event objects. An event manages a flag that can be set
    to true with the set() method and reset to false with the clear() method.
    The wait() method blocks until the flag is true. The flag is initially
    false.
    """

    def __init__(self, *, loop=mixins._marker):
        super().__init__(loop=loop)
        # Futures of coroutines currently blocked in wait().
        self._waiters = collections.deque()
        self._value = False

    def __repr__(self):
        res = super().__repr__()
        extra = 'set' if self._value else 'unset'
        if self._waiters:
            extra = f'{extra}, waiters:{len(self._waiters)}'
        return f'<{res[1:-1]} [{extra}]>'

    def is_set(self):
        """Return True if and only if the internal flag is true."""
        return self._value

    def set(self):
        """Set the internal flag to true. All coroutines waiting for it to
        become true are awakened. Coroutine that call wait() once the flag is
        true will not block at all.
        """
        if not self._value:
            self._value = True

            for fut in self._waiters:
                # Skip futures already finished/cancelled.
                if not fut.done():
                    fut.set_result(True)

    def clear(self):
        """Reset the internal flag to false. Subsequently, coroutines calling
        wait() will block until set() is called to set the internal flag
        to true again."""
        self._value = False

    async def wait(self):
        """Block until the internal flag is true.

        If the internal flag is true on entry, return True
        immediately. Otherwise, block until another coroutine calls
        set() to set the flag to true, then return True.
        """
        if self._value:
            return True

        fut = self._get_loop().create_future()
        self._waiters.append(fut)
        try:
            await fut
            return True
        finally:
            # Always unregister, including on cancellation.
            self._waiters.remove(fut)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
class Condition(_ContextManagerMixin, mixins._LoopBoundMixin):
    """Asynchronous equivalent to threading.Condition.

    This class implements condition variable objects. A condition variable
    allows one or more coroutines to wait until they are notified by another
    coroutine.

    A new Lock object is created and used as the underlying lock.
    """

    def __init__(self, lock=None, *, loop=mixins._marker):
        super().__init__(loop=loop)
        if lock is None:
            lock = Lock()

        self._lock = lock
        # Export the lock's locked(), acquire() and release() methods.
        self.locked = lock.locked
        self.acquire = lock.acquire
        self.release = lock.release

        # Futures for the coroutines currently blocked in wait().
        self._waiters = collections.deque()

    def __repr__(self):
        res = super().__repr__()
        extra = 'locked' if self.locked() else 'unlocked'
        if self._waiters:
            extra = f'{extra}, waiters:{len(self._waiters)}'
        return f'<{res[1:-1]} [{extra}]>'

    async def wait(self):
        """Wait until notified.

        If the calling coroutine has not acquired the lock when this
        method is called, a RuntimeError is raised.

        This method releases the underlying lock, and then blocks
        until it is awakened by a notify() or notify_all() call for
        the same condition variable in another coroutine.  Once
        awakened, it re-acquires the lock and returns True.
        """
        if not self.locked():
            raise RuntimeError('cannot wait on un-acquired lock')

        self.release()
        try:
            fut = self._get_loop().create_future()
            self._waiters.append(fut)
            try:
                await fut
                return True
            finally:
                # Deregister whether we were notified or cancelled.
                self._waiters.remove(fut)

        finally:
            # Must reacquire lock even if wait is cancelled.  Any
            # CancelledError raised while reacquiring is swallowed here
            # and re-raised only once the lock is held again, so the
            # caller observes both the cancellation and a locked state.
            cancelled = False
            while True:
                try:
                    await self.acquire()
                    break
                except exceptions.CancelledError:
                    cancelled = True

            if cancelled:
                raise exceptions.CancelledError

    async def wait_for(self, predicate):
        """Wait until a predicate becomes true.

        The predicate should be a callable which result will be
        interpreted as a boolean value.  The final predicate value is
        the return value.
        """
        # Re-evaluate after every wake-up: notify() does not guarantee
        # the predicate holds for this particular waiter.
        result = predicate()
        while not result:
            await self.wait()
            result = predicate()
        return result

    def notify(self, n=1):
        """By default, wake up one coroutine waiting on this condition, if any.
        If the calling coroutine has not acquired the lock when this method
        is called, a RuntimeError is raised.

        This method wakes up at most n of the coroutines waiting for the
        condition variable; it is a no-op if no coroutines are waiting.

        Note: an awakened coroutine does not actually return from its
        wait() call until it can reacquire the lock. Since notify() does
        not release the lock, its caller should.
        """
        if not self.locked():
            raise RuntimeError('cannot notify on un-acquired lock')

        idx = 0
        for fut in self._waiters:
            if idx >= n:
                break

            if not fut.done():
                # Only live futures count toward n; already-done
                # (e.g. cancelled) waiters are skipped.
                idx += 1
                fut.set_result(False)

    def notify_all(self):
        """Wake up all coroutines waiting on this condition.  This method acts
        like notify(), but wakes up all waiting coroutines instead of one.
        If the calling coroutine has not acquired the lock when this method
        is called, a RuntimeError is raised.
        """
        self.notify(len(self._waiters))
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
class Semaphore(_ContextManagerMixin, mixins._LoopBoundMixin):
    """A Semaphore implementation.

    A semaphore manages an internal counter which is decremented by each
    acquire() call and incremented by each release() call. The counter
    can never go below zero; when acquire() finds that it is zero, it blocks,
    waiting until some other coroutine calls release().

    Semaphores also support the context management protocol.

    The optional argument gives the initial value for the internal
    counter; it defaults to 1. If the value given is less than 0,
    ValueError is raised.
    """

    def __init__(self, value=1, *, loop=mixins._marker):
        super().__init__(loop=loop)
        if value < 0:
            raise ValueError("Semaphore initial value must be >= 0")
        # Waiter deque is created lazily, on the first blocking acquire().
        self._waiters = None
        self._value = value

    def __repr__(self):
        res = super().__repr__()
        extra = 'locked' if self.locked() else f'unlocked, value:{self._value}'
        if self._waiters:
            extra = f'{extra}, waiters:{len(self._waiters)}'
        return f'<{res[1:-1]} [{extra}]>'

    def locked(self):
        """Returns True if semaphore cannot be acquired immediately."""
        # Locked when the counter is exhausted, or when live
        # (non-cancelled) waiters are already queued; the second clause
        # makes new acquirers queue behind existing waiters.
        return self._value == 0 or (
            any(not w.cancelled() for w in (self._waiters or ())))

    async def acquire(self):
        """Acquire a semaphore.

        If the internal counter is larger than zero on entry,
        decrement it by one and return True immediately.  If it is
        zero on entry, block, waiting until some other coroutine has
        called release() to make it larger than 0, and then return
        True.
        """
        if not self.locked():
            # Fast path: a slot is free and nobody is queued ahead.
            self._value -= 1
            return True

        if self._waiters is None:
            self._waiters = collections.deque()
        fut = self._get_loop().create_future()
        self._waiters.append(fut)

        # Finally block should be called before the CancelledError
        # handling as we don't want CancelledError to call
        # _wake_up_next() and attempt to wake up itself.
        try:
            try:
                await fut
            finally:
                self._waiters.remove(fut)
        except exceptions.CancelledError:
            # If our future completed (a slot was handed to us) but the
            # task got cancelled anyway, give the slot back and pass the
            # wake-up on to the next waiter.
            if not fut.cancelled():
                self._value += 1
                self._wake_up_next()
            raise

        if self._value > 0:
            # release() may have bumped the counter while we were being
            # woken; propagate the surplus to another waiter.
            self._wake_up_next()
        return True

    def release(self):
        """Release a semaphore, incrementing the internal counter by one.

        When it was zero on entry and another coroutine is waiting for it to
        become larger than zero again, wake up that coroutine.
        """
        self._value += 1
        self._wake_up_next()

    def _wake_up_next(self):
        """Wake up the first waiter that isn't done."""
        if not self._waiters:
            return

        for fut in self._waiters:
            if not fut.done():
                # Hand the counter slot directly to this waiter; it will
                # observe the result in acquire().
                self._value -= 1
                fut.set_result(True)
                return
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
class BoundedSemaphore(Semaphore):
    """A bounded semaphore implementation.

    This raises ValueError in release() if it would increase the value
    above the initial value.
    """

    def __init__(self, value=1, *, loop=mixins._marker):
        # Remember the initial value so release() can enforce the bound.
        self._bound_value = value
        super().__init__(value, loop=loop)

    def release(self):
        """Release the semaphore, refusing to grow past the initial value."""
        if self._value >= self._bound_value:
            raise ValueError('BoundedSemaphore released too many times')
        super().release()
|
omnilmm/lib/python3.10/asyncio/log.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Logging configuration."""
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# Name the logger after the package.
|
| 7 |
+
logger = logging.getLogger(__package__)
|
omnilmm/lib/python3.10/asyncio/mixins.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Event loop mixins."""
|
| 2 |
+
|
| 3 |
+
import threading
|
| 4 |
+
from . import events
|
| 5 |
+
|
| 6 |
+
_global_lock = threading.Lock()
|
| 7 |
+
|
| 8 |
+
# Used as a sentinel for loop parameter
|
| 9 |
+
_marker = object()
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class _LoopBoundMixin:
    # The event loop this object was first used with; None until bound.
    _loop = None

    def __init__(self, *, loop=_marker):
        # The *loop* parameter was removed in 3.10; any explicitly passed
        # value (even None) is rejected.
        if loop is not _marker:
            raise TypeError(
                f'As of 3.10, the *loop* parameter was removed from '
                f'{type(self).__name__}() since it is no longer necessary'
            )

    def _get_loop(self):
        """Return the running loop, binding this object to it on first use.

        Raises RuntimeError when called from a loop other than the one
        the object was originally bound to.
        """
        loop = events._get_running_loop()

        # Double-checked binding: test without the lock for speed, then
        # re-test under the global lock before publishing the binding, so
        # concurrent first calls from different threads agree on one loop.
        if self._loop is None:
            with _global_lock:
                if self._loop is None:
                    self._loop = loop
        if loop is not self._loop:
            raise RuntimeError(f'{self!r} is bound to a different event loop')
        return loop
|
omnilmm/lib/python3.10/asyncio/protocols.py
ADDED
|
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Abstract Protocol base classes."""
|
| 2 |
+
|
| 3 |
+
__all__ = (
|
| 4 |
+
'BaseProtocol', 'Protocol', 'DatagramProtocol',
|
| 5 |
+
'SubprocessProtocol', 'BufferedProtocol',
|
| 6 |
+
)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class BaseProtocol:
    """Common base class for protocol interfaces.

    Usually user implements protocols that derived from BaseProtocol
    like Protocol or ProcessProtocol.

    The only case when BaseProtocol should be implemented directly is
    write-only transport like write pipe
    """

    # No instance attributes here, so subclasses stay free to declare
    # their own __slots__ and remain lightweight.
    __slots__ = ()

    # All callbacks below are intentional no-ops: subclasses override
    # only the events they care about.

    def connection_made(self, transport):
        """Called when a connection is made.

        The argument is the transport representing the pipe connection.
        To receive data, wait for data_received() calls.
        When the connection is closed, connection_lost() is called.
        """

    def connection_lost(self, exc):
        """Called when the connection is lost or closed.

        The argument is an exception object or None (the latter
        meaning a regular EOF is received or the connection was
        aborted or closed).
        """

    def pause_writing(self):
        """Called when the transport's buffer goes over the high-water mark.

        Pause and resume calls are paired -- pause_writing() is called
        once when the buffer goes strictly over the high-water mark
        (even if subsequent writes increases the buffer size even
        more), and eventually resume_writing() is called once when the
        buffer size reaches the low-water mark.

        Note that if the buffer size equals the high-water mark,
        pause_writing() is not called -- it must go strictly over.
        Conversely, resume_writing() is called when the buffer size is
        equal or lower than the low-water mark.  These end conditions
        are important to ensure that things go as expected when either
        mark is zero.

        NOTE: This is the only Protocol callback that is not called
        through EventLoop.call_soon() -- if it were, it would have no
        effect when it's most needed (when the app keeps writing
        without yielding until pause_writing() is called).
        """

    def resume_writing(self):
        """Called when the transport's buffer drains below the low-water mark.

        See pause_writing() for details.
        """
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class Protocol(BaseProtocol):
    """Interface for stream protocol.

    The user should implement this interface.  They can inherit from
    this class but don't need to.  The implementations here do
    nothing (they don't raise exceptions).

    When the user wants to requests a transport, they pass a protocol
    factory to a utility function (e.g., EventLoop.create_connection()).

    When the connection is made successfully, connection_made() is
    called with a suitable transport object.  Then data_received()
    will be called 0 or more times with data (bytes) received from the
    transport; finally, connection_lost() will be called exactly once
    with either an exception object or None as an argument.

    State machine of calls:

      start -> CM [-> DR*] [-> ER?] -> CL -> end

    * CM: connection_made()
    * DR: data_received()
    * ER: eof_received()
    * CL: connection_lost()
    """

    # No instance attributes of its own; see BaseProtocol.
    __slots__ = ()

    def data_received(self, data):
        """Called when some data is received.

        The argument is a bytes object.
        """

    def eof_received(self):
        """Called when the other end calls write_eof() or equivalent.

        If this returns a false value (including None), the transport
        will close itself.  If it returns a true value, closing the
        transport is up to the protocol.
        """
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
class BufferedProtocol(BaseProtocol):
    """Interface for stream protocol with manual buffer control.

    Event methods, such as `create_server` and `create_connection`,
    accept factories that return protocols that implement this interface.

    The idea of BufferedProtocol is that it allows to manually allocate
    and control the receive buffer.  Event loops can then use the buffer
    provided by the protocol to avoid unnecessary data copies.  This
    can result in noticeable performance improvement for protocols that
    receive big amounts of data.  Sophisticated protocols can allocate
    the buffer only once at creation time.

    State machine of calls:

      start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end

    * CM: connection_made()
    * GB: get_buffer()
    * BU: buffer_updated()
    * ER: eof_received()
    * CL: connection_lost()
    """

    # No instance attributes of its own; see BaseProtocol.
    __slots__ = ()

    def get_buffer(self, sizehint):
        """Called to allocate a new receive buffer.

        *sizehint* is a recommended minimal size for the returned
        buffer.  When set to -1, the buffer size can be arbitrary.

        Must return an object that implements the
        :ref:`buffer protocol <bufferobjects>`.
        It is an error to return a zero-sized buffer.
        """

    def buffer_updated(self, nbytes):
        """Called when the buffer was updated with the received data.

        *nbytes* is the total number of bytes that were written to
        the buffer.
        """

    def eof_received(self):
        """Called when the other end calls write_eof() or equivalent.

        If this returns a false value (including None), the transport
        will close itself.  If it returns a true value, closing the
        transport is up to the protocol.
        """
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class DatagramProtocol(BaseProtocol):
    """Interface for datagram protocol."""

    # No instance attributes of its own; see BaseProtocol.
    __slots__ = ()

    def datagram_received(self, data, addr):
        """Called when some datagram is received."""

    def error_received(self, exc):
        """Called when a send or receive operation raises an OSError.

        (Other than BlockingIOError or InterruptedError.)
        """
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
class SubprocessProtocol(BaseProtocol):
    """Interface for protocol for subprocess calls."""

    # No instance attributes of its own; see BaseProtocol.
    __slots__ = ()

    def pipe_data_received(self, fd, data):
        """Called when the subprocess writes data into stdout/stderr pipe.

        fd is int file descriptor.
        data is bytes object.
        """

    def pipe_connection_lost(self, fd, exc):
        """Called when a file descriptor associated with the child process is
        closed.

        fd is the int file descriptor that was closed.
        """

    def process_exited(self):
        """Called when subprocess has exited."""
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def _feed_data_to_buffered_proto(proto, data):
|
| 201 |
+
data_len = len(data)
|
| 202 |
+
while data_len:
|
| 203 |
+
buf = proto.get_buffer(data_len)
|
| 204 |
+
buf_len = len(buf)
|
| 205 |
+
if not buf_len:
|
| 206 |
+
raise RuntimeError('get_buffer() returned an empty buffer')
|
| 207 |
+
|
| 208 |
+
if buf_len >= data_len:
|
| 209 |
+
buf[:data_len] = data
|
| 210 |
+
proto.buffer_updated(data_len)
|
| 211 |
+
return
|
| 212 |
+
else:
|
| 213 |
+
buf[:buf_len] = data[:buf_len]
|
| 214 |
+
proto.buffer_updated(buf_len)
|
| 215 |
+
data = data[buf_len:]
|
| 216 |
+
data_len = len(data)
|
omnilmm/lib/python3.10/asyncio/runners.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = 'run',
|
| 2 |
+
|
| 3 |
+
from . import coroutines
|
| 4 |
+
from . import events
|
| 5 |
+
from . import tasks
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def run(main, *, debug=None):
    """Execute the coroutine and return the result.

    This function runs the passed coroutine, taking care of
    managing the asyncio event loop and finalizing asynchronous
    generators.

    This function cannot be called when another asyncio event loop is
    running in the same thread.

    If debug is True, the event loop will be run in debug mode.

    This function always creates a new event loop and closes it at the end.
    It should be used as a main entry point for asyncio programs, and should
    ideally only be called once.

    Example:

        async def main():
            await asyncio.sleep(1)
            print('hello')

        asyncio.run(main())
    """
    if events._get_running_loop() is not None:
        raise RuntimeError(
            "asyncio.run() cannot be called from a running event loop")

    if not coroutines.iscoroutine(main):
        raise ValueError("a coroutine was expected, got {!r}".format(main))

    loop = events.new_event_loop()
    try:
        events.set_event_loop(loop)
        if debug is not None:
            # Only override the debug setting when explicitly requested.
            loop.set_debug(debug)
        return loop.run_until_complete(main)
    finally:
        try:
            # Orderly shutdown: cancel whatever tasks are still pending,
            # then let async generators and the default executor wind down
            # before the loop is closed.
            _cancel_all_tasks(loop)
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.run_until_complete(loop.shutdown_default_executor())
        finally:
            # Always unset and close the loop, even if shutdown failed.
            events.set_event_loop(None)
            loop.close()
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _cancel_all_tasks(loop):
    """Cancel every task still pending on *loop* and drain them.

    Any non-cancellation exception a task finished with is forwarded
    to the loop's exception handler.
    """
    pending = tasks.all_tasks(loop)
    if not pending:
        return

    for task in pending:
        task.cancel()

    # Wait for all cancellations to complete; collect exceptions rather
    # than letting the first one propagate.
    loop.run_until_complete(tasks.gather(*pending, return_exceptions=True))

    for task in pending:
        if task.cancelled():
            continue
        exc = task.exception()
        if exc is not None:
            loop.call_exception_handler({
                'message': 'unhandled exception during asyncio.run() shutdown',
                'exception': exc,
                'task': task,
            })
|
omnilmm/lib/python3.10/asyncio/selector_events.py
ADDED
|
@@ -0,0 +1,1105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Event loop using a selector and related classes.
|
| 2 |
+
|
| 3 |
+
A selector is a "notify-when-ready" multiplexer. For a subclass which
|
| 4 |
+
also includes support for signal handling, see the unix_events sub-module.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
__all__ = 'BaseSelectorEventLoop',
|
| 8 |
+
|
| 9 |
+
import collections
|
| 10 |
+
import errno
|
| 11 |
+
import functools
|
| 12 |
+
import selectors
|
| 13 |
+
import socket
|
| 14 |
+
import warnings
|
| 15 |
+
import weakref
|
| 16 |
+
try:
|
| 17 |
+
import ssl
|
| 18 |
+
except ImportError: # pragma: no cover
|
| 19 |
+
ssl = None
|
| 20 |
+
|
| 21 |
+
from . import base_events
|
| 22 |
+
from . import constants
|
| 23 |
+
from . import events
|
| 24 |
+
from . import futures
|
| 25 |
+
from . import protocols
|
| 26 |
+
from . import sslproto
|
| 27 |
+
from . import transports
|
| 28 |
+
from . import trsock
|
| 29 |
+
from .log import logger
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _test_selector_event(selector, fd, event):
|
| 33 |
+
# Test if the selector is monitoring 'event' events
|
| 34 |
+
# for the file descriptor 'fd'.
|
| 35 |
+
try:
|
| 36 |
+
key = selector.get_key(fd)
|
| 37 |
+
except KeyError:
|
| 38 |
+
return False
|
| 39 |
+
else:
|
| 40 |
+
return bool(key.events & event)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class BaseSelectorEventLoop(base_events.BaseEventLoop):
|
| 44 |
+
"""Selector event loop.
|
| 45 |
+
|
| 46 |
+
See events.EventLoop for API specification.
|
| 47 |
+
"""
|
| 48 |
+
|
| 49 |
+
    def __init__(self, selector=None):
        """Initialize the loop with *selector* (DefaultSelector if None)."""
        super().__init__()

        if selector is None:
            # Best selector implementation for the current platform.
            selector = selectors.DefaultSelector()
        logger.debug('Using selector: %s', selector.__class__.__name__)
        self._selector = selector
        # Self-pipe used to wake the selector up (see _write_to_self).
        self._make_self_pipe()
        # fd -> transport; weak values so entries vanish when a
        # transport is garbage-collected.
        self._transports = weakref.WeakValueDictionary()
|
| 58 |
+
|
| 59 |
+
    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        """Create a selector-based socket transport wrapping *sock*."""
        return _SelectorSocketTransport(self, sock, protocol, waiter,
                                        extra, server)
|
| 63 |
+
|
| 64 |
+
    def _make_ssl_transport(
            self, rawsock, protocol, sslcontext, waiter=None,
            *, server_side=False, server_hostname=None,
            extra=None, server=None,
            ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
        """Wrap *rawsock* in an SSLProtocol and return the app-side transport."""
        ssl_protocol = sslproto.SSLProtocol(
            self, protocol, sslcontext, waiter,
            server_side, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
        # The raw socket transport feeds bytes into the SSLProtocol; the
        # caller interacts with the separate application-side transport.
        _SelectorSocketTransport(self, rawsock, ssl_protocol,
                                 extra=extra, server=server)
        return ssl_protocol._app_transport
|
| 76 |
+
|
| 77 |
+
    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        """Create a selector-based datagram (UDP-style) transport for *sock*."""
        return _SelectorDatagramTransport(self, sock, protocol,
                                          address, waiter, extra)
|
| 81 |
+
|
| 82 |
+
    def close(self):
        """Close the event loop and its selector.

        Raises RuntimeError if the loop is currently running; calling
        close() on an already-closed loop is a no-op.
        """
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self.is_closed():
            return
        # Close the wake-up pipe before the base class tears down, then
        # release the selector last.
        self._close_self_pipe()
        super().close()
        if self._selector is not None:
            self._selector.close()
            self._selector = None
|
| 92 |
+
|
| 93 |
+
    def _close_self_pipe(self):
        # Tear down both ends of the wake-up socket pair created by
        # _make_self_pipe() and stop counting it among the loop's
        # internal file descriptors.
        self._remove_reader(self._ssock.fileno())
        self._ssock.close()
        self._ssock = None
        self._csock.close()
        self._csock = None
        self._internal_fds -= 1
|
| 100 |
+
|
| 101 |
+
def _make_self_pipe(self):
    # A self-socket, really. :-)
    # Creates the socket pair used to wake the selector from other
    # threads: _write_to_self() sends a byte that _read_from_self()
    # drains on the loop thread.
    self._ssock, self._csock = socket.socketpair()
    self._ssock.setblocking(False)
    self._csock.setblocking(False)
    self._internal_fds += 1
    self._add_reader(self._ssock.fileno(), self._read_from_self)
|
| 108 |
+
|
| 109 |
+
def _process_self_data(self, data):
|
| 110 |
+
pass
|
| 111 |
+
|
| 112 |
+
def _read_from_self(self):
|
| 113 |
+
while True:
|
| 114 |
+
try:
|
| 115 |
+
data = self._ssock.recv(4096)
|
| 116 |
+
if not data:
|
| 117 |
+
break
|
| 118 |
+
self._process_self_data(data)
|
| 119 |
+
except InterruptedError:
|
| 120 |
+
continue
|
| 121 |
+
except BlockingIOError:
|
| 122 |
+
break
|
| 123 |
+
|
| 124 |
+
def _write_to_self(self):
|
| 125 |
+
# This may be called from a different thread, possibly after
|
| 126 |
+
# _close_self_pipe() has been called or even while it is
|
| 127 |
+
# running. Guard for self._csock being None or closed. When
|
| 128 |
+
# a socket is closed, send() raises OSError (with errno set to
|
| 129 |
+
# EBADF, but let's not rely on the exact error code).
|
| 130 |
+
csock = self._csock
|
| 131 |
+
if csock is None:
|
| 132 |
+
return
|
| 133 |
+
|
| 134 |
+
try:
|
| 135 |
+
csock.send(b'\0')
|
| 136 |
+
except OSError:
|
| 137 |
+
if self._debug:
|
| 138 |
+
logger.debug("Fail to write a null byte into the "
|
| 139 |
+
"self-pipe socket",
|
| 140 |
+
exc_info=True)
|
| 141 |
+
|
| 142 |
+
def _start_serving(self, protocol_factory, sock,
                   sslcontext=None, server=None, backlog=100,
                   ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
    """Watch the listening *sock* and accept connections as they arrive."""
    self._add_reader(
        sock.fileno(), self._accept_connection,
        protocol_factory, sock, sslcontext, server, backlog,
        ssl_handshake_timeout)
|
| 148 |
+
|
| 149 |
+
def _accept_connection(
        self, protocol_factory, sock,
        sslcontext=None, server=None, backlog=100,
        ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
    """Selector callback for a readable listening socket.

    Accepts up to *backlog* pending connections and schedules
    _accept_connection2() for each; on fd exhaustion it temporarily
    unregisters the listener and retries later.
    """
    # This method is only called once for each event loop tick where the
    # listening socket has triggered an EVENT_READ. There may be multiple
    # connections waiting for an .accept() so it is called in a loop.
    # See https://bugs.python.org/issue27906 for more details.
    for _ in range(backlog):
        try:
            conn, addr = sock.accept()
            if self._debug:
                logger.debug("%r got a new connection from %r: %r",
                             server, addr, conn)
            conn.setblocking(False)
        except (BlockingIOError, InterruptedError, ConnectionAbortedError):
            # Early exit because the socket accept buffer is empty.
            return None
        except OSError as exc:
            # There's nowhere to send the error, so just log it.
            if exc.errno in (errno.EMFILE, errno.ENFILE,
                             errno.ENOBUFS, errno.ENOMEM):
                # Some platforms (e.g. Linux keep reporting the FD as
                # ready, so we remove the read handler temporarily.
                # We'll try again in a while.
                self.call_exception_handler({
                    'message': 'socket.accept() out of system resource',
                    'exception': exc,
                    'socket': trsock.TransportSocket(sock),
                })
                self._remove_reader(sock.fileno())
                self.call_later(constants.ACCEPT_RETRY_DELAY,
                                self._start_serving,
                                protocol_factory, sock, sslcontext, server,
                                backlog, ssl_handshake_timeout)
            else:
                raise  # The event loop will catch, log and ignore it.
        else:
            extra = {'peername': addr}
            # Transport/protocol setup is async (SSL handshake may be
            # involved), so run it as a task.
            accept = self._accept_connection2(
                protocol_factory, conn, extra, sslcontext, server,
                ssl_handshake_timeout)
            self.create_task(accept)
|
| 192 |
+
|
| 193 |
+
async def _accept_connection2(
        self, protocol_factory, conn, extra,
        sslcontext=None, server=None,
        ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
    """Build the protocol and transport for one accepted connection.

    Waits until connection_made() (and the SSL handshake, if any) has
    completed; on failure the transport is closed and the error is
    reported through the loop's exception handler.
    """
    protocol = None
    transport = None
    try:
        protocol = protocol_factory()
        waiter = self.create_future()
        if sslcontext:
            transport = self._make_ssl_transport(
                conn, protocol, sslcontext, waiter=waiter,
                server_side=True, extra=extra, server=server,
                ssl_handshake_timeout=ssl_handshake_timeout)
        else:
            transport = self._make_socket_transport(
                conn, protocol, waiter=waiter, extra=extra,
                server=server)

        try:
            # Resolved once connection_made() has run (and, for SSL,
            # the handshake finished).
            await waiter
        except BaseException:
            transport.close()
            raise
        # It's now up to the protocol to handle the connection.

    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as exc:
        if self._debug:
            context = {
                'message':
                    'Error on transport creation for incoming connection',
                'exception': exc,
            }
            if protocol is not None:
                context['protocol'] = protocol
            if transport is not None:
                context['transport'] = transport
            self.call_exception_handler(context)
|
| 233 |
+
|
| 234 |
+
def _ensure_fd_no_transport(self, fd):
|
| 235 |
+
fileno = fd
|
| 236 |
+
if not isinstance(fileno, int):
|
| 237 |
+
try:
|
| 238 |
+
fileno = int(fileno.fileno())
|
| 239 |
+
except (AttributeError, TypeError, ValueError):
|
| 240 |
+
# This code matches selectors._fileobj_to_fd function.
|
| 241 |
+
raise ValueError(f"Invalid file object: {fd!r}") from None
|
| 242 |
+
try:
|
| 243 |
+
transport = self._transports[fileno]
|
| 244 |
+
except KeyError:
|
| 245 |
+
pass
|
| 246 |
+
else:
|
| 247 |
+
if not transport.is_closing():
|
| 248 |
+
raise RuntimeError(
|
| 249 |
+
f'File descriptor {fd!r} is used by transport '
|
| 250 |
+
f'{transport!r}')
|
| 251 |
+
|
| 252 |
+
def _add_reader(self, fd, callback, *args):
    """Register *callback* for EVENT_READ on *fd*; return its Handle.

    Selector key data is always a (reader_handle, writer_handle) pair;
    an existing reader handle for the fd is cancelled and replaced.
    """
    self._check_closed()
    handle = events.Handle(callback, args, self, None)
    try:
        key = self._selector.get_key(fd)
    except KeyError:
        # fd not registered yet: fresh (reader, writer) pair.
        self._selector.register(fd, selectors.EVENT_READ,
                                (handle, None))
    else:
        mask, (reader, writer) = key.events, key.data
        self._selector.modify(fd, mask | selectors.EVENT_READ,
                              (handle, writer))
        if reader is not None:
            # Cancel the handle being replaced.
            reader.cancel()
    return handle
|
| 267 |
+
|
| 268 |
+
def _remove_reader(self, fd):
    """Stop watching *fd* for reads; return True if a reader was removed."""
    if self.is_closed():
        return False
    try:
        key = self._selector.get_key(fd)
    except KeyError:
        return False
    else:
        mask, (reader, writer) = key.events, key.data
        mask &= ~selectors.EVENT_READ
        if not mask:
            # No writer interest either: drop the registration entirely.
            self._selector.unregister(fd)
        else:
            self._selector.modify(fd, mask, (None, writer))

        if reader is not None:
            reader.cancel()
            return True
        else:
            return False
|
| 288 |
+
|
| 289 |
+
def _add_writer(self, fd, callback, *args):
    """Register *callback* for EVENT_WRITE on *fd*; return its Handle.

    Mirror image of _add_reader(): the writer slot of the
    (reader_handle, writer_handle) pair is replaced, cancelling any
    previous writer handle.
    """
    self._check_closed()
    handle = events.Handle(callback, args, self, None)
    try:
        key = self._selector.get_key(fd)
    except KeyError:
        self._selector.register(fd, selectors.EVENT_WRITE,
                                (None, handle))
    else:
        mask, (reader, writer) = key.events, key.data
        self._selector.modify(fd, mask | selectors.EVENT_WRITE,
                              (reader, handle))
        if writer is not None:
            writer.cancel()
    return handle
|
| 304 |
+
|
| 305 |
+
def _remove_writer(self, fd):
    """Remove a writer callback."""
    # Returns True if a writer handle was actually cancelled.
    if self.is_closed():
        return False
    try:
        key = self._selector.get_key(fd)
    except KeyError:
        return False
    else:
        mask, (reader, writer) = key.events, key.data
        # Remove both writer and connector.
        mask &= ~selectors.EVENT_WRITE
        if not mask:
            # No reader interest left: drop the registration entirely.
            self._selector.unregister(fd)
        else:
            self._selector.modify(fd, mask, (reader, None))

        if writer is not None:
            writer.cancel()
            return True
        else:
            return False
|
| 327 |
+
|
| 328 |
+
def add_reader(self, fd, callback, *args):
    """Start watching *fd* for readability, invoking callback(*args)."""
    self._ensure_fd_no_transport(fd)
    self._add_reader(fd, callback, *args)
|
| 332 |
+
|
| 333 |
+
def remove_reader(self, fd):
    """Stop watching *fd* for readability; True if a callback was removed."""
    self._ensure_fd_no_transport(fd)
    return self._remove_reader(fd)
|
| 337 |
+
|
| 338 |
+
def add_writer(self, fd, callback, *args):
    """Start watching *fd* for writability, invoking callback(*args)."""
    self._ensure_fd_no_transport(fd)
    self._add_writer(fd, callback, *args)
|
| 342 |
+
|
| 343 |
+
def remove_writer(self, fd):
    """Stop watching *fd* for writability; True if a callback was removed."""
    self._ensure_fd_no_transport(fd)
    return self._remove_writer(fd)
|
| 347 |
+
|
| 348 |
+
async def sock_recv(self, sock, n):
    """Receive data from the socket.

    The return value is a bytes object representing the data received.
    The maximum amount of data to be received at once is specified by
    nbytes.
    """
    base_events._check_ssl_socket(sock)
    if self._debug and sock.gettimeout() != 0:
        raise ValueError("the socket must be non-blocking")
    try:
        # Fast path: the data may already be available.
        return sock.recv(n)
    except (BlockingIOError, InterruptedError):
        pass
    # Slow path: wait for readability and retry via _sock_recv().
    fut = self.create_future()
    fd = sock.fileno()
    self._ensure_fd_no_transport(fd)
    handle = self._add_reader(fd, self._sock_recv, fut, sock, n)
    fut.add_done_callback(
        functools.partial(self._sock_read_done, fd, handle=handle))
    return await fut
|
| 369 |
+
|
| 370 |
+
def _sock_read_done(self, fd, fut, handle=None):
|
| 371 |
+
if handle is None or not handle.cancelled():
|
| 372 |
+
self.remove_reader(fd)
|
| 373 |
+
|
| 374 |
+
def _sock_recv(self, fut, sock, n):
|
| 375 |
+
# _sock_recv() can add itself as an I/O callback if the operation can't
|
| 376 |
+
# be done immediately. Don't use it directly, call sock_recv().
|
| 377 |
+
if fut.done():
|
| 378 |
+
return
|
| 379 |
+
try:
|
| 380 |
+
data = sock.recv(n)
|
| 381 |
+
except (BlockingIOError, InterruptedError):
|
| 382 |
+
return # try again next time
|
| 383 |
+
except (SystemExit, KeyboardInterrupt):
|
| 384 |
+
raise
|
| 385 |
+
except BaseException as exc:
|
| 386 |
+
fut.set_exception(exc)
|
| 387 |
+
else:
|
| 388 |
+
fut.set_result(data)
|
| 389 |
+
|
| 390 |
+
async def sock_recv_into(self, sock, buf):
    """Receive data from the socket.

    The received data is written into *buf* (a writable buffer).
    The return value is the number of bytes written.
    """
    base_events._check_ssl_socket(sock)
    if self._debug and sock.gettimeout() != 0:
        raise ValueError("the socket must be non-blocking")
    try:
        # Fast path: the data may already be available.
        return sock.recv_into(buf)
    except (BlockingIOError, InterruptedError):
        pass
    # Slow path: wait for readability and retry via _sock_recv_into().
    fut = self.create_future()
    fd = sock.fileno()
    self._ensure_fd_no_transport(fd)
    handle = self._add_reader(fd, self._sock_recv_into, fut, sock, buf)
    fut.add_done_callback(
        functools.partial(self._sock_read_done, fd, handle=handle))
    return await fut
|
| 410 |
+
|
| 411 |
+
def _sock_recv_into(self, fut, sock, buf):
|
| 412 |
+
# _sock_recv_into() can add itself as an I/O callback if the operation
|
| 413 |
+
# can't be done immediately. Don't use it directly, call
|
| 414 |
+
# sock_recv_into().
|
| 415 |
+
if fut.done():
|
| 416 |
+
return
|
| 417 |
+
try:
|
| 418 |
+
nbytes = sock.recv_into(buf)
|
| 419 |
+
except (BlockingIOError, InterruptedError):
|
| 420 |
+
return # try again next time
|
| 421 |
+
except (SystemExit, KeyboardInterrupt):
|
| 422 |
+
raise
|
| 423 |
+
except BaseException as exc:
|
| 424 |
+
fut.set_exception(exc)
|
| 425 |
+
else:
|
| 426 |
+
fut.set_result(nbytes)
|
| 427 |
+
|
| 428 |
+
async def sock_sendall(self, sock, data):
    """Send data to the socket.

    The socket must be connected to a remote socket. This method continues
    to send data from data until either all data has been sent or an
    error occurs. None is returned on success. On error, an exception is
    raised, and there is no way to determine how much data, if any, was
    successfully processed by the receiving end of the connection.
    """
    base_events._check_ssl_socket(sock)
    if self._debug and sock.gettimeout() != 0:
        raise ValueError("the socket must be non-blocking")
    try:
        # Fast path: attempt one immediate send.
        n = sock.send(data)
    except (BlockingIOError, InterruptedError):
        n = 0

    if n == len(data):
        # all data sent
        return

    fut = self.create_future()
    fd = sock.fileno()
    self._ensure_fd_no_transport(fd)
    # use a trick with a list in closure to store a mutable state
    handle = self._add_writer(fd, self._sock_sendall, fut, sock,
                              memoryview(data), [n])
    fut.add_done_callback(
        functools.partial(self._sock_write_done, fd, handle=handle))
    return await fut
|
| 458 |
+
|
| 459 |
+
def _sock_sendall(self, fut, sock, view, pos):
|
| 460 |
+
if fut.done():
|
| 461 |
+
# Future cancellation can be scheduled on previous loop iteration
|
| 462 |
+
return
|
| 463 |
+
start = pos[0]
|
| 464 |
+
try:
|
| 465 |
+
n = sock.send(view[start:])
|
| 466 |
+
except (BlockingIOError, InterruptedError):
|
| 467 |
+
return
|
| 468 |
+
except (SystemExit, KeyboardInterrupt):
|
| 469 |
+
raise
|
| 470 |
+
except BaseException as exc:
|
| 471 |
+
fut.set_exception(exc)
|
| 472 |
+
return
|
| 473 |
+
|
| 474 |
+
start += n
|
| 475 |
+
|
| 476 |
+
if start == len(view):
|
| 477 |
+
fut.set_result(None)
|
| 478 |
+
else:
|
| 479 |
+
pos[0] = start
|
| 480 |
+
|
| 481 |
+
async def sock_connect(self, sock, address):
    """Connect to a remote socket at address.

    This method is a coroutine.
    """
    base_events._check_ssl_socket(sock)
    if self._debug and sock.gettimeout() != 0:
        raise ValueError("the socket must be non-blocking")

    if sock.family == socket.AF_INET or (
            base_events._HAS_IPv6 and sock.family == socket.AF_INET6):
        # Resolve host names here so connect() below gets a numeric
        # address; only the first resolution result is used.
        resolved = await self._ensure_resolved(
            address, family=sock.family, type=sock.type, proto=sock.proto,
            loop=self,
        )
        _, _, _, _, address = resolved[0]

    fut = self.create_future()
    self._sock_connect(fut, sock, address)
    try:
        return await fut
    finally:
        # Needed to break cycles when an exception occurs.
        fut = None
|
| 505 |
+
|
| 506 |
+
def _sock_connect(self, fut, sock, address):
    """Start a non-blocking connect(); resolve *fut* now or via callback."""
    fd = sock.fileno()
    try:
        sock.connect(address)
    except (BlockingIOError, InterruptedError):
        # Issue #23618: When the C function connect() fails with EINTR, the
        # connection runs in background. We have to wait until the socket
        # becomes writable to be notified when the connection succeed or
        # fails.
        self._ensure_fd_no_transport(fd)
        handle = self._add_writer(
            fd, self._sock_connect_cb, fut, sock, address)
        fut.add_done_callback(
            functools.partial(self._sock_write_done, fd, handle=handle))
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as exc:
        fut.set_exception(exc)
    else:
        fut.set_result(None)
    finally:
        # Break the local reference to the future to avoid cycles.
        fut = None
|
| 528 |
+
|
| 529 |
+
def _sock_write_done(self, fd, fut, handle=None):
|
| 530 |
+
if handle is None or not handle.cancelled():
|
| 531 |
+
self.remove_writer(fd)
|
| 532 |
+
|
| 533 |
+
def _sock_connect_cb(self, fut, sock, address):
|
| 534 |
+
if fut.done():
|
| 535 |
+
return
|
| 536 |
+
|
| 537 |
+
try:
|
| 538 |
+
err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
|
| 539 |
+
if err != 0:
|
| 540 |
+
# Jump to any except clause below.
|
| 541 |
+
raise OSError(err, f'Connect call failed {address}')
|
| 542 |
+
except (BlockingIOError, InterruptedError):
|
| 543 |
+
# socket is still registered, the callback will be retried later
|
| 544 |
+
pass
|
| 545 |
+
except (SystemExit, KeyboardInterrupt):
|
| 546 |
+
raise
|
| 547 |
+
except BaseException as exc:
|
| 548 |
+
fut.set_exception(exc)
|
| 549 |
+
else:
|
| 550 |
+
fut.set_result(None)
|
| 551 |
+
finally:
|
| 552 |
+
fut = None
|
| 553 |
+
|
| 554 |
+
async def sock_accept(self, sock):
    """Accept a connection.

    The socket must be bound to an address and listening for connections.
    The return value is a pair (conn, address) where conn is a new socket
    object usable to send and receive data on the connection, and address
    is the address bound to the socket on the other end of the connection.
    """
    base_events._check_ssl_socket(sock)
    if self._debug and sock.gettimeout() != 0:
        raise ValueError("the socket must be non-blocking")
    fut = self.create_future()
    # _sock_accept() resolves the future immediately or re-arms itself.
    self._sock_accept(fut, sock)
    return await fut
|
| 568 |
+
|
| 569 |
+
def _sock_accept(self, fut, sock):
    """Try a non-blocking accept(); on EWOULDBLOCK re-arm as a reader."""
    fd = sock.fileno()
    try:
        conn, address = sock.accept()
        # Accepted sockets must be non-blocking for the event loop.
        conn.setblocking(False)
    except (BlockingIOError, InterruptedError):
        self._ensure_fd_no_transport(fd)
        handle = self._add_reader(fd, self._sock_accept, fut, sock)
        fut.add_done_callback(
            functools.partial(self._sock_read_done, fd, handle=handle))
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as exc:
        fut.set_exception(exc)
    else:
        fut.set_result((conn, address))
|
| 585 |
+
|
| 586 |
+
async def _sendfile_native(self, transp, file, offset, count):
    """Send *file* over *transp*'s socket using os.sendfile().

    Temporarily detaches the transport from fd tracking and pauses its
    reading so sock_sendfile() can own the socket; everything is
    restored afterwards.
    """
    del self._transports[transp._sock_fd]
    resume_reading = transp.is_reading()
    transp.pause_reading()
    # Wait until the transport's write buffer is fully flushed.
    await transp._make_empty_waiter()
    try:
        return await self.sock_sendfile(transp._sock, file, offset, count,
                                        fallback=False)
    finally:
        transp._reset_empty_waiter()
        if resume_reading:
            transp.resume_reading()
        self._transports[transp._sock_fd] = transp
|
| 599 |
+
|
| 600 |
+
def _process_events(self, event_list):
    """Dispatch selector events to their registered (reader, writer) handles."""
    for key, mask in event_list:
        fileobj, (reader, writer) = key.fileobj, key.data
        if mask & selectors.EVENT_READ and reader is not None:
            if reader._cancelled:
                # Lazily drop handles cancelled since registration.
                self._remove_reader(fileobj)
            else:
                self._add_callback(reader)
        if mask & selectors.EVENT_WRITE and writer is not None:
            if writer._cancelled:
                self._remove_writer(fileobj)
            else:
                self._add_callback(writer)
|
| 613 |
+
|
| 614 |
+
def _stop_serving(self, sock):
|
| 615 |
+
self._remove_reader(sock.fileno())
|
| 616 |
+
sock.close()
|
| 617 |
+
|
| 618 |
+
|
| 619 |
+
class _SelectorTransport(transports._FlowControlMixin,
                         transports.Transport):
    """Base transport for selector event loops.

    Owns the socket, its fd bookkeeping in loop._transports, the write
    buffer, and the close/connection_lost lifecycle shared by the
    concrete socket and datagram transports.
    """

    max_size = 256 * 1024  # Buffer size passed to recv().

    _buffer_factory = bytearray  # Constructs initial value for self._buffer.

    # Attribute used in the destructor: it must be set even if the constructor
    # is not called (see _SelectorSslTransport which may start by raising an
    # exception)
    _sock = None

    def __init__(self, loop, sock, protocol, extra=None, server=None):
        super().__init__(extra, loop)
        self._extra['socket'] = trsock.TransportSocket(sock)
        try:
            self._extra['sockname'] = sock.getsockname()
        except OSError:
            self._extra['sockname'] = None
        if 'peername' not in self._extra:
            try:
                self._extra['peername'] = sock.getpeername()
            except socket.error:
                # Unconnected (e.g. datagram) sockets have no peer.
                self._extra['peername'] = None
        self._sock = sock
        self._sock_fd = sock.fileno()

        # set_protocol() flips this to True; tracked so connection_lost
        # is only delivered to a protocol that was actually attached.
        self._protocol_connected = False
        self.set_protocol(protocol)

        self._server = server
        self._buffer = self._buffer_factory()
        self._conn_lost = 0  # Set when call to connection_lost scheduled.
        self._closing = False  # Set when close() called.
        if self._server is not None:
            self._server._attach()
        # Register so the loop can refuse add_reader()/add_writer() on
        # an fd owned by a live transport.
        loop._transports[self._sock_fd] = self

    def __repr__(self):
        """Debug representation including fd and selector polling state."""
        info = [self.__class__.__name__]
        if self._sock is None:
            info.append('closed')
        elif self._closing:
            info.append('closing')
        info.append(f'fd={self._sock_fd}')
        # test if the transport was closed
        if self._loop is not None and not self._loop.is_closed():
            polling = _test_selector_event(self._loop._selector,
                                           self._sock_fd, selectors.EVENT_READ)
            if polling:
                info.append('read=polling')
            else:
                info.append('read=idle')

            polling = _test_selector_event(self._loop._selector,
                                           self._sock_fd,
                                           selectors.EVENT_WRITE)
            if polling:
                state = 'polling'
            else:
                state = 'idle'

            bufsize = self.get_write_buffer_size()
            info.append(f'write=<{state}, bufsize={bufsize}>')
        return '<{}>'.format(' '.join(info))

    def abort(self):
        """Close immediately, discarding buffered data."""
        self._force_close(None)

    def set_protocol(self, protocol):
        """Attach *protocol*; marks it as connected for lifecycle purposes."""
        self._protocol = protocol
        self._protocol_connected = True

    def get_protocol(self):
        """Return the protocol currently attached to this transport."""
        return self._protocol

    def is_closing(self):
        """True once close() or _force_close() has been initiated."""
        return self._closing

    def close(self):
        """Gracefully close: flush the buffer, then drop the connection."""
        if self._closing:
            return
        self._closing = True
        self._loop._remove_reader(self._sock_fd)
        if not self._buffer:
            # Nothing pending: schedule connection_lost right away.
            self._conn_lost += 1
            self._loop._remove_writer(self._sock_fd)
            self._loop.call_soon(self._call_connection_lost, None)

    def __del__(self, _warn=warnings.warn):
        # Safety net: warn and close the socket if the transport was
        # garbage-collected without being closed.
        if self._sock is not None:
            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
            self._sock.close()

    def _fatal_error(self, exc, message='Fatal error on transport'):
        # Should be called from exception handler only.
        if isinstance(exc, OSError):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })
        self._force_close(exc)

    def _force_close(self, exc):
        """Abort: discard the buffer and schedule connection_lost(exc)."""
        if self._conn_lost:
            # connection_lost already scheduled.
            return
        if self._buffer:
            self._buffer.clear()
            self._loop._remove_writer(self._sock_fd)
        if not self._closing:
            self._closing = True
            self._loop._remove_reader(self._sock_fd)
        self._conn_lost += 1
        self._loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        """Deliver connection_lost and release every held resource."""
        try:
            if self._protocol_connected:
                self._protocol.connection_lost(exc)
        finally:
            self._sock.close()
            self._sock = None
            self._protocol = None
            self._loop = None
            server = self._server
            if server is not None:
                server._detach()
                self._server = None

    def get_write_buffer_size(self):
        """Number of bytes currently queued for sending."""
        return len(self._buffer)

    def _add_reader(self, fd, callback, *args):
        # Ignore read-interest requests once the transport is closing.
        if self._closing:
            return

        self._loop._add_reader(fd, callback, *args)
|
| 761 |
+
|
| 762 |
+
|
| 763 |
+
class _SelectorSocketTransport(_SelectorTransport):
|
| 764 |
+
|
| 765 |
+
_start_tls_compatible = True
|
| 766 |
+
_sendfile_compatible = constants._SendfileMode.TRY_NATIVE
|
| 767 |
+
|
| 768 |
+
def __init__(self, loop, sock, protocol, waiter=None,
             extra=None, server=None):
    """TCP transport: wire *sock* into *loop* and start reading.

    *waiter*, if given, is resolved after connection_made() has run.
    """
    # Must exist before the base __init__ calls set_protocol(), which
    # assigns the protocol-appropriate read callback.
    self._read_ready_cb = None
    super().__init__(loop, sock, protocol, extra, server)
    self._eof = False
    self._paused = False
    self._empty_waiter = None

    # Disable the Nagle algorithm -- small writes will be
    # sent without waiting for the TCP ACK. This generally
    # decreases the latency (in some cases significantly.)
    base_events._set_nodelay(self._sock)

    self._loop.call_soon(self._protocol.connection_made, self)
    # only start reading when connection_made() has been called
    self._loop.call_soon(self._add_reader,
                         self._sock_fd, self._read_ready)
    if waiter is not None:
        # only wake up the waiter when connection_made() has been called
        self._loop.call_soon(futures._set_result_unless_cancelled,
                             waiter, None)
|
| 790 |
+
|
| 791 |
+
def set_protocol(self, protocol):
    """Attach *protocol*, selecting the matching read-ready callback.

    BufferedProtocol instances get the zero-copy get_buffer() path;
    everything else gets data_received().
    """
    if isinstance(protocol, protocols.BufferedProtocol):
        self._read_ready_cb = self._read_ready__get_buffer
    else:
        self._read_ready_cb = self._read_ready__data_received

    super().set_protocol(protocol)
|
| 798 |
+
|
| 799 |
+
def is_reading(self):
    """True while incoming data is being delivered to the protocol."""
    return not (self._paused or self._closing)
|
| 801 |
+
|
| 802 |
+
def pause_reading(self):
    """Stop delivering incoming data; no-op when closing or already paused."""
    if self._closing or self._paused:
        return
    self._paused = True
    self._loop._remove_reader(self._sock_fd)
    if self._loop.get_debug():
        logger.debug("%r pauses reading", self)
|
| 809 |
+
|
| 810 |
+
def resume_reading(self):
    """Restart delivering incoming data; no-op when closing or not paused."""
    if self._closing or not self._paused:
        return
    self._paused = False
    self._add_reader(self._sock_fd, self._read_ready)
    if self._loop.get_debug():
        logger.debug("%r resumes reading", self)
|
| 817 |
+
|
| 818 |
+
def _read_ready(self):
|
| 819 |
+
self._read_ready_cb()
|
| 820 |
+
|
| 821 |
+
def _read_ready__get_buffer(self):
    """Read path for BufferedProtocol: recv_into the protocol's buffer.

    Each protocol call is guarded separately so a protocol bug is
    reported as a fatal transport error rather than crashing the loop.
    """
    if self._conn_lost:
        return

    try:
        buf = self._protocol.get_buffer(-1)
        if not len(buf):
            raise RuntimeError('get_buffer() returned an empty buffer')
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as exc:
        self._fatal_error(
            exc, 'Fatal error: protocol.get_buffer() call failed.')
        return

    try:
        nbytes = self._sock.recv_into(buf)
    except (BlockingIOError, InterruptedError):
        # Spurious readiness: wait for the next event.
        return
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as exc:
        self._fatal_error(exc, 'Fatal read error on socket transport')
        return

    if not nbytes:
        # Zero-byte read means the peer closed its write side.
        self._read_ready__on_eof()
        return

    try:
        self._protocol.buffer_updated(nbytes)
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as exc:
        self._fatal_error(
            exc, 'Fatal error: protocol.buffer_updated() call failed.')
|
| 857 |
+
|
| 858 |
+
def _read_ready__data_received(self):
    """Read path for plain protocols: recv() then protocol.data_received()."""
    if self._conn_lost:
        return
    try:
        data = self._sock.recv(self.max_size)
    except (BlockingIOError, InterruptedError):
        # Spurious readiness: wait for the next event.
        return
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as exc:
        self._fatal_error(exc, 'Fatal read error on socket transport')
        return

    if not data:
        # Zero-byte read means the peer closed its write side.
        self._read_ready__on_eof()
        return

    try:
        self._protocol.data_received(data)
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as exc:
        self._fatal_error(
            exc, 'Fatal error: protocol.data_received() call failed.')
|
| 882 |
+
|
| 883 |
+
def _read_ready__on_eof(self):
    """Handle a zero-byte read: notify the protocol of EOF.

    The protocol's eof_received() decides whether the connection stays
    half-open (truthy return) or is closed.
    """
    if self._loop.get_debug():
        logger.debug("%r received EOF", self)

    try:
        keep_open = self._protocol.eof_received()
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as exc:
        self._fatal_error(
            exc, 'Fatal error: protocol.eof_received() call failed.')
        return

    if keep_open:
        # We're keeping the connection open so the
        # protocol can write more, but we still can't
        # receive more, so remove the reader callback.
        self._loop._remove_reader(self._sock_fd)
    else:
        self.close()
|
| 903 |
+
|
| 904 |
+
def write(self, data):
    """Queue *data* for sending, attempting one immediate send first.

    Raises RuntimeError after write_eof() or while a sendfile is in
    progress; silently drops data (with a throttled warning) once the
    connection is lost.
    """
    if not isinstance(data, (bytes, bytearray, memoryview)):
        raise TypeError(f'data argument must be a bytes-like object, '
                        f'not {type(data).__name__!r}')
    if self._eof:
        raise RuntimeError('Cannot call write() after write_eof()')
    if self._empty_waiter is not None:
        raise RuntimeError('unable to write; sendfile is in progress')
    if not data:
        return

    if self._conn_lost:
        # Warn once when the threshold is first crossed, not per call.
        if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
            logger.warning('socket.send() raised exception.')
        self._conn_lost += 1
        return

    if not self._buffer:
        # Optimization: try to send now.
        try:
            n = self._sock.send(data)
        except (BlockingIOError, InterruptedError):
            pass
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            self._fatal_error(exc, 'Fatal write error on socket transport')
            return
        else:
            data = data[n:]
            if not data:
                return
            # Not all was written; register write handler.
            self._loop._add_writer(self._sock_fd, self._write_ready)

    # Add it to the buffer.
    self._buffer.extend(data)
    self._maybe_pause_protocol()
|
| 942 |
+
|
| 943 |
+
    def _write_ready(self):
        """Event-loop callback: the socket fd became writable.

        Flushes as much of self._buffer as the kernel accepts.  When the
        buffer drains completely, unregisters the writer callback,
        completes any pending sendfile empty-waiter, and finishes a
        pending close() or write_eof().
        """
        assert self._buffer, 'Data should not be empty'

        if self._conn_lost:
            return
        try:
            n = self._sock.send(self._buffer)
        except (BlockingIOError, InterruptedError):
            # Spurious wakeup; wait for the next writable notification.
            pass
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            self._loop._remove_writer(self._sock_fd)
            self._buffer.clear()
            self._fatal_error(exc, 'Fatal write error on socket transport')
            if self._empty_waiter is not None:
                # Propagate the failure to a waiting sendfile().
                self._empty_waiter.set_exception(exc)
        else:
            if n:
                del self._buffer[:n]
            self._maybe_resume_protocol()  # May append to buffer.
            if not self._buffer:
                self._loop._remove_writer(self._sock_fd)
                if self._empty_waiter is not None:
                    self._empty_waiter.set_result(None)
                if self._closing:
                    self._call_connection_lost(None)
                elif self._eof:
                    # write_eof() was called while data was still buffered;
                    # now that it is flushed, half-close the socket.
                    self._sock.shutdown(socket.SHUT_WR)
|
| 972 |
+
|
| 973 |
+
    def write_eof(self):
        """Half-close the write side after buffered data is flushed.

        If the buffer is already empty the socket is shut down for
        writing immediately; otherwise _write_ready() performs the
        shutdown once the buffer drains (it checks self._eof).
        """
        if self._closing or self._eof:
            return
        self._eof = True
        if not self._buffer:
            self._sock.shutdown(socket.SHUT_WR)
|
| 979 |
+
|
| 980 |
+
    def can_write_eof(self):
        """Return True: plain socket transports support write_eof()."""
        return True
|
| 982 |
+
|
| 983 |
+
    def _call_connection_lost(self, exc):
        """Invoke the base connection-lost handling, then fail any
        pending sendfile empty-waiter since no more data can be flushed.
        """
        super()._call_connection_lost(exc)
        if self._empty_waiter is not None:
            self._empty_waiter.set_exception(
                ConnectionError("Connection is closed by peer"))
|
| 988 |
+
|
| 989 |
+
    def _make_empty_waiter(self):
        """Create and return a future that completes when the write
        buffer is empty (used by sendfile to wait for its turn).

        The future is resolved immediately if nothing is buffered;
        otherwise _write_ready() resolves it on drain.  Only one waiter
        may exist at a time.
        """
        if self._empty_waiter is not None:
            raise RuntimeError("Empty waiter is already set")
        self._empty_waiter = self._loop.create_future()
        if not self._buffer:
            self._empty_waiter.set_result(None)
        return self._empty_waiter
|
| 996 |
+
|
| 997 |
+
    def _reset_empty_waiter(self):
        """Clear the sendfile empty-waiter so write() is allowed again."""
        self._empty_waiter = None
|
| 999 |
+
|
| 1000 |
+
|
| 1001 |
+
class _SelectorDatagramTransport(_SelectorTransport):
    """Datagram (UDP) transport for selector event loops.

    Unlike the stream transport, the write buffer is a deque of
    (payload, address) pairs because datagram boundaries must be
    preserved.
    """

    _buffer_factory = collections.deque

    def __init__(self, loop, sock, protocol, address=None,
                 waiter=None, extra=None):
        super().__init__(loop, sock, protocol, extra)
        # Fixed peer address for connected-UDP sockets, or None.
        self._address = address
        self._loop.call_soon(self._protocol.connection_made, self)
        # only start reading when connection_made() has been called
        self._loop.call_soon(self._add_reader,
                             self._sock_fd, self._read_ready)
        if waiter is not None:
            # only wake up the waiter when connection_made() has been called
            self._loop.call_soon(futures._set_result_unless_cancelled,
                                 waiter, None)

    def get_write_buffer_size(self):
        """Return total payload bytes currently queued for sending."""
        return sum(len(data) for data, _ in self._buffer)

    def _read_ready(self):
        """Event-loop callback: a datagram is ready to be received.

        OSError is reported via protocol.error_received() rather than
        tearing down the transport, per the datagram protocol contract.
        """
        if self._conn_lost:
            return
        try:
            data, addr = self._sock.recvfrom(self.max_size)
        except (BlockingIOError, InterruptedError):
            pass
        except OSError as exc:
            self._protocol.error_received(exc)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            self._fatal_error(exc, 'Fatal read error on datagram transport')
        else:
            self._protocol.datagram_received(data, addr)

    def sendto(self, data, addr=None):
        """Send *data* to *addr* (or to the connected peer).

        Attempts an immediate send when nothing is buffered; on
        EWOULDBLOCK the datagram is queued and a writer callback
        registered.  OSError goes to protocol.error_received().
        """
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError(f'data argument must be a bytes-like object, '
                            f'not {type(data).__name__!r}')
        if not data:
            return

        if self._address:
            # Connected socket: only the connected peer (or None) is valid.
            if addr not in (None, self._address):
                raise ValueError(
                    f'Invalid address: must be None or {self._address}')
            addr = self._address

        if self._conn_lost and self._address:
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.send() raised exception.')
            self._conn_lost += 1
            return

        if not self._buffer:
            # Attempt to send it right away first.
            try:
                if self._extra['peername']:
                    self._sock.send(data)
                else:
                    self._sock.sendto(data, addr)
                return
            except (BlockingIOError, InterruptedError):
                self._loop._add_writer(self._sock_fd, self._sendto_ready)
            except OSError as exc:
                self._protocol.error_received(exc)
                return
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                self._fatal_error(
                    exc, 'Fatal write error on datagram transport')
                return

        # Ensure that what we buffer is immutable.
        self._buffer.append((bytes(data), addr))
        self._maybe_pause_protocol()

    def _sendto_ready(self):
        """Event-loop callback: flush queued datagrams while possible.

        Stops (and retries later) on EWOULDBLOCK; unregisters the writer
        callback once the queue is empty and finishes a pending close().
        """
        while self._buffer:
            data, addr = self._buffer.popleft()
            try:
                if self._extra['peername']:
                    self._sock.send(data)
                else:
                    self._sock.sendto(data, addr)
            except (BlockingIOError, InterruptedError):
                self._buffer.appendleft((data, addr))  # Try again later.
                break
            except OSError as exc:
                self._protocol.error_received(exc)
                return
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                self._fatal_error(
                    exc, 'Fatal write error on datagram transport')
                return

        self._maybe_resume_protocol()  # May append to buffer.
        if not self._buffer:
            self._loop._remove_writer(self._sock_fd)
            if self._closing:
                self._call_connection_lost(None)
|
omnilmm/lib/python3.10/asyncio/sslproto.py
ADDED
|
@@ -0,0 +1,739 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import warnings
|
| 3 |
+
try:
|
| 4 |
+
import ssl
|
| 5 |
+
except ImportError: # pragma: no cover
|
| 6 |
+
ssl = None
|
| 7 |
+
|
| 8 |
+
from . import constants
|
| 9 |
+
from . import protocols
|
| 10 |
+
from . import transports
|
| 11 |
+
from .log import logger
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _create_transport_context(server_side, server_hostname):
|
| 15 |
+
if server_side:
|
| 16 |
+
raise ValueError('Server side SSL needs a valid SSLContext')
|
| 17 |
+
|
| 18 |
+
# Client side may pass ssl=True to use a default
|
| 19 |
+
# context; in that case the sslcontext passed is None.
|
| 20 |
+
# The default is secure for client connections.
|
| 21 |
+
# Python 3.4+: use up-to-date strong settings.
|
| 22 |
+
sslcontext = ssl.create_default_context()
|
| 23 |
+
if not server_hostname:
|
| 24 |
+
sslcontext.check_hostname = False
|
| 25 |
+
return sslcontext
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# States of an _SSLPipe.
|
| 29 |
+
_UNWRAPPED = "UNWRAPPED"        # plaintext pass-through; no TLS active
_DO_HANDSHAKE = "DO_HANDSHAKE"  # TLS handshake in progress
_WRAPPED = "WRAPPED"            # handshake done; data is encrypted
_SHUTDOWN = "SHUTDOWN"          # TLS shutdown (unwrap) in progress
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class _SSLPipe(object):
    """An SSL "Pipe".

    An SSL pipe allows you to communicate with an SSL/TLS protocol instance
    through memory buffers. It can be used to implement a security layer for an
    existing connection where you don't have access to the connection's file
    descriptor, or for some reason you don't want to use it.

    An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
    data is passed through untransformed. In wrapped mode, application level
    data is encrypted to SSL record level data and vice versa. The SSL record
    level is the lowest level in the SSL protocol suite and is what travels
    as-is over the wire.

    An SslPipe initially is in "unwrapped" mode. To start SSL, call
    do_handshake(). To shutdown SSL again, call unwrap().
    """

    max_size = 256 * 1024  # Buffer size passed to read()

    def __init__(self, context, server_side, server_hostname=None):
        """
        The *context* argument specifies the ssl.SSLContext to use.

        The *server_side* argument indicates whether this is a server side or
        client side transport.

        The optional *server_hostname* argument can be used to specify the
        hostname you are connecting to. You may only specify this parameter if
        the _ssl module supports Server Name Indication (SNI).
        """
        self._context = context
        self._server_side = server_side
        self._server_hostname = server_hostname
        self._state = _UNWRAPPED
        # Memory BIOs: network-side bytes in, network-side bytes out.
        self._incoming = ssl.MemoryBIO()
        self._outgoing = ssl.MemoryBIO()
        self._sslobj = None
        self._need_ssldata = False
        self._handshake_cb = None
        self._shutdown_cb = None

    @property
    def context(self):
        """The SSL context passed to the constructor."""
        return self._context

    @property
    def ssl_object(self):
        """The internal ssl.SSLObject instance.

        Return None if the pipe is not wrapped.
        """
        return self._sslobj

    @property
    def need_ssldata(self):
        """Whether more record level data is needed to complete a handshake
        that is currently in progress."""
        return self._need_ssldata

    @property
    def wrapped(self):
        """
        Whether a security layer is currently in effect.

        Return False during handshake.
        """
        return self._state == _WRAPPED

    def do_handshake(self, callback=None):
        """Start the SSL handshake.

        Return a list of ssldata. A ssldata element is a list of buffers

        The optional *callback* argument can be used to install a callback that
        will be called when the handshake is complete. The callback will be
        called with None if successful, else an exception instance.
        """
        if self._state != _UNWRAPPED:
            raise RuntimeError('handshake in progress or completed')
        self._sslobj = self._context.wrap_bio(
            self._incoming, self._outgoing,
            server_side=self._server_side,
            server_hostname=self._server_hostname)
        self._state = _DO_HANDSHAKE
        self._handshake_cb = callback
        # Kick off the handshake; with no input yet this just produces
        # the first outgoing TLS record (client hello) as ssldata.
        ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
        assert len(appdata) == 0
        return ssldata

    def shutdown(self, callback=None):
        """Start the SSL shutdown sequence.

        Return a list of ssldata. A ssldata element is a list of buffers

        The optional *callback* argument can be used to install a callback that
        will be called when the shutdown is complete. The callback will be
        called without arguments.
        """
        if self._state == _UNWRAPPED:
            raise RuntimeError('no security layer present')
        if self._state == _SHUTDOWN:
            raise RuntimeError('shutdown in progress')
        assert self._state in (_WRAPPED, _DO_HANDSHAKE)
        self._state = _SHUTDOWN
        self._shutdown_cb = callback
        ssldata, appdata = self.feed_ssldata(b'')
        assert appdata == [] or appdata == [b'']
        return ssldata

    def feed_eof(self):
        """Send a potentially "ragged" EOF.

        This method will raise an SSL_ERROR_EOF exception if the EOF is
        unexpected.
        """
        self._incoming.write_eof()
        ssldata, appdata = self.feed_ssldata(b'')
        assert appdata == [] or appdata == [b'']

    def feed_ssldata(self, data, only_handshake=False):
        """Feed SSL record level data into the pipe.

        The data must be a bytes instance. It is OK to send an empty bytes
        instance. This can be used to get ssldata for a handshake initiated by
        this endpoint.

        Return a (ssldata, appdata) tuple. The ssldata element is a list of
        buffers containing SSL data that needs to be sent to the remote SSL.

        The appdata element is a list of buffers containing plaintext data that
        needs to be forwarded to the application. The appdata list may contain
        an empty buffer indicating an SSL "close_notify" alert. This alert must
        be acknowledged by calling shutdown().
        """
        if self._state == _UNWRAPPED:
            # If unwrapped, pass plaintext data straight through.
            if data:
                appdata = [data]
            else:
                appdata = []
            return ([], appdata)

        self._need_ssldata = False
        if data:
            self._incoming.write(data)

        ssldata = []
        appdata = []
        try:
            if self._state == _DO_HANDSHAKE:
                # Call do_handshake() until it doesn't raise anymore.
                self._sslobj.do_handshake()
                self._state = _WRAPPED
                if self._handshake_cb:
                    self._handshake_cb(None)
                if only_handshake:
                    return (ssldata, appdata)
                # Handshake done: execute the wrapped block

            if self._state == _WRAPPED:
                # Main state: read data from SSL until close_notify
                while True:
                    chunk = self._sslobj.read(self.max_size)
                    appdata.append(chunk)
                    if not chunk:  # close_notify
                        break

            elif self._state == _SHUTDOWN:
                # Call shutdown() until it doesn't raise anymore.
                self._sslobj.unwrap()
                self._sslobj = None
                self._state = _UNWRAPPED
                if self._shutdown_cb:
                    self._shutdown_cb()

            elif self._state == _UNWRAPPED:
                # Drain possible plaintext data after close_notify.
                appdata.append(self._incoming.read())
        except (ssl.SSLError, ssl.CertificateError) as exc:
            exc_errno = getattr(exc, 'errno', None)
            if exc_errno not in (
                    ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
                    ssl.SSL_ERROR_SYSCALL):
                # Real failure (not just "need more I/O"): report it to a
                # pending handshake callback and re-raise.
                if self._state == _DO_HANDSHAKE and self._handshake_cb:
                    self._handshake_cb(exc)
                raise
            self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ)

        # Check for record level data that needs to be sent back.
        # Happens for the initial handshake and renegotiations.
        if self._outgoing.pending:
            ssldata.append(self._outgoing.read())
        return (ssldata, appdata)

    def feed_appdata(self, data, offset=0):
        """Feed plaintext data into the pipe.

        Return an (ssldata, offset) tuple. The ssldata element is a list of
        buffers containing record level data that needs to be sent to the
        remote SSL instance. The offset is the number of plaintext bytes that
        were processed, which may be less than the length of data.

        NOTE: In case of short writes, this call MUST be retried with the SAME
        buffer passed into the *data* argument (i.e. the id() must be the
        same). This is an OpenSSL requirement. A further particularity is that
        a short write will always have offset == 0, because the _ssl module
        does not enable partial writes. And even though the offset is zero,
        there will still be encrypted data in ssldata.
        """
        assert 0 <= offset <= len(data)
        if self._state == _UNWRAPPED:
            # pass through data in unwrapped mode
            if offset < len(data):
                ssldata = [data[offset:]]
            else:
                ssldata = []
            return (ssldata, len(data))

        ssldata = []
        view = memoryview(data)
        while True:
            self._need_ssldata = False
            try:
                if offset < len(view):
                    offset += self._sslobj.write(view[offset:])
            except ssl.SSLError as exc:
                # It is not allowed to call write() after unwrap() until the
                # close_notify is acknowledged. We return the condition to the
                # caller as a short write.
                exc_errno = getattr(exc, 'errno', None)
                if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
                    exc_errno = exc.errno = ssl.SSL_ERROR_WANT_READ
                if exc_errno not in (ssl.SSL_ERROR_WANT_READ,
                                     ssl.SSL_ERROR_WANT_WRITE,
                                     ssl.SSL_ERROR_SYSCALL):
                    raise
                self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ)

            # See if there's any record level data back for us.
            if self._outgoing.pending:
                ssldata.append(self._outgoing.read())
            if offset == len(view) or self._need_ssldata:
                break
        return (ssldata, offset)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class _SSLProtocolTransport(transports._FlowControlMixin,
                            transports.Transport):
    """Application-facing transport for an SSL connection.

    Thin wrapper handed to the application protocol; it delegates most
    operations either to the owning SSLProtocol or to the underlying
    raw socket transport (``self._ssl_protocol._transport``).
    """

    _sendfile_compatible = constants._SendfileMode.FALLBACK

    def __init__(self, loop, ssl_protocol):
        self._loop = loop
        # SSLProtocol instance
        self._ssl_protocol = ssl_protocol
        self._closed = False

    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._ssl_protocol._get_extra_info(name, default)

    def set_protocol(self, protocol):
        self._ssl_protocol._set_app_protocol(protocol)

    def get_protocol(self):
        return self._ssl_protocol._app_protocol

    def is_closing(self):
        return self._closed

    def close(self):
        """Close the transport.

        Buffered data will be flushed asynchronously. No more data
        will be received. After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) called
        with None as its argument.
        """
        self._closed = True
        self._ssl_protocol._start_shutdown()

    def __del__(self, _warn=warnings.warn):
        # Warn (ResourceWarning) if the transport was garbage-collected
        # without being closed, then close it.
        if not self._closed:
            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
            self.close()

    def is_reading(self):
        tr = self._ssl_protocol._transport
        if tr is None:
            raise RuntimeError('SSL transport has not been initialized yet')
        return tr.is_reading()

    def pause_reading(self):
        """Pause the receiving end.

        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        self._ssl_protocol._transport.pause_reading()

    def resume_reading(self):
        """Resume the receiving end.

        Data received will once again be passed to the protocol's
        data_received() method.
        """
        self._ssl_protocol._transport.resume_reading()

    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.

        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods. If specified,
        the low-water limit must be less than or equal to the
        high-water limit. Neither value can be negative.

        The defaults are implementation-specific. If only the
        high-water limit is given, the low-water limit defaults to an
        implementation-specific value less than or equal to the
        high-water limit. Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty. Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        self._ssl_protocol._transport.set_write_buffer_limits(high, low)

    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        return self._ssl_protocol._transport.get_write_buffer_size()

    def get_write_buffer_limits(self):
        """Get the high and low watermarks for write flow control.
        Return a tuple (low, high) where low and high are
        positive number of bytes."""
        return self._ssl_protocol._transport.get_write_buffer_limits()

    @property
    def _protocol_paused(self):
        # Required for sendfile fallback pause_writing/resume_writing logic
        return self._ssl_protocol._transport._protocol_paused

    def write(self, data):
        """Write some data bytes to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError(f"data: expecting a bytes-like instance, "
                            f"got {type(data).__name__}")
        if not data:
            return
        self._ssl_protocol._write_appdata(data)

    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        return False

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost. No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        self._ssl_protocol._abort()
        self._closed = True
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
class SSLProtocol(protocols.Protocol):
|
| 410 |
+
"""SSL protocol.
|
| 411 |
+
|
| 412 |
+
Implementation of SSL on top of a socket using incoming and outgoing
|
| 413 |
+
buffers which are ssl.MemoryBIO objects.
|
| 414 |
+
"""
|
| 415 |
+
|
| 416 |
+
    def __init__(self, loop, app_protocol, sslcontext, waiter,
                 server_side=False, server_hostname=None,
                 call_connection_made=True,
                 ssl_handshake_timeout=None):
        """Set up SSL state; the handshake starts in connection_made().

        *waiter* is a future completed (or failed) when the handshake
        finishes.  A falsy *sslcontext* is replaced by a default
        client-side context (server side then raises ValueError).
        """
        if ssl is None:
            raise RuntimeError('stdlib ssl module not available')

        if ssl_handshake_timeout is None:
            ssl_handshake_timeout = constants.SSL_HANDSHAKE_TIMEOUT
        elif ssl_handshake_timeout <= 0:
            raise ValueError(
                f"ssl_handshake_timeout should be a positive number, "
                f"got {ssl_handshake_timeout}")

        if not sslcontext:
            sslcontext = _create_transport_context(
                server_side, server_hostname)

        self._server_side = server_side
        # server_hostname is only meaningful for client connections.
        if server_hostname and not server_side:
            self._server_hostname = server_hostname
        else:
            self._server_hostname = None
        self._sslcontext = sslcontext
        # SSL-specific extra info. More info are set when the handshake
        # completes.
        self._extra = dict(sslcontext=sslcontext)

        # App data write buffering
        self._write_backlog = collections.deque()
        self._write_buffer_size = 0

        self._waiter = waiter
        self._loop = loop
        self._set_app_protocol(app_protocol)
        self._app_transport = _SSLProtocolTransport(self._loop, self)
        # _SSLPipe instance (None until the connection is made)
        self._sslpipe = None
        self._session_established = False
        self._in_handshake = False
        self._in_shutdown = False
        # transport, ex: SelectorSocketTransport
        self._transport = None
        self._call_connection_made = call_connection_made
        self._ssl_handshake_timeout = ssl_handshake_timeout
|
| 461 |
+
|
| 462 |
+
    def _set_app_protocol(self, app_protocol):
        """Install the application protocol and cache whether it uses
        the BufferedProtocol interface (checked on every data chunk)."""
        self._app_protocol = app_protocol
        self._app_protocol_is_buffer = \
            isinstance(app_protocol, protocols.BufferedProtocol)
|
| 466 |
+
|
| 467 |
+
    def _wakeup_waiter(self, exc=None):
        """Complete the handshake waiter with *exc* (or success) once.

        No-op if no waiter is pending; a cancelled waiter is simply
        dropped.  The waiter reference is cleared either way.
        """
        if self._waiter is None:
            return
        if not self._waiter.cancelled():
            if exc is not None:
                self._waiter.set_exception(exc)
            else:
                self._waiter.set_result(None)
        self._waiter = None
|
| 476 |
+
|
| 477 |
+
    def connection_made(self, transport):
        """Called when the low-level connection is made.

        Start the SSL handshake.
        """
        self._transport = transport
        # The pipe translates between raw TLS records (network side)
        # and plaintext application data.
        self._sslpipe = _SSLPipe(self._sslcontext,
                                 self._server_side,
                                 self._server_hostname)
        self._start_handshake()
|
| 487 |
+
|
| 488 |
+
    def connection_lost(self, exc):
        """Called when the low-level connection is lost or closed.

        The argument is an exception object or None (the latter
        meaning a regular EOF is received or the connection was
        aborted or closed).
        """
        if self._session_established:
            self._session_established = False
            self._loop.call_soon(self._app_protocol.connection_lost, exc)
        else:
            # Most likely an exception occurred while in SSL handshake.
            # Just mark the app transport as closed so that its __del__
            # doesn't complain.
            if self._app_transport is not None:
                self._app_transport._closed = True
        # Drop references so nothing keeps the connection machinery alive.
        self._transport = None
        self._app_transport = None
        # The timeout handle may not exist if the handshake never started.
        if getattr(self, '_handshake_timeout_handle', None):
            self._handshake_timeout_handle.cancel()
        self._wakeup_waiter(exc)
        self._app_protocol = None
        self._sslpipe = None
|
| 511 |
+
|
| 512 |
+
def pause_writing(self):
    """Called when the low-level transport's buffer goes over
    the high-water mark.
    """
    # Flow control is forwarded verbatim to the application protocol.
    self._app_protocol.pause_writing()
|
| 517 |
+
|
| 518 |
+
def resume_writing(self):
    """Called when the low-level transport's buffer drains below
    the low-water mark.
    """
    # Flow control is forwarded verbatim to the application protocol.
    self._app_protocol.resume_writing()
|
| 523 |
+
|
| 524 |
+
def data_received(self, data):
    """Called when some SSL data is received.

    The argument is a bytes object.
    """
    if self._sslpipe is None:
        # transport closing, sslpipe is destroyed
        return

    try:
        # Feed encrypted bytes in; get back (bytes to send to the peer,
        # decrypted application data chunks).
        ssldata, appdata = self._sslpipe.feed_ssldata(data)
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as e:
        self._fatal_error(e, 'SSL error in data received')
        return

    # Echo protocol-level output (e.g. renegotiation, close_notify
    # responses) back to the peer.
    for chunk in ssldata:
        self._transport.write(chunk)

    for chunk in appdata:
        if chunk:
            try:
                if self._app_protocol_is_buffer:
                    protocols._feed_data_to_buffered_proto(
                        self._app_protocol, chunk)
                else:
                    self._app_protocol.data_received(chunk)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as ex:
                self._fatal_error(
                    ex, 'application protocol failed to receive SSL data')
                return
        else:
            # An empty chunk signals the peer sent close_notify:
            # begin an orderly shutdown.
            self._start_shutdown()
            break
|
| 561 |
+
|
| 562 |
+
def eof_received(self):
    """Called when the other end of the low-level stream
    is half-closed.

    If this returns a false value (including None), the transport
    will close itself. If it returns a true value, closing the
    transport is up to the protocol.
    """
    try:
        if self._loop.get_debug():
            logger.debug("%r received EOF", self)

        # EOF during/before handshake completion means the connection
        # was reset; fail any pending waiter accordingly.
        self._wakeup_waiter(ConnectionResetError)

        if not self._in_handshake:
            keep_open = self._app_protocol.eof_received()
            if keep_open:
                logger.warning('returning true from eof_received() '
                               'has no effect when using ssl')
    finally:
        # Always close: a half-open TLS connection cannot be kept alive.
        self._transport.close()
|
| 583 |
+
|
| 584 |
+
def _get_extra_info(self, name, default=None):
    # SSL-specific extras (peercert, cipher, ...) live in self._extra;
    # anything else is delegated to the underlying transport. After the
    # transport is gone, fall back to *default*.
    if name in self._extra:
        return self._extra[name]
    elif self._transport is not None:
        return self._transport.get_extra_info(name, default)
    else:
        return default
|
| 591 |
+
|
| 592 |
+
def _start_shutdown(self):
    """Initiate an orderly TLS shutdown (idempotent).

    During the handshake there is no session to shut down, so the
    connection is simply aborted.
    """
    if self._in_shutdown:
        return
    if self._in_handshake:
        self._abort()
    else:
        self._in_shutdown = True
        # An empty appdata write is the sentinel that triggers
        # _SSLPipe.shutdown() in _process_write_backlog().
        self._write_appdata(b'')
|
| 600 |
+
|
| 601 |
+
def _write_appdata(self, data):
    # Queue (data, offset=0) for encryption and immediately try to
    # drain the backlog through the SSL pipe.
    self._write_backlog.append((data, 0))
    self._write_buffer_size += len(data)
    self._process_write_backlog()
|
| 605 |
+
|
| 606 |
+
def _start_handshake(self):
    """Kick off the SSL handshake and arm its timeout."""
    if self._loop.get_debug():
        logger.debug("%r starts SSL handshake", self)
        # Start time is only tracked in debug mode, for the duration log
        # in _on_handshake_complete().
        self._handshake_start_time = self._loop.time()
    else:
        self._handshake_start_time = None
    self._in_handshake = True
    # (b'', 1) is a special value in _process_write_backlog() to do
    # the SSL handshake
    self._write_backlog.append((b'', 1))
    self._handshake_timeout_handle = \
        self._loop.call_later(self._ssl_handshake_timeout,
                              self._check_handshake_timeout)
    self._process_write_backlog()
|
| 620 |
+
|
| 621 |
+
def _check_handshake_timeout(self):
    # Fired by the call_later() armed in _start_handshake(). If the
    # handshake already finished this is a no-op; otherwise abort.
    if self._in_handshake is True:
        msg = (
            f"SSL handshake is taking longer than "
            f"{self._ssl_handshake_timeout} seconds: "
            f"aborting the connection"
        )
        self._fatal_error(ConnectionAbortedError(msg))
|
| 629 |
+
|
| 630 |
+
def _on_handshake_complete(self, handshake_exc):
    """Finish handshake processing.

    Called by the SSL pipe with *handshake_exc* set on failure, or None
    on success. On success, publishes SSL session info, notifies the app
    protocol and wakes the waiter.
    """
    self._in_handshake = False
    self._handshake_timeout_handle.cancel()

    sslobj = self._sslpipe.ssl_object
    try:
        if handshake_exc is not None:
            # Re-raise inside the try so failure and getpeercert()
            # errors share one error path.
            raise handshake_exc

        peercert = sslobj.getpeercert()
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as exc:
        if isinstance(exc, ssl.CertificateError):
            msg = 'SSL handshake failed on verifying the certificate'
        else:
            msg = 'SSL handshake failed'
        self._fatal_error(exc, msg)
        return

    if self._loop.get_debug():
        dt = self._loop.time() - self._handshake_start_time
        logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)

    # Add extra info that becomes available after handshake.
    self._extra.update(peercert=peercert,
                       cipher=sslobj.cipher(),
                       compression=sslobj.compression(),
                       ssl_object=sslobj,
                       )
    if self._call_connection_made:
        self._app_protocol.connection_made(self._app_transport)
    self._wakeup_waiter()
    self._session_established = True
    # In case transport.write() was already called. Don't call
    # immediately _process_write_backlog(), but schedule it:
    # _on_handshake_complete() can be called indirectly from
    # _process_write_backlog(), and _process_write_backlog() is not
    # reentrant.
    self._loop.call_soon(self._process_write_backlog)
|
| 670 |
+
|
| 671 |
+
def _process_write_backlog(self):
    """Push queued application data (and control sentinels) through the
    SSL pipe and write the resulting ciphertext to the transport.

    Backlog entries are (data, offset) pairs; (b'', 1) triggers the
    handshake and (b'', 0) triggers shutdown. Not reentrant.
    """
    # Try to make progress on the write backlog.
    if self._transport is None or self._sslpipe is None:
        return

    try:
        # Bounded by the current backlog length; entries appended during
        # iteration wait for the next call.
        for i in range(len(self._write_backlog)):
            data, offset = self._write_backlog[0]
            if data:
                ssldata, offset = self._sslpipe.feed_appdata(data, offset)
            elif offset:
                # Sentinel (b'', 1): run the handshake; completion is
                # reported via _on_handshake_complete.
                ssldata = self._sslpipe.do_handshake(
                    self._on_handshake_complete)
                offset = 1
            else:
                # Sentinel (b'', 0): orderly shutdown; _finalize runs
                # when the close_notify exchange ends.
                ssldata = self._sslpipe.shutdown(self._finalize)
                offset = 1

            for chunk in ssldata:
                self._transport.write(chunk)

            if offset < len(data):
                self._write_backlog[0] = (data, offset)
                # A short write means that a write is blocked on a read
                # We need to enable reading if it is paused!
                assert self._sslpipe.need_ssldata
                if self._transport._paused:
                    self._transport.resume_reading()
                break

            # An entire chunk from the backlog was processed. We can
            # delete it and reduce the outstanding buffer size.
            del self._write_backlog[0]
            self._write_buffer_size -= len(data)
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as exc:
        if self._in_handshake:
            # Exceptions will be re-raised in _on_handshake_complete.
            self._on_handshake_complete(exc)
        else:
            self._fatal_error(exc, 'Fatal error on SSL transport')
|
| 713 |
+
|
| 714 |
+
def _fatal_error(self, exc, message='Fatal error on transport'):
    """Report an unrecoverable error and force-close the transport.

    OSErrors are expected network failures and only logged in debug
    mode; anything else goes to the loop's exception handler.
    """
    if isinstance(exc, OSError):
        if self._loop.get_debug():
            logger.debug("%r: %s", self, message, exc_info=True)
    else:
        self._loop.call_exception_handler({
            'message': message,
            'exception': exc,
            'transport': self._transport,
            'protocol': self,
        })
    if self._transport:
        # Skip the orderly shutdown path; drop the connection now.
        self._transport._force_close(exc)
|
| 727 |
+
|
| 728 |
+
def _finalize(self):
    # Called when the TLS shutdown exchange completes: drop the pipe
    # and close the underlying transport.
    self._sslpipe = None

    if self._transport is not None:
        self._transport.close()
|
| 733 |
+
|
| 734 |
+
def _abort(self):
    # Hard-abort the transport; _finalize() always runs so the SSL pipe
    # is released even if abort() raises.
    try:
        if self._transport is not None:
            self._transport.abort()
    finally:
        self._finalize()
|
omnilmm/lib/python3.10/asyncio/subprocess.py
ADDED
|
@@ -0,0 +1,223 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = 'create_subprocess_exec', 'create_subprocess_shell'
|
| 2 |
+
|
| 3 |
+
import subprocess
|
| 4 |
+
|
| 5 |
+
from . import events
|
| 6 |
+
from . import protocols
|
| 7 |
+
from . import streams
|
| 8 |
+
from . import tasks
|
| 9 |
+
from .log import logger
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
PIPE = subprocess.PIPE
|
| 13 |
+
STDOUT = subprocess.STDOUT
|
| 14 |
+
DEVNULL = subprocess.DEVNULL
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class SubprocessStreamProtocol(streams.FlowControlMixin,
                               protocols.SubprocessProtocol):
    """Like StreamReaderProtocol, but for a subprocess."""

    def __init__(self, limit, loop):
        # limit: StreamReader buffer high-water mark for stdout/stderr.
        super().__init__(loop=loop)
        self._limit = limit
        self.stdin = self.stdout = self.stderr = None
        self._transport = None
        self._process_exited = False
        # File descriptors (1 and/or 2) whose pipes are still open.
        self._pipe_fds = []
        # Resolved when the stdin pipe is closed (see pipe_connection_lost).
        self._stdin_closed = self._loop.create_future()

    def __repr__(self):
        info = [self.__class__.__name__]
        if self.stdin is not None:
            info.append(f'stdin={self.stdin!r}')
        if self.stdout is not None:
            info.append(f'stdout={self.stdout!r}')
        if self.stderr is not None:
            info.append(f'stderr={self.stderr!r}')
        return '<{}>'.format(' '.join(info))

    def connection_made(self, transport):
        """Wrap the subprocess pipe transports in stream objects."""
        self._transport = transport

        stdout_transport = transport.get_pipe_transport(1)
        if stdout_transport is not None:
            self.stdout = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stdout.set_transport(stdout_transport)
            self._pipe_fds.append(1)

        stderr_transport = transport.get_pipe_transport(2)
        if stderr_transport is not None:
            self.stderr = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stderr.set_transport(stderr_transport)
            self._pipe_fds.append(2)

        stdin_transport = transport.get_pipe_transport(0)
        if stdin_transport is not None:
            self.stdin = streams.StreamWriter(stdin_transport,
                                              protocol=self,
                                              reader=None,
                                              loop=self._loop)

    def pipe_data_received(self, fd, data):
        # Route subprocess output to the matching reader; unknown fds
        # are silently dropped.
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            reader.feed_data(data)

    def pipe_connection_lost(self, fd, exc):
        # fd 0 is stdin (a writer): close it and resolve the close
        # waiter; fds 1/2 are readers: feed EOF or the exception.
        if fd == 0:
            pipe = self.stdin
            if pipe is not None:
                pipe.close()
            self.connection_lost(exc)
            if exc is None:
                self._stdin_closed.set_result(None)
            else:
                self._stdin_closed.set_exception(exc)
            return
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)

        if fd in self._pipe_fds:
            self._pipe_fds.remove(fd)
        self._maybe_close_transport()

    def process_exited(self):
        self._process_exited = True
        self._maybe_close_transport()

    def _maybe_close_transport(self):
        # The transport is closed only once all output pipes are closed
        # AND the process has exited.
        if len(self._pipe_fds) == 0 and self._process_exited:
            self._transport.close()
            self._transport = None

    def _get_close_waiter(self, stream):
        # FlowControlMixin hook: drain() on stdin waits on this future.
        if stream is self.stdin:
            return self._stdin_closed
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class Process:
    """High-level wrapper around a subprocess transport/protocol pair.

    Exposes stdin/stdout/stderr streams and process control
    (wait/signal/terminate/kill/communicate).
    """

    def __init__(self, transport, protocol, loop):
        self._transport = transport
        self._protocol = protocol
        self._loop = loop
        self.stdin = protocol.stdin
        self.stdout = protocol.stdout
        self.stderr = protocol.stderr
        self.pid = transport.get_pid()

    def __repr__(self):
        return f'<{self.__class__.__name__} {self.pid}>'

    @property
    def returncode(self):
        # None while the process is still running.
        return self._transport.get_returncode()

    async def wait(self):
        """Wait until the process exit and return the process return code."""
        return await self._transport._wait()

    def send_signal(self, signal):
        self._transport.send_signal(signal)

    def terminate(self):
        self._transport.terminate()

    def kill(self):
        self._transport.kill()

    async def _feed_stdin(self, input):
        # Helper for communicate(): write *input* to stdin, drain, close.
        debug = self._loop.get_debug()
        self.stdin.write(input)
        if debug:
            logger.debug(
                '%r communicate: feed stdin (%s bytes)', self, len(input))
        try:
            await self.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            # communicate() ignores BrokenPipeError and ConnectionResetError
            if debug:
                logger.debug('%r communicate: stdin got %r', self, exc)

        if debug:
            logger.debug('%r communicate: close stdin', self)
        self.stdin.close()

    async def _noop(self):
        # Placeholder coroutine for streams not captured by communicate().
        return None

    async def _read_stream(self, fd):
        # Helper for communicate(): read a pipe (fd 1 or 2) to EOF and
        # close its transport.
        transport = self._transport.get_pipe_transport(fd)
        if fd == 2:
            stream = self.stderr
        else:
            assert fd == 1
            stream = self.stdout
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: read %s', self, name)
        output = await stream.read()
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: close %s', self, name)
        transport.close()
        return output

    async def communicate(self, input=None):
        """Feed *input* to stdin (if given), read stdout/stderr to EOF
        concurrently, wait for exit, and return (stdout, stderr).
        """
        if input is not None:
            stdin = self._feed_stdin(input)
        else:
            stdin = self._noop()
        if self.stdout is not None:
            stdout = self._read_stream(1)
        else:
            stdout = self._noop()
        if self.stderr is not None:
            stderr = self._read_stream(2)
        else:
            stderr = self._noop()
        # Run all three concurrently to avoid pipe-buffer deadlocks.
        stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr)
        await self.wait()
        return (stdout, stderr)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
                                  limit=streams._DEFAULT_LIMIT, **kwds):
    """Run *cmd* through the shell and return a Process wrapper.

    stdin/stdout/stderr accept PIPE, DEVNULL, STDOUT, a file object or
    None; *limit* sets the StreamReader buffer limit; remaining keyword
    arguments are forwarded to loop.subprocess_shell().
    """
    loop = events.get_running_loop()
    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                        loop=loop)
    transport, protocol = await loop.subprocess_shell(
        protocol_factory,
        cmd, stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
                                 stderr=None, limit=streams._DEFAULT_LIMIT,
                                 **kwds):
    """Execute *program* with *args* (no shell) and return a Process.

    Mirrors create_subprocess_shell() but forwards to
    loop.subprocess_exec() with an argument vector.
    """
    loop = events.get_running_loop()
    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                        loop=loop)
    transport, protocol = await loop.subprocess_exec(
        protocol_factory,
        program, *args,
        stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
|
omnilmm/lib/python3.10/asyncio/tasks.py
ADDED
|
@@ -0,0 +1,946 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Support for tasks, coroutines and the scheduler."""
|
| 2 |
+
|
| 3 |
+
__all__ = (
|
| 4 |
+
'Task', 'create_task',
|
| 5 |
+
'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
|
| 6 |
+
'wait', 'wait_for', 'as_completed', 'sleep',
|
| 7 |
+
'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe',
|
| 8 |
+
'current_task', 'all_tasks',
|
| 9 |
+
'_register_task', '_unregister_task', '_enter_task', '_leave_task',
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
import concurrent.futures
|
| 13 |
+
import contextvars
|
| 14 |
+
import functools
|
| 15 |
+
import inspect
|
| 16 |
+
import itertools
|
| 17 |
+
import types
|
| 18 |
+
import warnings
|
| 19 |
+
import weakref
|
| 20 |
+
from types import GenericAlias
|
| 21 |
+
|
| 22 |
+
from . import base_tasks
|
| 23 |
+
from . import coroutines
|
| 24 |
+
from . import events
|
| 25 |
+
from . import exceptions
|
| 26 |
+
from . import futures
|
| 27 |
+
from .coroutines import _is_coroutine
|
| 28 |
+
|
| 29 |
+
# Helper to generate new task names
|
| 30 |
+
# This uses itertools.count() instead of a "+= 1" operation because the latter
|
| 31 |
+
# is not thread safe. See bpo-11866 for a longer explanation.
|
| 32 |
+
_task_name_counter = itertools.count(1).__next__
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def current_task(loop=None):
    """Return a currently executed task."""
    if loop is None:
        loop = events.get_running_loop()
    # _current_tasks maps each loop to the task it is currently running;
    # returns None if the loop isn't executing a task.
    return _current_tasks.get(loop)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def all_tasks(loop=None):
    """Return a set of all tasks for the loop."""
    if loop is None:
        loop = events.get_running_loop()
    # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another
    # thread while we do so. Therefore we cast it to list prior to filtering. The list
    # cast itself requires iteration, so we repeat it several times ignoring
    # RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for
    # details.
    i = 0
    while True:
        try:
            tasks = list(_all_tasks)
        except RuntimeError:
            i += 1
            # Give up after 1000 consecutive concurrent-mutation retries.
            if i >= 1000:
                raise
        else:
            break
    # Keep only live (not done) tasks belonging to *loop*.
    return {t for t in tasks
            if futures._get_loop(t) is loop and not t.done()}
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _set_task_name(task, name):
|
| 66 |
+
if name is not None:
|
| 67 |
+
try:
|
| 68 |
+
set_name = task.set_name
|
| 69 |
+
except AttributeError:
|
| 70 |
+
pass
|
| 71 |
+
else:
|
| 72 |
+
set_name(name)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class Task(futures._PyFuture): # Inherit Python Task implementation
|
| 76 |
+
# from a Python Future implementation.
|
| 77 |
+
|
| 78 |
+
"""A coroutine wrapped in a Future."""
|
| 79 |
+
|
| 80 |
+
# An important invariant maintained while a Task not done:
|
| 81 |
+
#
|
| 82 |
+
# - Either _fut_waiter is None, and _step() is scheduled;
|
| 83 |
+
# - or _fut_waiter is some Future, and _step() is *not* scheduled.
|
| 84 |
+
#
|
| 85 |
+
# The only transition from the latter to the former is through
|
| 86 |
+
# _wakeup(). When _fut_waiter is not None, one of its callbacks
|
| 87 |
+
# must be _wakeup().
|
| 88 |
+
|
| 89 |
+
# If False, don't log a message if the task is destroyed whereas its
|
| 90 |
+
# status is still pending
|
| 91 |
+
_log_destroy_pending = True
|
| 92 |
+
|
| 93 |
+
def __init__(self, coro, *, loop=None, name=None):
    """Wrap coroutine *coro* in a Task and schedule its first step.

    Raises TypeError if *coro* is not a coroutine object.
    """
    super().__init__(loop=loop)
    if self._source_traceback:
        # Drop the frame of this constructor from the captured traceback.
        del self._source_traceback[-1]
    if not coroutines.iscoroutine(coro):
        # raise after Future.__init__(), attrs are required for __del__
        # prevent logging for pending task in __del__
        self._log_destroy_pending = False
        raise TypeError(f"a coroutine was expected, got {coro!r}")

    if name is None:
        # Auto-generate a unique name via the thread-safe counter.
        self._name = f'Task-{_task_name_counter()}'
    else:
        self._name = str(name)

    self._must_cancel = False
    self._fut_waiter = None
    self._coro = coro
    # Snapshot the context so every step of the coroutine runs in it.
    self._context = contextvars.copy_context()

    self._loop.call_soon(self.__step, context=self._context)
    _register_task(self)
|
| 115 |
+
|
| 116 |
+
def __del__(self):
    # Warn via the loop's exception handler when a still-pending task is
    # garbage-collected (usually a forgotten await), unless suppressed
    # by _log_destroy_pending.
    if self._state == futures._PENDING and self._log_destroy_pending:
        context = {
            'task': self,
            'message': 'Task was destroyed but it is pending!',
        }
        if self._source_traceback:
            context['source_traceback'] = self._source_traceback
        self._loop.call_exception_handler(context)
    super().__del__()
|
| 126 |
+
|
| 127 |
+
__class_getitem__ = classmethod(GenericAlias)
|
| 128 |
+
|
| 129 |
+
def _repr_info(self):
    # Delegate repr construction to the shared helper in base_tasks.
    return base_tasks._task_repr_info(self)
|
| 131 |
+
|
| 132 |
+
def get_coro(self):
    """Return the coroutine object wrapped by this Task."""
    return self._coro
|
| 134 |
+
|
| 135 |
+
def get_name(self):
    """Return the name of this Task."""
    return self._name
|
| 137 |
+
|
| 138 |
+
def set_name(self, value):
    """Set the name of this Task (coerced to str)."""
    self._name = str(value)
|
| 140 |
+
|
| 141 |
+
def set_result(self, result):
    # A Task's result comes only from its wrapped coroutine; external
    # callers must not set it, unlike a plain Future.
    raise RuntimeError('Task does not support set_result operation')
|
| 143 |
+
|
| 144 |
+
def set_exception(self, exception):
    # Mirrors set_result(): Task state is driven by the coroutine only.
    raise RuntimeError('Task does not support set_exception operation')
|
| 146 |
+
|
| 147 |
+
def get_stack(self, *, limit=None):
    """Return the list of stack frames for this task's coroutine.

    If the coroutine is not done, this returns the stack where it is
    suspended. If the coroutine has completed successfully or was
    cancelled, this returns an empty list. If the coroutine was
    terminated by an exception, this returns the list of traceback
    frames.

    The frames are always ordered from oldest to newest.

    The optional limit gives the maximum number of frames to
    return; by default all available frames are returned. Its
    meaning differs depending on whether a stack or a traceback is
    returned: the newest frames of a stack are returned, but the
    oldest frames of a traceback are returned. (This matches the
    behavior of the traceback module.)

    For reasons beyond our control, only one stack frame is
    returned for a suspended coroutine.
    """
    # Implementation shared with the C accelerator via base_tasks.
    return base_tasks._task_get_stack(self, limit)
|
| 169 |
+
|
| 170 |
+
def print_stack(self, *, limit=None, file=None):
    """Print the stack or traceback for this task's coroutine.

    This produces output similar to that of the traceback module,
    for the frames retrieved by get_stack(). The limit argument
    is passed to get_stack(). The file argument is an I/O stream
    to which the output is written; by default output is written
    to sys.stderr.
    """
    # Implementation shared with the C accelerator via base_tasks.
    return base_tasks._task_print_stack(self, limit, file)
|
| 180 |
+
|
| 181 |
+
def cancel(self, msg=None):
    """Request that this task cancel itself.

    This arranges for a CancelledError to be thrown into the
    wrapped coroutine on the next cycle through the event loop.
    The coroutine then has a chance to clean up or even deny
    the request using try/except/finally.

    Unlike Future.cancel, this does not guarantee that the
    task will be cancelled: the exception might be caught and
    acted upon, delaying cancellation of the task or preventing
    cancellation completely. The task may also return a value or
    raise a different exception.

    Immediately after this method is called, Task.cancelled() will
    not return True (unless the task was already cancelled). A
    task will be marked as cancelled when the wrapped coroutine
    terminates with a CancelledError exception (even if cancel()
    was not called).
    """
    # Cancellation is normal termination; don't log the traceback.
    self._log_traceback = False
    if self.done():
        return False
    if self._fut_waiter is not None:
        # The task is suspended on a future: cancel that future so the
        # CancelledError propagates into the coroutine at wakeup.
        if self._fut_waiter.cancel(msg=msg):
            # Leave self._fut_waiter; it may be a Task that
            # catches and ignores the cancellation so we may have
            # to cancel it again later.
            return True
    # It must be the case that self.__step is already scheduled.
    self._must_cancel = True
    self._cancel_message = msg
    return True
|
| 214 |
+
|
| 215 |
+
def __step(self, exc=None):
    """Advance the wrapped coroutine by one step.

    Sends None into the coroutine (or throws *exc* at it), then interprets
    what it produced: completion, cancellation, an error, or a yielded
    value (an asyncio Future, a bare yield, or an illegal object).
    Completing/cancelling the Task itself is done via the super() calls,
    i.e. directly on the Future base class.
    """
    if self.done():
        raise exceptions.InvalidStateError(
            f'_step(): already done: {self!r}, {exc!r}')
    # A pending cancel() request (set while no future was awaited) is
    # delivered here by substituting a CancelledError for *exc*.
    if self._must_cancel:
        if not isinstance(exc, exceptions.CancelledError):
            exc = self._make_cancelled_error()
        self._must_cancel = False
    coro = self._coro
    self._fut_waiter = None

    _enter_task(self._loop, self)
    # Call either coro.throw(exc) or coro.send(None).
    try:
        if exc is None:
            # We use the `send` method directly, because coroutines
            # don't have `__iter__` and `__next__` methods.
            result = coro.send(None)
        else:
            result = coro.throw(exc)
    except StopIteration as exc:
        if self._must_cancel:
            # Task is cancelled right before coro stops.
            self._must_cancel = False
            super().cancel(msg=self._cancel_message)
        else:
            super().set_result(exc.value)
    except exceptions.CancelledError as exc:
        # Save the original exception so we can chain it later.
        self._cancelled_exc = exc
        super().cancel()  # I.e., Future.cancel(self).
    except (KeyboardInterrupt, SystemExit) as exc:
        # Record the exception on the task but keep propagating it --
        # these must not be swallowed by the event loop.
        super().set_exception(exc)
        raise
    except BaseException as exc:
        super().set_exception(exc)
    else:
        # The coroutine yielded; decide how to proceed based on the value.
        blocking = getattr(result, '_asyncio_future_blocking', None)
        if blocking is not None:
            # Yielded Future must come from Future.__iter__().
            if futures._get_loop(result) is not self._loop:
                new_exc = RuntimeError(
                    f'Task {self!r} got Future '
                    f'{result!r} attached to a different loop')
                self._loop.call_soon(
                    self.__step, new_exc, context=self._context)
            elif blocking:
                if result is self:
                    new_exc = RuntimeError(
                        f'Task cannot await on itself: {self!r}')
                    self._loop.call_soon(
                        self.__step, new_exc, context=self._context)
                else:
                    # Suspend on the yielded future; __wakeup resumes us.
                    result._asyncio_future_blocking = False
                    result.add_done_callback(
                        self.__wakeup, context=self._context)
                    self._fut_waiter = result
                    # A cancel() that raced with this step is forwarded to
                    # the future we just started waiting on.
                    if self._must_cancel:
                        if self._fut_waiter.cancel(
                                msg=self._cancel_message):
                            self._must_cancel = False
            else:
                new_exc = RuntimeError(
                    f'yield was used instead of yield from '
                    f'in task {self!r} with {result!r}')
                self._loop.call_soon(
                    self.__step, new_exc, context=self._context)

        elif result is None:
            # Bare yield relinquishes control for one event loop iteration.
            self._loop.call_soon(self.__step, context=self._context)
        elif inspect.isgenerator(result):
            # Yielding a generator is just wrong.
            new_exc = RuntimeError(
                f'yield was used instead of yield from for '
                f'generator in task {self!r} with {result!r}')
            self._loop.call_soon(
                self.__step, new_exc, context=self._context)
        else:
            # Yielding something else is an error.
            new_exc = RuntimeError(f'Task got bad yield: {result!r}')
            self._loop.call_soon(
                self.__step, new_exc, context=self._context)
    finally:
        _leave_task(self._loop, self)
        self = None  # Needed to break cycles when an exception occurs.
|
| 301 |
+
|
| 302 |
+
def __wakeup(self, future):
    """Done-callback: resume the task once the awaited *future* finishes."""
    try:
        future.result()
    except BaseException as exc:
        # This may also be a cancellation.
        self.__step(exc)
    else:
        # Don't pass the value of `future.result()` explicitly,
        # as `Future.__iter__` and `Future.__await__` don't need it.
        # If we call `_step(value, None)` instead of `_step()`,
        # Python eval loop would use `.send(value)` method call,
        # instead of `__next__()`, which is slower for futures
        # that return non-generator iterators from their `__iter__`.
        self.__step()
    self = None  # Needed to break cycles when an exception occurs.
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
_PyTask = Task
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
try:
|
| 323 |
+
import _asyncio
|
| 324 |
+
except ImportError:
|
| 325 |
+
pass
|
| 326 |
+
else:
|
| 327 |
+
# _CTask is needed for tests.
|
| 328 |
+
Task = _CTask = _asyncio.Task
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
def create_task(coro, *, name=None):
    """Wrap *coro* in a Task and schedule it on the running event loop.

    Returns the newly created Task object.  If *name* is given, it is
    applied to the task via _set_task_name().  Must be called from a
    coroutine or callback running in an event loop.
    """
    running_loop = events.get_running_loop()
    new_task = running_loop.create_task(coro)
    _set_task_name(new_task, name)
    return new_task
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
# wait() and as_completed() similar to those in PEP 3148.

# Re-export the concurrent.futures return_when constants so callers can
# pass them straight to asyncio.wait().
FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
async def wait(fs, *, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the Futures and coroutines given by fs to complete.

    The fs iterable must not be empty.

    Coroutines will be wrapped in Tasks.

    Returns two sets of Future: (done, pending).

    Usage:

        done, pending = await asyncio.wait(fs)

    Note: This does not raise TimeoutError! Futures that aren't done
    when the timeout occurs are returned in the second set.
    """
    # Reject a single future/coroutine: fs must be an iterable of them.
    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
        raise TypeError(f"expect a list of futures, not {type(fs).__name__}")
    if not fs:
        raise ValueError('Set of coroutines/Futures is empty.')
    if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
        raise ValueError(f'Invalid return_when value: {return_when}')

    loop = events.get_running_loop()

    # Deduplicate before wrapping so the same object is only wrapped once.
    fs = set(fs)

    if any(coroutines.iscoroutine(f) for f in fs):
        warnings.warn("The explicit passing of coroutine objects to "
                      "asyncio.wait() is deprecated since Python 3.8, and "
                      "scheduled for removal in Python 3.11.",
                      DeprecationWarning, stacklevel=2)

    # Wrap any coroutines/awaitables in Tasks bound to the running loop.
    fs = {ensure_future(f, loop=loop) for f in fs}

    return await _wait(fs, timeout, return_when, loop)
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
def _release_waiter(waiter, *args):
|
| 388 |
+
if not waiter.done():
|
| 389 |
+
waiter.set_result(None)
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
async def wait_for(fut, timeout):
    """Wait for the single Future or coroutine to complete, with timeout.

    Coroutine will be wrapped in Task.

    Returns result of the Future or coroutine. When a timeout occurs,
    it cancels the task and raises TimeoutError. To avoid the task
    cancellation, wrap it in shield().

    If the wait is cancelled, the task is also cancelled.

    This function is a coroutine.
    """
    loop = events.get_running_loop()

    if timeout is None:
        # No timeout: just delegate to the awaitable itself.
        return await fut

    if timeout <= 0:
        # Already-expired timeout: give the future exactly one chance to
        # be done, otherwise cancel it and raise TimeoutError.
        fut = ensure_future(fut, loop=loop)

        if fut.done():
            return fut.result()

        await _cancel_and_wait(fut, loop=loop)
        try:
            return fut.result()
        except exceptions.CancelledError as exc:
            raise exceptions.TimeoutError() from exc

    # Normal path: race the future against a timer via a waiter future.
    waiter = loop.create_future()
    timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
    cb = functools.partial(_release_waiter, waiter)

    fut = ensure_future(fut, loop=loop)
    fut.add_done_callback(cb)

    try:
        # wait until the future completes or the timeout
        try:
            await waiter
        except exceptions.CancelledError:
            # wait_for() itself was cancelled.
            if fut.done():
                return fut.result()
            else:
                fut.remove_done_callback(cb)
                # We must ensure that the task is not running
                # after wait_for() returns.
                # See https://bugs.python.org/issue32751
                await _cancel_and_wait(fut, loop=loop)
                raise

        if fut.done():
            return fut.result()
        else:
            # Timer fired first: cancel the task before raising.
            fut.remove_done_callback(cb)
            # We must ensure that the task is not running
            # after wait_for() returns.
            # See https://bugs.python.org/issue32751
            await _cancel_and_wait(fut, loop=loop)
            # In case task cancellation failed with some
            # exception, we should re-raise it
            # See https://bugs.python.org/issue40607
            try:
                return fut.result()
            except exceptions.CancelledError as exc:
                raise exceptions.TimeoutError() from exc
    finally:
        timeout_handle.cancel()
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
async def _wait(fs, timeout, return_when, loop):
    """Internal helper for wait().

    The fs argument must be a collection of Futures.
    """
    assert fs, 'Set of Futures is empty.'
    waiter = loop.create_future()
    timeout_handle = None
    if timeout is not None:
        timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
    counter = len(fs)

    def _on_completion(f):
        # Runs once per finished child; decides whether the wait is over
        # according to *return_when*.
        nonlocal counter
        counter -= 1
        if (counter <= 0 or
            return_when == FIRST_COMPLETED or
            return_when == FIRST_EXCEPTION and (not f.cancelled() and
                                                f.exception() is not None)):
            if timeout_handle is not None:
                timeout_handle.cancel()
            if not waiter.done():
                waiter.set_result(None)

    for f in fs:
        f.add_done_callback(_on_completion)

    try:
        await waiter
    finally:
        # Always detach our callbacks, even if the await was cancelled.
        if timeout_handle is not None:
            timeout_handle.cancel()
        for f in fs:
            f.remove_done_callback(_on_completion)

    # Partition the children by completion state at this moment.
    done, pending = set(), set()
    for f in fs:
        if f.done():
            done.add(f)
        else:
            pending.add(f)
    return done, pending
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
async def _cancel_and_wait(fut, loop):
    """Cancel *fut* (a future or task) and block until it finishes."""
    done_signal = loop.create_future()
    on_done = functools.partial(_release_waiter, done_signal)
    fut.add_done_callback(on_done)

    try:
        fut.cancel()
        # Await a separate signal future rather than *fut* itself, so that
        # _cancel_and_wait stays reliably cancellable.
        await done_signal
    finally:
        fut.remove_done_callback(on_done)
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
# This is *not* a @coroutine!  It is just an iterator (yielding Futures).
def as_completed(fs, *, timeout=None):
    """Return an iterator whose values are coroutines.

    When waiting for the yielded coroutines you'll get the results (or
    exceptions!) of the original Futures (or coroutines), in the order
    in which and as soon as they complete.

    This differs from PEP 3148; the proper way to use this is:

        for f in as_completed(fs):
            result = await f  # The 'await' may raise.
            # Use result.

    If a timeout is specified, the 'await' will raise
    TimeoutError when the timeout occurs before all Futures are done.

    Note: The futures 'f' are not necessarily members of fs.
    """
    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
        raise TypeError(f"expect an iterable of futures, not {type(fs).__name__}")

    from .queues import Queue  # Import here to avoid circular import problem.
    done = Queue()

    loop = events._get_event_loop()
    # 'todo' holds children not yet completed; shared with the closures.
    todo = {ensure_future(f, loop=loop) for f in set(fs)}
    timeout_handle = None

    def _on_timeout():
        # Detach every remaining child and enqueue one dummy None per
        # child so each pending _wait_for_one() raises TimeoutError.
        for f in todo:
            f.remove_done_callback(_on_completion)
            done.put_nowait(None)  # Queue a dummy value for _wait_for_one().
        todo.clear()  # Can't do todo.remove(f) in the loop.

    def _on_completion(f):
        if not todo:
            return  # _on_timeout() was here first.
        todo.remove(f)
        done.put_nowait(f)
        if not todo and timeout_handle is not None:
            # Everything finished before the deadline; drop the timer.
            timeout_handle.cancel()

    async def _wait_for_one():
        f = await done.get()
        if f is None:
            # Dummy value from _on_timeout().
            raise exceptions.TimeoutError
        return f.result()  # May raise f.exception().

    for f in todo:
        f.add_done_callback(_on_completion)
    if todo and timeout is not None:
        timeout_handle = loop.call_later(timeout, _on_timeout)
    for _ in range(len(todo)):
        yield _wait_for_one()
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
@types.coroutine
def __sleep0():
    """Skip one event loop run cycle.

    This is a private helper for 'asyncio.sleep()', used
    when the 'delay' is set to 0. It uses a bare 'yield'
    expression (which Task.__step knows how to handle)
    instead of creating a Future object.
    """
    yield
|
| 591 |
+
|
| 592 |
+
|
| 593 |
+
async def sleep(delay, result=None):
    """Pause for *delay* seconds, then return *result*.

    A non-positive *delay* yields to the event loop exactly once
    without allocating a Future.
    """
    if delay <= 0:
        await __sleep0()
        return result

    loop = events.get_running_loop()
    waker = loop.create_future()
    handle = loop.call_later(
        delay, futures._set_result_unless_cancelled, waker, result)
    try:
        return await waker
    finally:
        # Drop the timer if the sleep was cancelled before it fired.
        handle.cancel()
|
| 608 |
+
|
| 609 |
+
|
| 610 |
+
def ensure_future(coro_or_future, *, loop=None):
    """Wrap a coroutine or an awaitable in a future.

    If the argument is a Future, it is returned directly.
    """
    # Public thin wrapper; the real logic lives in _ensure_future().
    return _ensure_future(coro_or_future, loop=loop)
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
def _ensure_future(coro_or_future, *, loop=None):
    """Implementation of ensure_future().

    Futures are returned unchanged (after a loop-consistency check);
    coroutines and other awaitables are scheduled as Tasks on *loop*
    (or the current event loop when *loop* is None).
    """
    if futures.isfuture(coro_or_future):
        if loop is not None and loop is not futures._get_loop(coro_or_future):
            raise ValueError('The future belongs to a different loop than '
                             'the one specified as the loop argument')
        return coro_or_future
    # Track whether we wrapped a generic awaitable: if create_task fails,
    # we must not close() a wrapper we created (only a caller-supplied
    # coroutine), to avoid masking the caller's object.
    called_wrap_awaitable = False
    if not coroutines.iscoroutine(coro_or_future):
        if inspect.isawaitable(coro_or_future):
            coro_or_future = _wrap_awaitable(coro_or_future)
            called_wrap_awaitable = True
        else:
            raise TypeError('An asyncio.Future, a coroutine or an awaitable '
                            'is required')

    if loop is None:
        loop = events._get_event_loop(stacklevel=4)
    try:
        return loop.create_task(coro_or_future)
    except RuntimeError:
        if not called_wrap_awaitable:
            # Close the caller's coroutine so it does not leak un-awaited.
            coro_or_future.close()
        raise
|
| 641 |
+
|
| 642 |
+
|
| 643 |
+
@types.coroutine
def _wrap_awaitable(awaitable):
    """Helper for asyncio.ensure_future().

    Wraps awaitable (an object with __await__) into a coroutine
    that will later be wrapped in a Task by ensure_future().
    """
    return (yield from awaitable.__await__())

# Tag the wrapper so coroutines.iscoroutine() recognizes it as an
# asyncio-compatible coroutine function.
_wrap_awaitable._is_coroutine = _is_coroutine
|
| 653 |
+
|
| 654 |
+
|
| 655 |
+
class _GatheringFuture(futures.Future):
    """Future returned by gather().

    Overrides cancel() to fan the cancellation out to every child and,
    like Task.cancel(), to avoid marking this future as cancelled
    immediately.
    """

    def __init__(self, children, *, loop):
        assert loop is not None
        super().__init__(loop=loop)
        self._children = children
        self._cancel_requested = False

    def cancel(self, msg=None):
        if self.done():
            return False
        # Cancel every child unconditionally (a list comprehension is used
        # on purpose: any() over a generator would stop at the first True).
        cancelled_flags = [child.cancel(msg=msg)
                           for child in self._children]
        if not any(cancelled_flags):
            return False
        # If any child tasks were actually cancelled, we should
        # propagate the cancellation request regardless of
        # *return_exceptions* argument. See issue 32684.
        self._cancel_requested = True
        return True
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
def gather(*coros_or_futures, return_exceptions=False):
    """Return a future aggregating results from the given coroutines/futures.

    Coroutines will be wrapped in a future and scheduled in the event
    loop. They will not necessarily be scheduled in the same order as
    passed in.

    All futures must share the same event loop. If all the tasks are
    done successfully, the returned future's result is the list of
    results (in the order of the original sequence, not necessarily
    the order of results arrival). If *return_exceptions* is True,
    exceptions in the tasks are treated the same as successful
    results, and gathered in the result list; otherwise, the first
    raised exception will be immediately propagated to the returned
    future.

    Cancellation: if the outer Future is cancelled, all children (that
    have not completed yet) are also cancelled. If any child is
    cancelled, this is treated as if it raised CancelledError --
    the outer Future is *not* cancelled in this case. (This is to
    prevent the cancellation of one child to cause other children to
    be cancelled.)

    If *return_exceptions* is False, cancelling gather() after it
    has been marked done won't cancel any submitted awaitables.
    For instance, gather can be marked done after propagating an
    exception to the caller, therefore, calling ``gather.cancel()``
    after catching an exception (raised by one of the awaitables) from
    gather won't cancel any other awaitables.
    """
    if not coros_or_futures:
        # Fast path: nothing to gather; resolve immediately with [].
        loop = events._get_event_loop()
        outer = loop.create_future()
        outer.set_result([])
        return outer

    def _done_callback(fut):
        # Runs once per finished child; completes 'outer' when appropriate.
        nonlocal nfinished
        nfinished += 1

        if outer is None or outer.done():
            # 'outer' is None while children are still being registered
            # (bpo-46672); in either case just mark the exception retrieved.
            if not fut.cancelled():
                # Mark exception retrieved.
                fut.exception()
            return

        if not return_exceptions:
            if fut.cancelled():
                # Check if 'fut' is cancelled first, as
                # 'fut.exception()' will *raise* a CancelledError
                # instead of returning it.
                exc = fut._make_cancelled_error()
                outer.set_exception(exc)
                return
            else:
                exc = fut.exception()
                if exc is not None:
                    outer.set_exception(exc)
                    return

        if nfinished == nfuts:
            # All futures are done; create a list of results
            # and set it to the 'outer' future.
            results = []

            for fut in children:
                if fut.cancelled():
                    # Check if 'fut' is cancelled first, as 'fut.exception()'
                    # will *raise* a CancelledError instead of returning it.
                    # Also, since we're adding the exception return value
                    # to 'results' instead of raising it, don't bother
                    # setting __context__. This also lets us preserve
                    # calling '_make_cancelled_error()' at most once.
                    res = exceptions.CancelledError(
                        '' if fut._cancel_message is None else
                        fut._cancel_message)
                else:
                    res = fut.exception()
                    if res is None:
                        res = fut.result()
                results.append(res)

            if outer._cancel_requested:
                # If gather is being cancelled we must propagate the
                # cancellation regardless of *return_exceptions* argument.
                # See issue 32684.
                exc = fut._make_cancelled_error()
                outer.set_exception(exc)
            else:
                outer.set_result(results)

    arg_to_fut = {}
    children = []
    nfuts = 0
    nfinished = 0
    loop = None
    outer = None  # bpo-46672
    for arg in coros_or_futures:
        if arg not in arg_to_fut:
            fut = _ensure_future(arg, loop=loop)
            if loop is None:
                # Adopt the loop of the first child for all later wrapping.
                loop = futures._get_loop(fut)
            if fut is not arg:
                # 'arg' was not a Future, therefore, 'fut' is a new
                # Future created specifically for 'arg'. Since the caller
                # can't control it, disable the "destroy pending task"
                # warning.
                fut._log_destroy_pending = False

            nfuts += 1
            arg_to_fut[arg] = fut
            fut.add_done_callback(_done_callback)

        else:
            # There's a duplicate Future object in coros_or_futures.
            fut = arg_to_fut[arg]

        children.append(fut)

    outer = _GatheringFuture(children, loop=loop)
    return outer
|
| 805 |
+
|
| 806 |
+
|
| 807 |
+
def shield(arg):
    """Wait for a future, shielding it from cancellation.

    The statement

        task = asyncio.create_task(something())
        res = await shield(task)

    is exactly equivalent to the statement

        res = await something()

    *except* that if the coroutine containing it is cancelled, the
    task running in something() is not cancelled. From the POV of
    something(), the cancellation did not happen. But its caller is
    still cancelled, so the yield-from expression still raises
    CancelledError. Note: If something() is cancelled by other means
    this will still cancel shield().

    If you want to completely ignore cancellation (not recommended)
    you can combine shield() with a try/except clause, as follows:

        task = asyncio.create_task(something())
        try:
            res = await shield(task)
        except CancelledError:
            res = None

    Save a reference to tasks passed to this function, to avoid
    a task disappearing mid-execution. The event loop only keeps
    weak references to tasks. A task that isn't referenced elsewhere
    may get garbage collected at any time, even before it's done.
    """
    inner = _ensure_future(arg)
    if inner.done():
        # Shortcut.
        return inner
    loop = futures._get_loop(inner)
    # 'outer' is what the caller awaits; it mirrors 'inner' but can be
    # cancelled independently without cancelling 'inner'.
    outer = loop.create_future()

    def _inner_done_callback(inner):
        if outer.cancelled():
            if not inner.cancelled():
                # Mark inner's result as retrieved.
                inner.exception()
            return

        # Copy inner's outcome (cancellation, exception, or result) to outer.
        if inner.cancelled():
            outer.cancel()
        else:
            exc = inner.exception()
            if exc is not None:
                outer.set_exception(exc)
            else:
                outer.set_result(inner.result())


    def _outer_done_callback(outer):
        # If outer finished first (e.g. was cancelled), stop mirroring.
        if not inner.done():
            inner.remove_done_callback(_inner_done_callback)

    inner.add_done_callback(_inner_done_callback)
    outer.add_done_callback(_outer_done_callback)
    return outer
|
| 871 |
+
|
| 872 |
+
|
| 873 |
+
def run_coroutine_threadsafe(coro, loop):
    """Submit a coroutine object to a given event loop from another thread.

    Returns a concurrent.futures.Future that resolves with the
    coroutine's result (or exception).  Thread-safe.
    """
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required')
    cf_future = concurrent.futures.Future()

    def _schedule():
        # Runs in the event-loop thread: wrap the coroutine in a Task and
        # chain its outcome into the concurrent future.
        try:
            futures._chain_future(ensure_future(coro, loop=loop), cf_future)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            # Report scheduling failures to the caller's future, then
            # re-raise so the loop's exception handler also sees them.
            if cf_future.set_running_or_notify_cancel():
                cf_future.set_exception(exc)
            raise

    loop.call_soon_threadsafe(_schedule)
    return cf_future
|
| 894 |
+
|
| 895 |
+
|
| 896 |
+
# WeakSet containing all alive tasks.
# (Weak references only: the registry must not keep tasks alive.)
_all_tasks = weakref.WeakSet()

# Dictionary containing tasks that are currently active in
# all running event loops. {EventLoop: Task}
_current_tasks = {}
|
| 902 |
+
|
| 903 |
+
|
| 904 |
+
def _register_task(task):
    """Register a new task in asyncio as executed by loop."""
    # WeakSet membership: the task is dropped automatically when collected.
    _all_tasks.add(task)
|
| 907 |
+
|
| 908 |
+
|
| 909 |
+
def _enter_task(loop, task):
    """Record *task* as the task currently executing in *loop*.

    Raises RuntimeError if another task is already marked current there.
    """
    current_task = _current_tasks.get(loop)
    if current_task is None:
        _current_tasks[loop] = task
    else:
        raise RuntimeError(f"Cannot enter into task {task!r} while another "
                           f"task {current_task!r} is being executed.")
|
| 915 |
+
|
| 916 |
+
|
| 917 |
+
def _leave_task(loop, task):
    """Clear *task* as the current task of *loop*; it must match.

    Raises RuntimeError if a different task is recorded as current.
    """
    current_task = _current_tasks.get(loop)
    if current_task is task:
        del _current_tasks[loop]
    else:
        raise RuntimeError(f"Leaving task {task!r} does not match "
                           f"the current task {current_task!r}.")
|
| 923 |
+
|
| 924 |
+
|
| 925 |
+
def _unregister_task(task):
    """Unregister a task."""
    # discard() (not remove()) so double-unregistration is harmless.
    _all_tasks.discard(task)
|
| 928 |
+
|
| 929 |
+
|
| 930 |
+
_py_register_task = _register_task
|
| 931 |
+
_py_unregister_task = _unregister_task
|
| 932 |
+
_py_enter_task = _enter_task
|
| 933 |
+
_py_leave_task = _leave_task
|
| 934 |
+
|
| 935 |
+
|
| 936 |
+
try:
|
| 937 |
+
from _asyncio import (_register_task, _unregister_task,
|
| 938 |
+
_enter_task, _leave_task,
|
| 939 |
+
_all_tasks, _current_tasks)
|
| 940 |
+
except ImportError:
|
| 941 |
+
pass
|
| 942 |
+
else:
|
| 943 |
+
_c_register_task = _register_task
|
| 944 |
+
_c_unregister_task = _unregister_task
|
| 945 |
+
_c_enter_task = _enter_task
|
| 946 |
+
_c_leave_task = _leave_task
|
omnilmm/lib/python3.10/asyncio/threads.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""High-level support for working with threads in asyncio"""
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
import contextvars
|
| 5 |
+
|
| 6 |
+
from . import events
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
__all__ = "to_thread",
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
async def to_thread(func, /, *args, **kwargs):
    """Asynchronously run *func*(*args, **kwargs) in a separate thread.

    The caller's contextvars.Context is copied into the worker thread,
    so context variables set in the event-loop thread are visible to
    *func*.  Awaiting the returned coroutine yields *func*'s eventual
    result (or raises its exception).
    """
    running_loop = events.get_running_loop()
    caller_context = contextvars.copy_context()
    bound_call = functools.partial(caller_context.run, func, *args, **kwargs)
    return await running_loop.run_in_executor(None, bound_call)
|
omnilmm/lib/python3.10/asyncio/transports.py
ADDED
|
@@ -0,0 +1,335 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Abstract Transport class."""
|
| 2 |
+
|
| 3 |
+
__all__ = (
|
| 4 |
+
'BaseTransport', 'ReadTransport', 'WriteTransport',
|
| 5 |
+
'Transport', 'DatagramTransport', 'SubprocessTransport',
|
| 6 |
+
)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class BaseTransport:
    """Base class for transports."""

    __slots__ = ('_extra',)

    def __init__(self, extra=None):
        # Per-transport metadata mapping; default to a fresh empty dict.
        self._extra = {} if extra is None else extra

    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._extra.get(name, default)

    def is_closing(self):
        """Return True if the transport is closing or closed."""
        raise NotImplementedError

    def close(self):
        """Close the transport.

        Buffered data will be flushed asynchronously. No more data
        will be received. After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError

    def set_protocol(self, protocol):
        """Set a new protocol."""
        raise NotImplementedError

    def get_protocol(self):
        """Return the current protocol."""
        raise NotImplementedError
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class ReadTransport(BaseTransport):
    """Interface for read-only transports.

    All methods are abstract stubs that concrete event-loop
    transports must override.
    """

    __slots__ = ()

    def is_reading(self):
        """Return True if the transport is receiving."""
        raise NotImplementedError

    def pause_reading(self):
        """Pause the receiving end.

        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        raise NotImplementedError

    def resume_reading(self):
        """Resume the receiving end.

        Data received will once again be passed to the protocol's
        data_received() method.
        """
        raise NotImplementedError
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class WriteTransport(BaseTransport):
    """Interface for write-only transports.

    Only writelines() has a default implementation; everything else
    is an abstract stub for concrete transports to override.
    """

    __slots__ = ()

    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.

        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods.  If specified,
        the low-water limit must be less than or equal to the
        high-water limit.  Neither value can be negative.

        The defaults are implementation-specific.  If only the
        high-water limit is given, the low-water limit defaults to an
        implementation-specific value less than or equal to the
        high-water limit.  Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty.  Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        raise NotImplementedError

    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        raise NotImplementedError

    def get_write_buffer_limits(self):
        """Get the high and low watermarks for write flow control.

        Return a tuple (low, high) where low and high are
        positive number of bytes.
        """
        raise NotImplementedError

    def write(self, data):
        """Write some data bytes to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        raise NotImplementedError

    def writelines(self, list_of_data):
        """Write a list (or any iterable) of data bytes to the transport.

        The default implementation concatenates the arguments and
        calls write() on the result.
        """
        # Single write of the joined payload; subclasses may override
        # with something smarter (e.g. scatter/gather I/O).
        self.write(b''.join(list_of_data))

    def write_eof(self):
        """Close the write end after flushing buffered data.

        (This is like typing ^D into a UNIX program reading from stdin.)

        Data may still be received.
        """
        raise NotImplementedError

    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        raise NotImplementedError

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost.  No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
class Transport(ReadTransport, WriteTransport):
    """Interface representing a bidirectional transport.

    There may be several implementations, but typically, the user does
    not implement new transports; rather, the platform provides some
    useful transports that are implemented using the platform's best
    practices.

    The user never instantiates a transport directly; they call a
    utility function, passing it a protocol factory and other
    information necessary to create the transport and protocol.  (E.g.
    EventLoop.create_connection() or EventLoop.create_server().)

    The utility function will asynchronously create a transport and a
    protocol and hook them up by calling the protocol's
    connection_made() method, passing it the transport.

    The implementation here raises NotImplementedError for every method
    except writelines(), which calls write() in a loop.
    """

    __slots__ = ()
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
class DatagramTransport(BaseTransport):
    """Interface for datagram (UDP) transports."""

    __slots__ = ()

    def sendto(self, data, addr=None):
        """Send data to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        addr is the target socket address.
        If addr is None, use the target address given on transport
        creation.
        """
        raise NotImplementedError

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost.  No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
class SubprocessTransport(BaseTransport):
    """Interface for transports wrapping a spawned subprocess.

    All methods are abstract stubs for concrete implementations.
    """

    __slots__ = ()

    def get_pid(self):
        """Get subprocess id."""
        raise NotImplementedError

    def get_returncode(self):
        """Get subprocess returncode.

        See also
        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
        """
        raise NotImplementedError

    def get_pipe_transport(self, fd):
        """Get transport for pipe with number fd."""
        raise NotImplementedError

    def send_signal(self, signal):
        """Send signal to subprocess.

        See also:
        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
        """
        raise NotImplementedError

    def terminate(self):
        """Stop the subprocess.

        Alias for close() method.

        On Posix OSs the method sends SIGTERM to the subprocess.
        On Windows the Win32 API function TerminateProcess()
        is called to stop the subprocess.

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
        """
        raise NotImplementedError

    def kill(self):
        """Kill the subprocess.

        On Posix OSs the function sends SIGKILL to the subprocess.
        On Windows kill() is an alias for terminate().

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
        """
        raise NotImplementedError
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
class _FlowControlMixin(Transport):
    """All the logic for (write) flow control in a mix-in base class.

    The subclass must implement get_write_buffer_size().  It must call
    _maybe_pause_protocol() whenever the write buffer size increases,
    and _maybe_resume_protocol() whenever it decreases.  It may also
    override set_write_buffer_limits() (e.g. to specify different
    defaults).

    The subclass constructor must call super().__init__(extra).  This
    will call set_write_buffer_limits().

    The user may call set_write_buffer_limits() and
    get_write_buffer_size(), and their protocol's pause_writing() and
    resume_writing() may be called.
    """

    __slots__ = ('_loop', '_protocol_paused', '_high_water', '_low_water')

    def __init__(self, extra=None, loop=None):
        super().__init__(extra)
        assert loop is not None
        self._loop = loop
        self._protocol_paused = False
        self._set_write_buffer_limits()

    def _maybe_pause_protocol(self):
        # Ask the protocol to stop writing once the buffered amount
        # exceeds the high-water mark.  A failing pause_writing() is
        # reported to the loop's exception handler, never propagated
        # (except SystemExit/KeyboardInterrupt).
        # NOTE: self._protocol is expected to be set by the subclass.
        if self.get_write_buffer_size() <= self._high_water:
            return
        if self._protocol_paused:
            return
        self._protocol_paused = True
        try:
            self._protocol.pause_writing()
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            self._loop.call_exception_handler({
                'message': 'protocol.pause_writing() failed',
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })

    def _maybe_resume_protocol(self):
        # Mirror of _maybe_pause_protocol(): resume writing once the
        # buffer has drained to or below the low-water mark.
        if not self._protocol_paused:
            return
        if self.get_write_buffer_size() > self._low_water:
            return
        self._protocol_paused = False
        try:
            self._protocol.resume_writing()
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            self._loop.call_exception_handler({
                'message': 'protocol.resume_writing() failed',
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })

    def get_write_buffer_limits(self):
        """Return the (low, high) water marks in bytes."""
        return (self._low_water, self._high_water)

    def _set_write_buffer_limits(self, high=None, low=None):
        # Fill in defaults: high is 64 KiB (or 4*low when only low is
        # given); low defaults to a quarter of high.
        if high is None:
            high = 64 * 1024 if low is None else 4 * low
        if low is None:
            low = high // 4

        if not high >= low >= 0:
            raise ValueError(
                f'high ({high!r}) must be >= low ({low!r}) must be >= 0')

        self._high_water = high
        self._low_water = low

    def set_write_buffer_limits(self, high=None, low=None):
        """Set the water marks and re-check whether to pause writing."""
        self._set_write_buffer_limits(high=high, low=low)
        self._maybe_pause_protocol()

    def get_write_buffer_size(self):
        # Subclasses must provide the actual buffer accounting.
        raise NotImplementedError
|
omnilmm/lib/python3.10/asyncio/trsock.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import socket
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class TransportSocket:

    """A socket-like wrapper for exposing real transport sockets.

    These objects can be safely returned by APIs like
    `transport.get_extra_info('socket')`.  All potentially disruptive
    operations (like "socket.close()") are banned.
    """

    __slots__ = ('_sock',)

    def __init__(self, sock: socket.socket):
        self._sock = sock

    def _na(self, what):
        # Emit a deprecation warning for operations that should not be
        # performed on a transport-owned socket.
        warnings.warn(
            f"Using {what} on sockets returned from get_extra_info('socket') "
            f"will be prohibited in asyncio 3.9. Please report your use case "
            f"to bugs.python.org.",
            DeprecationWarning, source=self)

    # -- read-only socket attributes ------------------------------------

    @property
    def family(self):
        return self._sock.family

    @property
    def type(self):
        return self._sock.type

    @property
    def proto(self):
        return self._sock.proto

    def __repr__(self):
        info = (
            f"<asyncio.TransportSocket fd={self.fileno()}, "
            f"family={self.family!s}, type={self.type!s}, "
            f"proto={self.proto}"
        )

        # Addresses are best-effort: the socket may be unbound,
        # unconnected, or already invalid.
        if self.fileno() != -1:
            try:
                laddr = self.getsockname()
                if laddr:
                    info = f"{info}, laddr={laddr}"
            except socket.error:
                pass
            try:
                raddr = self.getpeername()
                if raddr:
                    info = f"{info}, raddr={raddr}"
            except socket.error:
                pass

        return f"{info}>"

    def __getstate__(self):
        raise TypeError("Cannot serialize asyncio.TransportSocket object")

    # -- harmless queries, delegated without a warning ------------------

    def fileno(self):
        return self._sock.fileno()

    def dup(self):
        return self._sock.dup()

    def get_inheritable(self):
        return self._sock.get_inheritable()

    def shutdown(self, how):
        # asyncio doesn't currently provide a high-level transport API
        # to shutdown the connection.
        self._sock.shutdown(how)

    def getsockopt(self, *args, **kwargs):
        return self._sock.getsockopt(*args, **kwargs)

    def setsockopt(self, *args, **kwargs):
        self._sock.setsockopt(*args, **kwargs)

    def getpeername(self):
        return self._sock.getpeername()

    def getsockname(self):
        return self._sock.getsockname()

    def getsockbyname(self):
        # NOTE(review): plain socket objects do not appear to define
        # getsockbyname(); this delegation likely always raises
        # AttributeError -- verify before relying on it.
        return self._sock.getsockbyname()

    # -- disruptive operations: warn, then delegate ---------------------

    def accept(self):
        self._na('accept() method')
        return self._sock.accept()

    def connect(self, *args, **kwargs):
        self._na('connect() method')
        return self._sock.connect(*args, **kwargs)

    def connect_ex(self, *args, **kwargs):
        self._na('connect_ex() method')
        return self._sock.connect_ex(*args, **kwargs)

    def bind(self, *args, **kwargs):
        self._na('bind() method')
        return self._sock.bind(*args, **kwargs)

    def ioctl(self, *args, **kwargs):
        self._na('ioctl() method')
        return self._sock.ioctl(*args, **kwargs)

    def listen(self, *args, **kwargs):
        self._na('listen() method')
        return self._sock.listen(*args, **kwargs)

    def makefile(self):
        self._na('makefile() method')
        return self._sock.makefile()

    def sendfile(self, *args, **kwargs):
        self._na('sendfile() method')
        return self._sock.sendfile(*args, **kwargs)

    def close(self):
        self._na('close() method')
        return self._sock.close()

    def detach(self):
        self._na('detach() method')
        return self._sock.detach()

    def sendmsg_afalg(self, *args, **kwargs):
        self._na('sendmsg_afalg() method')
        return self._sock.sendmsg_afalg(*args, **kwargs)

    def sendmsg(self, *args, **kwargs):
        self._na('sendmsg() method')
        return self._sock.sendmsg(*args, **kwargs)

    def sendto(self, *args, **kwargs):
        self._na('sendto() method')
        return self._sock.sendto(*args, **kwargs)

    def send(self, *args, **kwargs):
        self._na('send() method')
        return self._sock.send(*args, **kwargs)

    def sendall(self, *args, **kwargs):
        self._na('sendall() method')
        return self._sock.sendall(*args, **kwargs)

    def set_inheritable(self, *args, **kwargs):
        self._na('set_inheritable() method')
        return self._sock.set_inheritable(*args, **kwargs)

    def share(self, process_id):
        self._na('share() method')
        return self._sock.share(process_id)

    def recv_into(self, *args, **kwargs):
        self._na('recv_into() method')
        return self._sock.recv_into(*args, **kwargs)

    def recvfrom_into(self, *args, **kwargs):
        self._na('recvfrom_into() method')
        return self._sock.recvfrom_into(*args, **kwargs)

    def recvmsg_into(self, *args, **kwargs):
        self._na('recvmsg_into() method')
        return self._sock.recvmsg_into(*args, **kwargs)

    def recvmsg(self, *args, **kwargs):
        self._na('recvmsg() method')
        return self._sock.recvmsg(*args, **kwargs)

    def recvfrom(self, *args, **kwargs):
        self._na('recvfrom() method')
        return self._sock.recvfrom(*args, **kwargs)

    def recv(self, *args, **kwargs):
        self._na('recv() method')
        return self._sock.recv(*args, **kwargs)

    # -- blocking/timeout controls: only non-blocking is allowed --------

    def settimeout(self, value):
        # Transport sockets must stay non-blocking; only 0 is accepted.
        if value != 0:
            raise ValueError(
                'settimeout(): only 0 timeout is allowed on transport sockets')

    def gettimeout(self):
        return 0

    def setblocking(self, flag):
        # Only "non-blocking" (falsy flag) is a no-op; anything else
        # is rejected.
        if flag:
            raise ValueError(
                'setblocking(): transport sockets cannot be blocking')

    def __enter__(self):
        self._na('context manager protocol')
        return self._sock.__enter__()

    def __exit__(self, *err):
        self._na('context manager protocol')
        return self._sock.__exit__(*err)
|
omnilmm/lib/python3.10/asyncio/unix_events.py
ADDED
|
@@ -0,0 +1,1466 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Selector event loop for Unix with signal handling."""
|
| 2 |
+
|
| 3 |
+
import errno
|
| 4 |
+
import io
|
| 5 |
+
import itertools
|
| 6 |
+
import os
|
| 7 |
+
import selectors
|
| 8 |
+
import signal
|
| 9 |
+
import socket
|
| 10 |
+
import stat
|
| 11 |
+
import subprocess
|
| 12 |
+
import sys
|
| 13 |
+
import threading
|
| 14 |
+
import warnings
|
| 15 |
+
|
| 16 |
+
from . import base_events
|
| 17 |
+
from . import base_subprocess
|
| 18 |
+
from . import constants
|
| 19 |
+
from . import coroutines
|
| 20 |
+
from . import events
|
| 21 |
+
from . import exceptions
|
| 22 |
+
from . import futures
|
| 23 |
+
from . import selector_events
|
| 24 |
+
from . import tasks
|
| 25 |
+
from . import transports
|
| 26 |
+
from .log import logger
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
__all__ = (
|
| 30 |
+
'SelectorEventLoop',
|
| 31 |
+
'AbstractChildWatcher', 'SafeChildWatcher',
|
| 32 |
+
'FastChildWatcher', 'PidfdChildWatcher',
|
| 33 |
+
'MultiLoopChildWatcher', 'ThreadedChildWatcher',
|
| 34 |
+
'DefaultEventLoopPolicy',
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
if sys.platform == 'win32': # pragma: no cover
|
| 39 |
+
raise ImportError('Signals are not really supported on Windows')
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _sighandler_noop(signum, frame):
    """Dummy signal handler.

    Registered by add_signal_handler() solely so that receiving the
    signal causes the signal number to be written to the loop's wakeup
    file descriptor; the handler itself does nothing.
    """
    pass
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def waitstatus_to_exitcode(status):
    """Convert a wait() status to an exit code, tolerating odd statuses.

    Delegates to os.waitstatus_to_exitcode(); if that raises ValueError
    for a status it does not understand, the raw status is returned
    instead so callers still get something useful to report.
    """
    try:
        return os.waitstatus_to_exitcode(status)
    except ValueError:
        # The child exited, but we don't understand its status.
        # This shouldn't happen, but if it does, let's just
        # return that status; perhaps that helps debug it.
        return status
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
    """Unix event loop.

    Adds signal handling and UNIX Domain Socket support to SelectorEventLoop.
    """

    def __init__(self, selector=None):
        super().__init__(selector)
        # Maps signal number -> events.Handle for handlers we installed.
        self._signal_handlers = {}

    def close(self):
        super().close()
        if not sys.is_finalizing():
            for sig in list(self._signal_handlers):
                self.remove_signal_handler(sig)
        else:
            # During interpreter shutdown signal.signal() may no longer be
            # safe to call; just warn and drop the bookkeeping.
            if self._signal_handlers:
                warnings.warn(f"Closing the loop {self!r} "
                              f"on interpreter shutdown "
                              f"stage, skipping signal handlers removal",
                              ResourceWarning,
                              source=self)
                self._signal_handlers.clear()

    def _process_self_data(self, data):
        # Bytes read from the self-pipe are signal numbers written by the
        # C signal machinery (see add_signal_handler).
        for signum in data:
            if not signum:
                # ignore null bytes written by _write_to_self()
                continue
            self._handle_signal(signum)

    def add_signal_handler(self, sig, callback, *args):
        """Add a handler for a signal.  UNIX only.

        Raise ValueError if the signal number is invalid or uncatchable.
        Raise RuntimeError if there is a problem setting up the handler.
        """
        if (coroutines.iscoroutine(callback) or
                coroutines.iscoroutinefunction(callback)):
            raise TypeError("coroutines cannot be used "
                            "with add_signal_handler()")
        self._check_signal(sig)
        self._check_closed()
        try:
            # set_wakeup_fd() raises ValueError if this is not the
            # main thread.  By calling it early we ensure that an
            # event loop running in another thread cannot add a signal
            # handler.
            signal.set_wakeup_fd(self._csock.fileno())
        except (ValueError, OSError) as exc:
            raise RuntimeError(str(exc))

        handle = events.Handle(callback, args, self, None)
        self._signal_handlers[sig] = handle

        try:
            # Register a dummy signal handler to ask Python to write the signal
            # number in the wakeup file descriptor. _process_self_data() will
            # read signal numbers from this file descriptor to handle signals.
            signal.signal(sig, _sighandler_noop)

            # Set SA_RESTART to limit EINTR occurrences.
            signal.siginterrupt(sig, False)
        except OSError as exc:
            # Roll back the bookkeeping; reset the wakeup fd if this was
            # the last registered handler.
            del self._signal_handlers[sig]
            if not self._signal_handlers:
                try:
                    signal.set_wakeup_fd(-1)
                except (ValueError, OSError) as nexc:
                    logger.info('set_wakeup_fd(-1) failed: %s', nexc)

            if exc.errno == errno.EINVAL:
                raise RuntimeError(f'sig {sig} cannot be caught')
            else:
                raise

    def _handle_signal(self, sig):
        """Internal helper that is the actual signal handler."""
        handle = self._signal_handlers.get(sig)
        if handle is None:
            return  # Assume it's some race condition.
        if handle._cancelled:
            self.remove_signal_handler(sig)  # Remove it properly.
        else:
            self._add_callback_signalsafe(handle)

    def remove_signal_handler(self, sig):
        """Remove a handler for a signal.  UNIX only.

        Return True if a signal handler was removed, False if not.
        """
        self._check_signal(sig)
        try:
            del self._signal_handlers[sig]
        except KeyError:
            return False

        # Restore Python's default behavior for the signal.
        if sig == signal.SIGINT:
            handler = signal.default_int_handler
        else:
            handler = signal.SIG_DFL

        try:
            signal.signal(sig, handler)
        except OSError as exc:
            if exc.errno == errno.EINVAL:
                raise RuntimeError(f'sig {sig} cannot be caught')
            else:
                raise

        if not self._signal_handlers:
            try:
                signal.set_wakeup_fd(-1)
            except (ValueError, OSError) as exc:
                logger.info('set_wakeup_fd(-1) failed: %s', exc)

        return True

    def _check_signal(self, sig):
        """Internal helper to validate a signal.

        Raise ValueError if the signal number is invalid or uncatchable.
        Raise RuntimeError if there is a problem setting up the handler.
        """
        if not isinstance(sig, int):
            raise TypeError(f'sig must be an int, not {sig!r}')

        if sig not in signal.valid_signals():
            raise ValueError(f'invalid signal number {sig}')

    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)

    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)

    async def _make_subprocess_transport(self, protocol, args, shell,
                                         stdin, stdout, stderr, bufsize,
                                         extra=None, **kwargs):
        with events.get_child_watcher() as watcher:
            if not watcher.is_active():
                # Check early.
                # Raising exception before process creation
                # prevents subprocess execution if the watcher
                # is not ready to handle it.
                raise RuntimeError("asyncio.get_child_watcher() is not activated, "
                                   "subprocess support is not installed.")
            waiter = self.create_future()
            transp = _UnixSubprocessTransport(self, protocol, args, shell,
                                              stdin, stdout, stderr, bufsize,
                                              waiter=waiter, extra=extra,
                                              **kwargs)

            watcher.add_child_handler(transp.get_pid(),
                                      self._child_watcher_callback, transp)
            try:
                await waiter
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException:
                transp.close()
                await transp._wait()
                raise

        return transp

    def _child_watcher_callback(self, pid, returncode, transp):
        # Skip one iteration for callbacks to be executed
        self.call_soon_threadsafe(self.call_soon, transp._process_exited, returncode)

    async def create_unix_connection(
            self, protocol_factory, path=None, *,
            ssl=None, sock=None,
            server_hostname=None,
            ssl_handshake_timeout=None):
        assert server_hostname is None or isinstance(server_hostname, str)
        if ssl:
            if server_hostname is None:
                raise ValueError(
                    'you have to pass server_hostname when using ssl')
        else:
            if server_hostname is not None:
                raise ValueError('server_hostname is only meaningful with ssl')
            if ssl_handshake_timeout is not None:
                raise ValueError(
                    'ssl_handshake_timeout is only meaningful with ssl')

        if path is not None:
            if sock is not None:
                raise ValueError(
                    'path and sock can not be specified at the same time')

            path = os.fspath(path)
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
            try:
                sock.setblocking(False)
                await self.sock_connect(sock, path)
            except:
                sock.close()
                raise

        else:
            if sock is None:
                raise ValueError('no path and sock were specified')
            if (sock.family != socket.AF_UNIX or
                    sock.type != socket.SOCK_STREAM):
                raise ValueError(
                    f'A UNIX Domain Stream Socket was expected, got {sock!r}')
            sock.setblocking(False)

        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
        return transport, protocol

    async def create_unix_server(
            self, protocol_factory, path=None, *,
            sock=None, backlog=100, ssl=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')

        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')

        if path is not None:
            if sock is not None:
                raise ValueError(
                    'path and sock can not be specified at the same time')

            path = os.fspath(path)
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

            # Check for abstract socket. `str` and `bytes` paths are supported.
            if path[0] not in (0, '\x00'):
                try:
                    if stat.S_ISSOCK(os.stat(path).st_mode):
                        os.remove(path)
                except FileNotFoundError:
                    pass
                except OSError as err:
                    # Directory may have permissions only to create socket.
                    logger.error('Unable to check or remove stale UNIX socket '
                                 '%r: %r', path, err)

            try:
                sock.bind(path)
            except OSError as exc:
                sock.close()
                if exc.errno == errno.EADDRINUSE:
                    # Let's improve the error message by adding
                    # with what exact address it occurs.
                    msg = f'Address {path!r} is already in use'
                    raise OSError(errno.EADDRINUSE, msg) from None
                else:
                    raise
            except:
                sock.close()
                raise
        else:
            if sock is None:
                raise ValueError(
                    'path was not specified, and no sock specified')

            if (sock.family != socket.AF_UNIX or
                    sock.type != socket.SOCK_STREAM):
                raise ValueError(
                    f'A UNIX Domain Stream Socket was expected, got {sock!r}')

        sock.setblocking(False)
        server = base_events.Server(self, [sock], protocol_factory,
                                    ssl, backlog, ssl_handshake_timeout)
        if start_serving:
            server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # go through.
            await tasks.sleep(0)

        return server

    async def _sock_sendfile_native(self, sock, file, offset, count):
        try:
            os.sendfile
        except AttributeError:
            raise exceptions.SendfileNotAvailableError(
                "os.sendfile() is not available")
        try:
            fileno = file.fileno()
        except (AttributeError, io.UnsupportedOperation) as err:
            raise exceptions.SendfileNotAvailableError("not a regular file")
        try:
            fsize = os.fstat(fileno).st_size
        except OSError:
            raise exceptions.SendfileNotAvailableError("not a regular file")
        blocksize = count if count else fsize
        if not blocksize:
            return 0  # empty file

        fut = self.create_future()
        self._sock_sendfile_native_impl(fut, None, sock, fileno,
                                        offset, count, blocksize, 0)
        return await fut

    def _sock_sendfile_native_impl(self, fut, registered_fd, sock, fileno,
                                   offset, count, blocksize, total_sent):
        fd = sock.fileno()
        if registered_fd is not None:
            # Remove the callback early.  It should be rare that the
            # selector says the fd is ready but the call still returns
            # EAGAIN, and I am willing to take a hit in that case in
            # order to simplify the common case.
            self.remove_writer(registered_fd)
        if fut.cancelled():
            self._sock_sendfile_update_filepos(fileno, offset, total_sent)
            return
        if count:
            blocksize = count - total_sent
            if blocksize <= 0:
                self._sock_sendfile_update_filepos(fileno, offset, total_sent)
                fut.set_result(total_sent)
                return

        try:
            sent = os.sendfile(fd, fileno, offset, blocksize)
        except (BlockingIOError, InterruptedError):
            if registered_fd is None:
                self._sock_add_cancellation_callback(fut, sock)
            self.add_writer(fd, self._sock_sendfile_native_impl, fut,
                            fd, sock, fileno,
                            offset, count, blocksize, total_sent)
        except OSError as exc:
            if (registered_fd is not None and
                    exc.errno == errno.ENOTCONN and
                    type(exc) is not ConnectionError):
                # If we have an ENOTCONN and this isn't a first call to
                # sendfile(), i.e. the connection was closed in the middle
                # of the operation, normalize the error to ConnectionError
                # to make it consistent across all Posix systems.
                new_exc = ConnectionError(
                    "socket is not connected", errno.ENOTCONN)
                new_exc.__cause__ = exc
                exc = new_exc
            if total_sent == 0:
                # We can get here for different reasons, the main
                # one being 'file' is not a regular mmap(2)-like
                # file, in which case we'll fall back on using
                # plain send().
                err = exceptions.SendfileNotAvailableError(
                    "os.sendfile call failed")
                self._sock_sendfile_update_filepos(fileno, offset, total_sent)
                fut.set_exception(err)
            else:
                self._sock_sendfile_update_filepos(fileno, offset, total_sent)
                fut.set_exception(exc)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            self._sock_sendfile_update_filepos(fileno, offset, total_sent)
            fut.set_exception(exc)
        else:
            if sent == 0:
                # EOF
                self._sock_sendfile_update_filepos(fileno, offset, total_sent)
                fut.set_result(total_sent)
            else:
                offset += sent
                total_sent += sent
                if registered_fd is None:
                    self._sock_add_cancellation_callback(fut, sock)
                self.add_writer(fd, self._sock_sendfile_native_impl, fut,
                                fd, sock, fileno,
                                offset, count, blocksize, total_sent)

    def _sock_sendfile_update_filepos(self, fileno, offset, total_sent):
        # Keep the file object's position consistent with what sendfile()
        # actually consumed, since os.sendfile() does not move it.
        if total_sent > 0:
            os.lseek(fileno, offset, os.SEEK_SET)

    def _sock_add_cancellation_callback(self, fut, sock):
        # If the future is cancelled, stop watching the socket for
        # writability so the retry chain is torn down.
        def cb(fut):
            if fut.cancelled():
                fd = sock.fileno()
                if fd != -1:
                    self.remove_writer(fd)
        fut.add_done_callback(cb)
class _UnixReadPipeTransport(transports.ReadTransport):
    """Read transport over a non-blocking pipe/socket/character device fd."""

    max_size = 256 * 1024  # max bytes we read in one event loop iteration

    def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
        super().__init__(extra)
        self._extra['pipe'] = pipe
        self._loop = loop
        self._pipe = pipe
        self._fileno = pipe.fileno()
        self._protocol = protocol
        self._closing = False
        self._paused = False

        mode = os.fstat(self._fileno).st_mode
        if not (stat.S_ISFIFO(mode) or
                stat.S_ISSOCK(mode) or
                stat.S_ISCHR(mode)):
            self._pipe = None
            self._fileno = None
            self._protocol = None
            raise ValueError("Pipe transport is for pipes/sockets only.")

        os.set_blocking(self._fileno, False)

        self._loop.call_soon(self._protocol.connection_made, self)
        # only start reading when connection_made() has been called
        self._loop.call_soon(self._loop._add_reader,
                             self._fileno, self._read_ready)
        if waiter is not None:
            # only wake up the waiter when connection_made() has been called
            self._loop.call_soon(futures._set_result_unless_cancelled,
                                 waiter, None)

    def __repr__(self):
        info = [self.__class__.__name__]
        if self._pipe is None:
            info.append('closed')
        elif self._closing:
            info.append('closing')
        info.append(f'fd={self._fileno}')
        selector = getattr(self._loop, '_selector', None)
        if self._pipe is not None and selector is not None:
            polling = selector_events._test_selector_event(
                selector, self._fileno, selectors.EVENT_READ)
            if polling:
                info.append('polling')
            else:
                info.append('idle')
        elif self._pipe is not None:
            info.append('open')
        else:
            info.append('closed')
        return '<{}>'.format(' '.join(info))

    def _read_ready(self):
        try:
            data = os.read(self._fileno, self.max_size)
        except (BlockingIOError, InterruptedError):
            pass
        except OSError as exc:
            self._fatal_error(exc, 'Fatal read error on pipe transport')
        else:
            if data:
                self._protocol.data_received(data)
            else:
                # Empty read means the write end was closed: report EOF
                # and tear the transport down.
                if self._loop.get_debug():
                    logger.info("%r was closed by peer", self)
                self._closing = True
                self._loop._remove_reader(self._fileno)
                self._loop.call_soon(self._protocol.eof_received)
                self._loop.call_soon(self._call_connection_lost, None)

    def pause_reading(self):
        if self._closing or self._paused:
            return
        self._paused = True
        self._loop._remove_reader(self._fileno)
        if self._loop.get_debug():
            logger.debug("%r pauses reading", self)

    def resume_reading(self):
        if self._closing or not self._paused:
            return
        self._paused = False
        self._loop._add_reader(self._fileno, self._read_ready)
        if self._loop.get_debug():
            logger.debug("%r resumes reading", self)

    def set_protocol(self, protocol):
        self._protocol = protocol

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._closing

    def close(self):
        if not self._closing:
            self._close(None)

    def __del__(self, _warn=warnings.warn):
        if self._pipe is not None:
            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
            self._pipe.close()

    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
        # should be called by exception handler only
        if (isinstance(exc, OSError) and exc.errno == errno.EIO):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })
        self._close(exc)

    def _close(self, exc):
        self._closing = True
        self._loop._remove_reader(self._fileno)
        self._loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            # Break reference cycles so the loop/protocol can be collected.
            self._pipe.close()
            self._pipe = None
            self._protocol = None
            self._loop = None
class _UnixWritePipeTransport(transports._FlowControlMixin,
                              transports.WriteTransport):
    """Write transport over a non-blocking pipe/socket/character device fd."""

    def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
        super().__init__(extra, loop)
        self._extra['pipe'] = pipe
        self._pipe = pipe
        self._fileno = pipe.fileno()
        self._protocol = protocol
        self._buffer = bytearray()
        self._conn_lost = 0
        self._closing = False  # Set when close() or write_eof() called.

        mode = os.fstat(self._fileno).st_mode
        is_char = stat.S_ISCHR(mode)
        is_fifo = stat.S_ISFIFO(mode)
        is_socket = stat.S_ISSOCK(mode)
        if not (is_char or is_fifo or is_socket):
            self._pipe = None
            self._fileno = None
            self._protocol = None
            raise ValueError("Pipe transport is only for "
                             "pipes, sockets and character devices")

        os.set_blocking(self._fileno, False)
        self._loop.call_soon(self._protocol.connection_made, self)

        # On AIX, the reader trick (to be notified when the read end of the
        # socket is closed) only works for sockets. On other platforms it
        # works for pipes and sockets. (Exception: OS X 10.4?  Issue #19294.)
        if is_socket or (is_fifo and not sys.platform.startswith("aix")):
            # only start reading when connection_made() has been called
            self._loop.call_soon(self._loop._add_reader,
                                 self._fileno, self._read_ready)

        if waiter is not None:
            # only wake up the waiter when connection_made() has been called
            self._loop.call_soon(futures._set_result_unless_cancelled,
                                 waiter, None)

    def __repr__(self):
        info = [self.__class__.__name__]
        if self._pipe is None:
            info.append('closed')
        elif self._closing:
            info.append('closing')
        info.append(f'fd={self._fileno}')
        selector = getattr(self._loop, '_selector', None)
        if self._pipe is not None and selector is not None:
            polling = selector_events._test_selector_event(
                selector, self._fileno, selectors.EVENT_WRITE)
            if polling:
                info.append('polling')
            else:
                info.append('idle')

            bufsize = self.get_write_buffer_size()
            info.append(f'bufsize={bufsize}')
        elif self._pipe is not None:
            info.append('open')
        else:
            info.append('closed')
        return '<{}>'.format(' '.join(info))

    def get_write_buffer_size(self):
        return len(self._buffer)

    def _read_ready(self):
        # Pipe was closed by peer.
        if self._loop.get_debug():
            logger.info("%r was closed by peer", self)
        if self._buffer:
            self._close(BrokenPipeError())
        else:
            self._close()

    def write(self, data):
        assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
        if isinstance(data, bytearray):
            data = memoryview(data)
        if not data:
            return

        if self._conn_lost or self._closing:
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('pipe closed by peer or '
                               'os.write(pipe, data) raised exception.')
            self._conn_lost += 1
            return

        if not self._buffer:
            # Attempt to send it right away first.
            try:
                n = os.write(self._fileno, data)
            except (BlockingIOError, InterruptedError):
                n = 0
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                self._conn_lost += 1
                self._fatal_error(exc, 'Fatal write error on pipe transport')
                return
            if n == len(data):
                return
            elif n > 0:
                data = memoryview(data)[n:]
            self._loop._add_writer(self._fileno, self._write_ready)

        self._buffer += data
        self._maybe_pause_protocol()

    def _write_ready(self):
        assert self._buffer, 'Data should not be empty'

        try:
            n = os.write(self._fileno, self._buffer)
        except (BlockingIOError, InterruptedError):
            pass
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            self._buffer.clear()
            self._conn_lost += 1
            # Remove writer here, _fatal_error() doesn't it
            # because _buffer is empty.
            self._loop._remove_writer(self._fileno)
            self._fatal_error(exc, 'Fatal write error on pipe transport')
        else:
            if n == len(self._buffer):
                self._buffer.clear()
                self._loop._remove_writer(self._fileno)
                self._maybe_resume_protocol()  # May append to buffer.
                if self._closing:
                    self._loop._remove_reader(self._fileno)
                    self._call_connection_lost(None)
                return
            elif n > 0:
                del self._buffer[:n]

    def can_write_eof(self):
        return True

    def write_eof(self):
        if self._closing:
            return
        assert self._pipe
        self._closing = True
        if not self._buffer:
            self._loop._remove_reader(self._fileno)
            self._loop.call_soon(self._call_connection_lost, None)

    def set_protocol(self, protocol):
        self._protocol = protocol

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._closing

    def close(self):
        if self._pipe is not None and not self._closing:
            # write_eof is all what we needed to close the write pipe
            self.write_eof()

    def __del__(self, _warn=warnings.warn):
        if self._pipe is not None:
            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
            self._pipe.close()

    def abort(self):
        self._close(None)

    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
        # should be called by exception handler only
        if isinstance(exc, OSError):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })
        self._close(exc)

    def _close(self, exc=None):
        self._closing = True
        if self._buffer:
            self._loop._remove_writer(self._fileno)
        self._buffer.clear()
        self._loop._remove_reader(self._fileno)
        self._loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            # Break reference cycles so the loop/protocol can be collected.
            self._pipe.close()
            self._pipe = None
            self._protocol = None
            self._loop = None
class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
    """Unix subprocess transport; spawns the child via subprocess.Popen."""

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        stdin_w = None
        if stdin == subprocess.PIPE and sys.platform.startswith('aix'):
            # Use a socket pair for stdin on AIX, since it does not
            # support selecting read events on the write end of a
            # socket (which we use in order to detect closing of the
            # other end).
            stdin, stdin_w = socket.socketpair()
        try:
            self._proc = subprocess.Popen(
                args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
                universal_newlines=False, bufsize=bufsize, **kwargs)
            if stdin_w is not None:
                stdin.close()
                self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)
                stdin_w = None
        finally:
            # If Popen failed, make sure the socket pair does not leak.
            if stdin_w is not None:
                stdin.close()
                stdin_w.close()
class AbstractChildWatcher:
    """Abstract base class for monitoring child processes.

    Objects derived from this class monitor a collection of subprocesses and
    report their termination or interruption by a signal.

    New callbacks are registered with .add_child_handler(). Starting a new
    process must be done within a 'with' block to allow the watcher to suspend
    its activity until the new process if fully registered (this is needed to
    prevent a race condition in some implementations).

    Example:
        with watcher:
            proc = subprocess.Popen("sleep 1")
            watcher.add_child_handler(proc.pid, callback)

    Notes:
        Implementations of this class must be thread-safe.

        Since child watcher objects may catch the SIGCHLD signal and call
        waitpid(-1), there should be only one active object per process.
    """

    def add_child_handler(self, pid, callback, *args):
        """Register a new child handler.

        Arrange for callback(pid, returncode, *args) to be called when
        process 'pid' terminates. Specifying another callback for the same
        process replaces the previous handler.

        Note: callback() must be thread-safe.
        """
        raise NotImplementedError()

    def remove_child_handler(self, pid):
        """Removes the handler for process 'pid'.

        The function returns True if the handler was successfully removed,
        False if there was nothing to remove."""

        raise NotImplementedError()

    def attach_loop(self, loop):
        """Attach the watcher to an event loop.

        If the watcher was previously attached to an event loop, then it is
        first detached before attaching to the new loop.

        Note: loop may be None.
        """
        raise NotImplementedError()

    def close(self):
        """Close the watcher.

        This must be called to make sure that any underlying resource is freed.
        """
        raise NotImplementedError()

    def is_active(self):
        """Return ``True`` if the watcher is active and is used by the event loop.

        Return True if the watcher is installed and ready to handle process exit
        notifications.

        """
        raise NotImplementedError()

    def __enter__(self):
        """Enter the watcher's context and allow starting new processes

        This function must return self"""
        raise NotImplementedError()

    def __exit__(self, a, b, c):
        """Exit the watcher's context"""
        raise NotImplementedError()
class PidfdChildWatcher(AbstractChildWatcher):
    """Child watcher implementation using Linux's pid file descriptors.

    This child watcher polls process file descriptors (pidfds) to await child
    process termination. In some respects, PidfdChildWatcher is a "Goldilocks"
    child watcher implementation. It doesn't require signals or threads, doesn't
    interfere with any processes launched outside the event loop, and scales
    linearly with the number of subprocesses launched by the event loop. The
    main disadvantage is that pidfds are specific to Linux, and only work on
    recent (5.3+) kernels.
    """

    def __init__(self):
        self._loop = None
        # Maps pid -> (pidfd, callback, args)
        self._callbacks = {}

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        pass

    def is_active(self):
        return self._loop is not None and self._loop.is_running()

    def close(self):
        # Detaching from the loop also closes any outstanding pidfds.
        self.attach_loop(None)

    def attach_loop(self, loop):
        if self._loop is not None and loop is None and self._callbacks:
            warnings.warn(
                'A loop is being detached '
                'from a child watcher with pending handlers',
                RuntimeWarning)
        # Unregister and close every outstanding pidfd before switching loops.
        for pidfd, _, _ in self._callbacks.values():
            self._loop._remove_reader(pidfd)
            os.close(pidfd)
        self._callbacks.clear()
        self._loop = loop

    def add_child_handler(self, pid, callback, *args):
        existing = self._callbacks.get(pid)
        if existing is not None:
            # Replace the callback but keep the already-open pidfd.
            self._callbacks[pid] = existing[0], callback, args
        else:
            pidfd = os.pidfd_open(pid)
            # The pidfd becomes readable when the process terminates.
            self._loop._add_reader(pidfd, self._do_wait, pid)
            self._callbacks[pid] = pidfd, callback, args

    def _do_wait(self, pid):
        # Reader callback: the pidfd signalled that the child terminated.
        pidfd, callback, args = self._callbacks.pop(pid)
        self._loop._remove_reader(pidfd)
        try:
            _, status = os.waitpid(pid, 0)
        except ChildProcessError:
            # The child process is already reaped
            # (may happen if waitpid() is called elsewhere).
            returncode = 255
            logger.warning(
                "child process pid %d exit status already read: "
                " will report returncode 255",
                pid)
        else:
            returncode = waitstatus_to_exitcode(status)

        os.close(pidfd)
        callback(pid, returncode, *args)

    def remove_child_handler(self, pid):
        try:
            pidfd, _, _ = self._callbacks.pop(pid)
        except KeyError:
            return False
        self._loop._remove_reader(pidfd)
        os.close(pidfd)
        return True
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
class BaseChildWatcher(AbstractChildWatcher):
    """Common machinery for SIGCHLD-driven child watchers.

    Subclasses implement _do_waitpid() / _do_waitpid_all(); this base class
    manages the SIGCHLD handler registration on the attached loop.
    """

    def __init__(self):
        self._loop = None
        self._callbacks = {}

    def close(self):
        self.attach_loop(None)

    def is_active(self):
        return self._loop is not None and self._loop.is_running()

    def _do_waitpid(self, expected_pid):
        # Abstract: reap one specific child.
        raise NotImplementedError()

    def _do_waitpid_all(self):
        # Abstract: reap every terminated child.
        raise NotImplementedError()

    def attach_loop(self, loop):
        assert loop is None or isinstance(loop, events.AbstractEventLoop)

        if self._loop is not None and loop is None and self._callbacks:
            warnings.warn(
                'A loop is being detached '
                'from a child watcher with pending handlers',
                RuntimeWarning)

        # Remove the SIGCHLD handler from the old loop before switching.
        if self._loop is not None:
            self._loop.remove_signal_handler(signal.SIGCHLD)

        self._loop = loop
        if loop is not None:
            loop.add_signal_handler(signal.SIGCHLD, self._sig_chld)

            # Prevent a race condition in case a child terminated
            # during the switch.
            self._do_waitpid_all()

    def _sig_chld(self):
        try:
            self._do_waitpid_all()
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            # self._loop should always be available here
            # as '_sig_chld' is added as a signal handler
            # in 'attach_loop'
            self._loop.call_exception_handler({
                'message': 'Unknown exception in SIGCHLD handler',
                'exception': exc,
            })
|
| 1020 |
+
|
| 1021 |
+
|
| 1022 |
+
class SafeChildWatcher(BaseChildWatcher):
    """'Safe' child watcher implementation.

    This implementation avoids disrupting other code spawning processes by
    polling explicitly each process in the SIGCHLD handler instead of calling
    os.waitpid(-1).

    This is a safe solution but it has a significant overhead when handling a
    big number of children (O(n) each time SIGCHLD is raised)
    """

    def close(self):
        self._callbacks.clear()
        super().close()

    def __enter__(self):
        return self

    def __exit__(self, a, b, c):
        pass

    def add_child_handler(self, pid, callback, *args):
        self._callbacks[pid] = (callback, args)

        # Prevent a race condition in case the child is already terminated.
        self._do_waitpid(pid)

    def remove_child_handler(self, pid):
        try:
            del self._callbacks[pid]
            return True
        except KeyError:
            return False

    def _do_waitpid_all(self):
        # Iterate over a snapshot: _do_waitpid() pops entries as it reaps.
        for pid in list(self._callbacks):
            self._do_waitpid(pid)

    def _do_waitpid(self, expected_pid):
        assert expected_pid > 0

        try:
            # WNOHANG: don't block if the child hasn't exited yet.
            pid, status = os.waitpid(expected_pid, os.WNOHANG)
        except ChildProcessError:
            # The child process is already reaped
            # (may happen if waitpid() is called elsewhere).
            pid = expected_pid
            returncode = 255
            logger.warning(
                "Unknown child process pid %d, will report returncode 255",
                pid)
        else:
            if pid == 0:
                # The child process is still alive.
                return

            returncode = waitstatus_to_exitcode(status)
            if self._loop.get_debug():
                logger.debug('process %s exited with returncode %s',
                             expected_pid, returncode)

        try:
            callback, args = self._callbacks.pop(pid)
        except KeyError:  # pragma: no cover
            # May happen if .remove_child_handler() is called
            # after os.waitpid() returns.
            if self._loop.get_debug():
                logger.warning("Child watcher got an unexpected pid: %r",
                               pid, exc_info=True)
        else:
            callback(pid, returncode, *args)
|
| 1094 |
+
|
| 1095 |
+
|
| 1096 |
+
class FastChildWatcher(BaseChildWatcher):
    """'Fast' child watcher implementation.

    This implementation reaps every terminated processes by calling
    os.waitpid(-1) directly, possibly breaking other code spawning processes
    and waiting for their termination.

    There is no noticeable overhead when handling a big number of children
    (O(1) each time a child terminates).
    """
    def __init__(self):
        super().__init__()
        self._lock = threading.Lock()
        # pid -> returncode of children reaped before a handler was registered
        self._zombies = {}
        # Number of active context-manager entries (forks in flight)
        self._forks = 0

    def close(self):
        self._callbacks.clear()
        self._zombies.clear()
        super().close()

    def __enter__(self):
        with self._lock:
            self._forks += 1

        return self

    def __exit__(self, a, b, c):
        with self._lock:
            self._forks -= 1

            if self._forks or not self._zombies:
                return

            # Format the zombie table while still holding the lock.
            collateral_victims = str(self._zombies)
            self._zombies.clear()

        logger.warning(
            "Caught subprocesses termination from unknown pids: %s",
            collateral_victims)

    def add_child_handler(self, pid, callback, *args):
        assert self._forks, "Must use the context manager"

        with self._lock:
            try:
                returncode = self._zombies.pop(pid)
            except KeyError:
                # The child is running.
                self._callbacks[pid] = callback, args
                return

        # The child is dead already. We can fire the callback.
        callback(pid, returncode, *args)

    def remove_child_handler(self, pid):
        try:
            del self._callbacks[pid]
            return True
        except KeyError:
            return False

    def _do_waitpid_all(self):
        # Because of signal coalescing, we must keep calling waitpid() as
        # long as we're able to reap a child.
        while True:
            try:
                pid, status = os.waitpid(-1, os.WNOHANG)
            except ChildProcessError:
                # No more child processes exist.
                return
            else:
                if pid == 0:
                    # A child process is still alive.
                    return

                returncode = waitstatus_to_exitcode(status)

            with self._lock:
                try:
                    callback, args = self._callbacks.pop(pid)
                except KeyError:
                    # unknown child
                    if self._forks:
                        # It may not be registered yet.
                        self._zombies[pid] = returncode
                        if self._loop.get_debug():
                            logger.debug('unknown process %s exited '
                                         'with returncode %s',
                                         pid, returncode)
                        continue
                    callback = None
                else:
                    if self._loop.get_debug():
                        logger.debug('process %s exited with returncode %s',
                                     pid, returncode)

            if callback is None:
                logger.warning(
                    "Caught subprocess termination from unknown pid: "
                    "%d -> %d", pid, returncode)
            else:
                callback(pid, returncode, *args)
|
| 1199 |
+
|
| 1200 |
+
|
| 1201 |
+
class MultiLoopChildWatcher(AbstractChildWatcher):
    """A watcher that doesn't require running loop in the main thread.

    This implementation registers a SIGCHLD signal handler on
    instantiation (which may conflict with other code that
    install own handler for this signal).

    The solution is safe but it has a significant overhead when
    handling a big number of processes (*O(n)* each time a
    SIGCHLD is received).
    """

    # Implementation note:
    # The class keeps compatibility with AbstractChildWatcher ABC
    # To achieve this it has empty attach_loop() method
    # and doesn't accept explicit loop argument
    # for add_child_handler()/remove_child_handler()
    # but retrieves the current loop by get_running_loop()

    def __init__(self):
        # pid -> (loop, callback, args); each handler remembers its own loop.
        self._callbacks = {}
        self._saved_sighandler = None

    def is_active(self):
        return self._saved_sighandler is not None

    def close(self):
        self._callbacks.clear()
        if self._saved_sighandler is None:
            return

        handler = signal.getsignal(signal.SIGCHLD)
        if handler != self._sig_chld:
            # Don't restore a handler we no longer own.
            logger.warning("SIGCHLD handler was changed by outside code")
        else:
            signal.signal(signal.SIGCHLD, self._saved_sighandler)
        self._saved_sighandler = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def add_child_handler(self, pid, callback, *args):
        loop = events.get_running_loop()
        self._callbacks[pid] = (loop, callback, args)

        # Prevent a race condition in case the child is already terminated.
        self._do_waitpid(pid)

    def remove_child_handler(self, pid):
        try:
            del self._callbacks[pid]
            return True
        except KeyError:
            return False

    def attach_loop(self, loop):
        # Don't save the loop but initialize itself if called first time
        # The reason to do it here is that attach_loop() is called from
        # unix policy only for the main thread.
        # Main thread is required for subscription on SIGCHLD signal
        if self._saved_sighandler is not None:
            return

        self._saved_sighandler = signal.signal(signal.SIGCHLD, self._sig_chld)
        if self._saved_sighandler is None:
            logger.warning("Previous SIGCHLD handler was set by non-Python code, "
                           "restore to default handler on watcher close.")
            self._saved_sighandler = signal.SIG_DFL

        # Set SA_RESTART to limit EINTR occurrences.
        signal.siginterrupt(signal.SIGCHLD, False)

    def _do_waitpid_all(self):
        # Iterate over a snapshot: _do_waitpid() pops entries as it reaps.
        for pid in list(self._callbacks):
            self._do_waitpid(pid)

    def _do_waitpid(self, expected_pid):
        assert expected_pid > 0

        try:
            pid, status = os.waitpid(expected_pid, os.WNOHANG)
        except ChildProcessError:
            # The child process is already reaped
            # (may happen if waitpid() is called elsewhere).
            pid = expected_pid
            returncode = 255
            logger.warning(
                "Unknown child process pid %d, will report returncode 255",
                pid)
            debug_log = False
        else:
            if pid == 0:
                # The child process is still alive.
                return

            returncode = waitstatus_to_exitcode(status)
            debug_log = True
        try:
            loop, callback, args = self._callbacks.pop(pid)
        except KeyError:  # pragma: no cover
            # May happen if .remove_child_handler() is called
            # after os.waitpid() returns.
            logger.warning("Child watcher got an unexpected pid: %r",
                           pid, exc_info=True)
        else:
            if loop.is_closed():
                logger.warning("Loop %r that handles pid %r is closed", loop, pid)
            else:
                if debug_log and loop.get_debug():
                    logger.debug('process %s exited with returncode %s',
                                 expected_pid, returncode)
                # Signal handler may run in any thread: marshal back to the loop.
                loop.call_soon_threadsafe(callback, pid, returncode, *args)

    def _sig_chld(self, signum, frame):
        try:
            self._do_waitpid_all()
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException:
            logger.warning('Unknown exception in SIGCHLD handler', exc_info=True)
|
| 1324 |
+
|
| 1325 |
+
|
| 1326 |
+
class ThreadedChildWatcher(AbstractChildWatcher):
    """Threaded child watcher implementation.

    The watcher uses a thread per process
    for waiting for the process finish.

    It doesn't require subscription on POSIX signal
    but a thread creation is not free.

    The watcher has O(1) complexity, its performance doesn't depend
    on amount of spawn processes.
    """

    def __init__(self):
        # Used only to generate unique waiter-thread names.
        self._pid_counter = itertools.count(0)
        # pid -> waiter thread
        self._threads = {}

    def is_active(self):
        return True

    def close(self):
        self._join_threads()

    def _join_threads(self):
        """Internal: Join all non-daemon threads"""
        threads = [thread for thread in list(self._threads.values())
                   if thread.is_alive() and not thread.daemon]
        for thread in threads:
            thread.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def __del__(self, _warn=warnings.warn):
        threads = [thread for thread in list(self._threads.values())
                   if thread.is_alive()]
        if threads:
            _warn(f"{self.__class__} has registered but not finished child processes",
                  ResourceWarning,
                  source=self)

    def add_child_handler(self, pid, callback, *args):
        loop = events.get_running_loop()
        thread = threading.Thread(target=self._do_waitpid,
                                  name=f"waitpid-{next(self._pid_counter)}",
                                  args=(loop, pid, callback, args),
                                  daemon=True)
        self._threads[pid] = thread
        thread.start()

    def remove_child_handler(self, pid):
        # asyncio never calls remove_child_handler() !!!
        # The method is no-op but is implemented because
        # abstract base classes require it.
        return True

    def attach_loop(self, loop):
        pass

    def _do_waitpid(self, loop, expected_pid, callback, args):
        # Runs in a dedicated waiter thread; blocks until the child exits.
        assert expected_pid > 0

        try:
            pid, status = os.waitpid(expected_pid, 0)
        except ChildProcessError:
            # The child process is already reaped
            # (may happen if waitpid() is called elsewhere).
            pid = expected_pid
            returncode = 255
            logger.warning(
                "Unknown child process pid %d, will report returncode 255",
                pid)
        else:
            returncode = waitstatus_to_exitcode(status)
            if loop.get_debug():
                logger.debug('process %s exited with returncode %s',
                             expected_pid, returncode)

        if loop.is_closed():
            logger.warning("Loop %r that handles pid %r is closed", loop, pid)
        else:
            # Deliver the callback on the loop's thread, not this waiter thread.
            loop.call_soon_threadsafe(callback, pid, returncode, *args)

        self._threads.pop(expected_pid)
|
| 1413 |
+
|
| 1414 |
+
|
| 1415 |
+
class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
    """UNIX event loop policy with a watcher for child processes."""
    _loop_factory = _UnixSelectorEventLoop

    def __init__(self):
        super().__init__()
        # Lazily created in _init_watcher() on first get_child_watcher() call.
        self._watcher = None

    def _init_watcher(self):
        # events._lock guards against two threads creating watchers at once.
        with events._lock:
            if self._watcher is None:  # pragma: no branch
                self._watcher = ThreadedChildWatcher()
                if threading.current_thread() is threading.main_thread():
                    self._watcher.attach_loop(self._local._loop)

    def set_event_loop(self, loop):
        """Set the event loop.

        As a side effect, if a child watcher was set before, then calling
        .set_event_loop() from the main thread will call .attach_loop(loop) on
        the child watcher.
        """

        super().set_event_loop(loop)

        if (self._watcher is not None and
                threading.current_thread() is threading.main_thread()):
            self._watcher.attach_loop(loop)

    def get_child_watcher(self):
        """Get the watcher for child processes.

        If not yet set, a ThreadedChildWatcher object is automatically created.
        """
        if self._watcher is None:
            self._init_watcher()

        return self._watcher

    def set_child_watcher(self, watcher):
        """Set the watcher for child processes."""

        assert watcher is None or isinstance(watcher, AbstractChildWatcher)

        if self._watcher is not None:
            self._watcher.close()

        self._watcher = watcher
|
| 1463 |
+
|
| 1464 |
+
|
| 1465 |
+
# Public aliases: on UNIX the selector loop is the default implementation.
SelectorEventLoop = _UnixSelectorEventLoop
DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy
|
omnilmm/lib/python3.10/asyncio/windows_events.py
ADDED
|
@@ -0,0 +1,924 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Selector and proactor event loops for Windows."""
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
|
| 5 |
+
if sys.platform != 'win32': # pragma: no cover
|
| 6 |
+
raise ImportError('win32 only')
|
| 7 |
+
|
| 8 |
+
import _overlapped
|
| 9 |
+
import _winapi
|
| 10 |
+
import errno
|
| 11 |
+
import math
|
| 12 |
+
import msvcrt
|
| 13 |
+
import socket
|
| 14 |
+
import struct
|
| 15 |
+
import time
|
| 16 |
+
import weakref
|
| 17 |
+
|
| 18 |
+
from . import events
|
| 19 |
+
from . import base_subprocess
|
| 20 |
+
from . import futures
|
| 21 |
+
from . import exceptions
|
| 22 |
+
from . import proactor_events
|
| 23 |
+
from . import selector_events
|
| 24 |
+
from . import tasks
|
| 25 |
+
from . import windows_utils
|
| 26 |
+
from .log import logger
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
__all__ = (
|
| 30 |
+
'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
|
| 31 |
+
'DefaultEventLoopPolicy', 'WindowsSelectorEventLoopPolicy',
|
| 32 |
+
'WindowsProactorEventLoopPolicy',
|
| 33 |
+
)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
NULL = 0
# Win32 wait timeout meaning "wait forever".
INFINITE = 0xffffffff
# Win32 system error codes (winerror values) checked by the proactor.
ERROR_CONNECTION_REFUSED = 1225
ERROR_CONNECTION_ABORTED = 1236

# Initial delay in seconds for connect_pipe() before retrying to connect
CONNECT_PIPE_INIT_DELAY = 0.001

# Maximum delay in seconds for connect_pipe() before retrying to connect
CONNECT_PIPE_MAX_DELAY = 0.100
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class _OverlappedFuture(futures.Future):
    """Future wrapping a single overlapped (asynchronous) Win32 operation.

    Cancelling the future immediately cancels the underlying overlapped
    operation as well.
    """

    def __init__(self, ov, *, loop=None):
        super().__init__(loop=loop)
        if self._source_traceback:
            # Drop the frame for this constructor from the captured traceback.
            del self._source_traceback[-1]
        self._ov = ov

    def _repr_info(self):
        info = super()._repr_info()
        ov = self._ov
        if ov is not None:
            state = 'pending' if ov.pending else 'completed'
            info.insert(1, f'overlapped=<{state}, {ov.address:#x}>')
        return info

    def _cancel_overlapped(self):
        ov = self._ov
        if ov is None:
            return
        try:
            ov.cancel()
        except OSError as err:
            details = {
                'message': 'Cancelling an overlapped future failed',
                'exception': err,
                'future': self,
            }
            if self._source_traceback:
                details['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(details)
        self._ov = None

    def cancel(self, msg=None):
        # Tear down the overlapped operation before cancelling the future.
        self._cancel_overlapped()
        return super().cancel(msg=msg)

    def set_exception(self, exception):
        super().set_exception(exception)
        self._cancel_overlapped()

    def set_result(self, result):
        super().set_result(result)
        # Operation completed normally: just drop the reference.
        self._ov = None
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class _BaseWaitHandleFuture(futures.Future):
    """Subclass of Future which represents a wait handle."""

    def __init__(self, ov, handle, wait_handle, *, loop=None):
        super().__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        # Keep a reference to the Overlapped object to keep it alive until the
        # wait is unregistered
        self._ov = ov
        self._handle = handle
        self._wait_handle = wait_handle

        # Should we call UnregisterWaitEx() if the wait completes
        # or is cancelled?
        self._registered = True

    def _poll(self):
        # non-blocking wait: use a timeout of 0 millisecond
        return (_winapi.WaitForSingleObject(self._handle, 0) ==
                _winapi.WAIT_OBJECT_0)

    def _repr_info(self):
        info = super()._repr_info()
        info.append(f'handle={self._handle:#x}')
        if self._handle is not None:
            state = 'signaled' if self._poll() else 'waiting'
            info.append(state)
        if self._wait_handle is not None:
            info.append(f'wait_handle={self._wait_handle:#x}')
        return info

    def _unregister_wait_cb(self, fut):
        # The wait was unregistered: it's not safe to destroy the Overlapped
        # object
        self._ov = None

    def _unregister_wait(self):
        # Idempotent: the first caller wins, later calls are no-ops.
        if not self._registered:
            return
        self._registered = False

        wait_handle = self._wait_handle
        self._wait_handle = None
        try:
            _overlapped.UnregisterWait(wait_handle)
        except OSError as exc:
            if exc.winerror != _overlapped.ERROR_IO_PENDING:
                context = {
                    'message': 'Failed to unregister the wait handle',
                    'exception': exc,
                    'future': self,
                }
                if self._source_traceback:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)
                return
            # ERROR_IO_PENDING means that the unregister is pending

        self._unregister_wait_cb(None)

    def cancel(self, msg=None):
        self._unregister_wait()
        return super().cancel(msg=msg)

    def set_exception(self, exception):
        self._unregister_wait()
        super().set_exception(exception)

    def set_result(self, result):
        self._unregister_wait()
        super().set_result(result)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class _WaitCancelFuture(_BaseWaitHandleFuture):
|
| 171 |
+
"""Subclass of Future which represents a wait for the cancellation of a
|
| 172 |
+
_WaitHandleFuture using an event.
|
| 173 |
+
"""
|
| 174 |
+
|
| 175 |
+
def __init__(self, ov, event, wait_handle, *, loop=None):
|
| 176 |
+
super().__init__(ov, event, wait_handle, loop=loop)
|
| 177 |
+
|
| 178 |
+
self._done_callback = None
|
| 179 |
+
|
| 180 |
+
def cancel(self):
|
| 181 |
+
raise RuntimeError("_WaitCancelFuture must not be cancelled")
|
| 182 |
+
|
| 183 |
+
def set_result(self, result):
|
| 184 |
+
super().set_result(result)
|
| 185 |
+
if self._done_callback is not None:
|
| 186 |
+
self._done_callback(self)
|
| 187 |
+
|
| 188 |
+
def set_exception(self, exception):
|
| 189 |
+
super().set_exception(exception)
|
| 190 |
+
if self._done_callback is not None:
|
| 191 |
+
self._done_callback(self)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
class _WaitHandleFuture(_BaseWaitHandleFuture):
|
| 195 |
+
def __init__(self, ov, handle, wait_handle, proactor, *, loop=None):
|
| 196 |
+
super().__init__(ov, handle, wait_handle, loop=loop)
|
| 197 |
+
self._proactor = proactor
|
| 198 |
+
self._unregister_proactor = True
|
| 199 |
+
self._event = _overlapped.CreateEvent(None, True, False, None)
|
| 200 |
+
self._event_fut = None
|
| 201 |
+
|
| 202 |
+
def _unregister_wait_cb(self, fut):
|
| 203 |
+
if self._event is not None:
|
| 204 |
+
_winapi.CloseHandle(self._event)
|
| 205 |
+
self._event = None
|
| 206 |
+
self._event_fut = None
|
| 207 |
+
|
| 208 |
+
# If the wait was cancelled, the wait may never be signalled, so
|
| 209 |
+
# it's required to unregister it. Otherwise, IocpProactor.close() will
|
| 210 |
+
# wait forever for an event which will never come.
|
| 211 |
+
#
|
| 212 |
+
# If the IocpProactor already received the event, it's safe to call
|
| 213 |
+
# _unregister() because we kept a reference to the Overlapped object
|
| 214 |
+
# which is used as a unique key.
|
| 215 |
+
self._proactor._unregister(self._ov)
|
| 216 |
+
self._proactor = None
|
| 217 |
+
|
| 218 |
+
super()._unregister_wait_cb(fut)
|
| 219 |
+
|
| 220 |
+
def _unregister_wait(self):
|
| 221 |
+
if not self._registered:
|
| 222 |
+
return
|
| 223 |
+
self._registered = False
|
| 224 |
+
|
| 225 |
+
wait_handle = self._wait_handle
|
| 226 |
+
self._wait_handle = None
|
| 227 |
+
try:
|
| 228 |
+
_overlapped.UnregisterWaitEx(wait_handle, self._event)
|
| 229 |
+
except OSError as exc:
|
| 230 |
+
if exc.winerror != _overlapped.ERROR_IO_PENDING:
|
| 231 |
+
context = {
|
| 232 |
+
'message': 'Failed to unregister the wait handle',
|
| 233 |
+
'exception': exc,
|
| 234 |
+
'future': self,
|
| 235 |
+
}
|
| 236 |
+
if self._source_traceback:
|
| 237 |
+
context['source_traceback'] = self._source_traceback
|
| 238 |
+
self._loop.call_exception_handler(context)
|
| 239 |
+
return
|
| 240 |
+
# ERROR_IO_PENDING is not an error, the wait was unregistered
|
| 241 |
+
|
| 242 |
+
self._event_fut = self._proactor._wait_cancel(self._event,
|
| 243 |
+
self._unregister_wait_cb)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
class PipeServer(object):
|
| 247 |
+
"""Class representing a pipe server.
|
| 248 |
+
|
| 249 |
+
This is much like a bound, listening socket.
|
| 250 |
+
"""
|
| 251 |
+
def __init__(self, address):
|
| 252 |
+
self._address = address
|
| 253 |
+
self._free_instances = weakref.WeakSet()
|
| 254 |
+
# initialize the pipe attribute before calling _server_pipe_handle()
|
| 255 |
+
# because this function can raise an exception and the destructor calls
|
| 256 |
+
# the close() method
|
| 257 |
+
self._pipe = None
|
| 258 |
+
self._accept_pipe_future = None
|
| 259 |
+
self._pipe = self._server_pipe_handle(True)
|
| 260 |
+
|
| 261 |
+
def _get_unconnected_pipe(self):
|
| 262 |
+
# Create new instance and return previous one. This ensures
|
| 263 |
+
# that (until the server is closed) there is always at least
|
| 264 |
+
# one pipe handle for address. Therefore if a client attempt
|
| 265 |
+
# to connect it will not fail with FileNotFoundError.
|
| 266 |
+
tmp, self._pipe = self._pipe, self._server_pipe_handle(False)
|
| 267 |
+
return tmp
|
| 268 |
+
|
| 269 |
+
def _server_pipe_handle(self, first):
|
| 270 |
+
# Return a wrapper for a new pipe handle.
|
| 271 |
+
if self.closed():
|
| 272 |
+
return None
|
| 273 |
+
flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
|
| 274 |
+
if first:
|
| 275 |
+
flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
|
| 276 |
+
h = _winapi.CreateNamedPipe(
|
| 277 |
+
self._address, flags,
|
| 278 |
+
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
|
| 279 |
+
_winapi.PIPE_WAIT,
|
| 280 |
+
_winapi.PIPE_UNLIMITED_INSTANCES,
|
| 281 |
+
windows_utils.BUFSIZE, windows_utils.BUFSIZE,
|
| 282 |
+
_winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
|
| 283 |
+
pipe = windows_utils.PipeHandle(h)
|
| 284 |
+
self._free_instances.add(pipe)
|
| 285 |
+
return pipe
|
| 286 |
+
|
| 287 |
+
def closed(self):
|
| 288 |
+
return (self._address is None)
|
| 289 |
+
|
| 290 |
+
def close(self):
|
| 291 |
+
if self._accept_pipe_future is not None:
|
| 292 |
+
self._accept_pipe_future.cancel()
|
| 293 |
+
self._accept_pipe_future = None
|
| 294 |
+
# Close all instances which have not been connected to by a client.
|
| 295 |
+
if self._address is not None:
|
| 296 |
+
for pipe in self._free_instances:
|
| 297 |
+
pipe.close()
|
| 298 |
+
self._pipe = None
|
| 299 |
+
self._address = None
|
| 300 |
+
self._free_instances.clear()
|
| 301 |
+
|
| 302 |
+
__del__ = close
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop):
|
| 306 |
+
"""Windows version of selector event loop."""
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
|
| 310 |
+
"""Windows version of proactor event loop using IOCP."""
|
| 311 |
+
|
| 312 |
+
def __init__(self, proactor=None):
|
| 313 |
+
if proactor is None:
|
| 314 |
+
proactor = IocpProactor()
|
| 315 |
+
super().__init__(proactor)
|
| 316 |
+
|
| 317 |
+
def run_forever(self):
|
| 318 |
+
try:
|
| 319 |
+
assert self._self_reading_future is None
|
| 320 |
+
self.call_soon(self._loop_self_reading)
|
| 321 |
+
super().run_forever()
|
| 322 |
+
finally:
|
| 323 |
+
if self._self_reading_future is not None:
|
| 324 |
+
ov = self._self_reading_future._ov
|
| 325 |
+
self._self_reading_future.cancel()
|
| 326 |
+
# self_reading_future always uses IOCP, so even though it's
|
| 327 |
+
# been cancelled, we need to make sure that the IOCP message
|
| 328 |
+
# is received so that the kernel is not holding on to the
|
| 329 |
+
# memory, possibly causing memory corruption later. Only
|
| 330 |
+
# unregister it if IO is complete in all respects. Otherwise
|
| 331 |
+
# we need another _poll() later to complete the IO.
|
| 332 |
+
if ov is not None and not ov.pending:
|
| 333 |
+
self._proactor._unregister(ov)
|
| 334 |
+
self._self_reading_future = None
|
| 335 |
+
|
| 336 |
+
async def create_pipe_connection(self, protocol_factory, address):
|
| 337 |
+
f = self._proactor.connect_pipe(address)
|
| 338 |
+
pipe = await f
|
| 339 |
+
protocol = protocol_factory()
|
| 340 |
+
trans = self._make_duplex_pipe_transport(pipe, protocol,
|
| 341 |
+
extra={'addr': address})
|
| 342 |
+
return trans, protocol
|
| 343 |
+
|
| 344 |
+
async def start_serving_pipe(self, protocol_factory, address):
|
| 345 |
+
server = PipeServer(address)
|
| 346 |
+
|
| 347 |
+
def loop_accept_pipe(f=None):
|
| 348 |
+
pipe = None
|
| 349 |
+
try:
|
| 350 |
+
if f:
|
| 351 |
+
pipe = f.result()
|
| 352 |
+
server._free_instances.discard(pipe)
|
| 353 |
+
|
| 354 |
+
if server.closed():
|
| 355 |
+
# A client connected before the server was closed:
|
| 356 |
+
# drop the client (close the pipe) and exit
|
| 357 |
+
pipe.close()
|
| 358 |
+
return
|
| 359 |
+
|
| 360 |
+
protocol = protocol_factory()
|
| 361 |
+
self._make_duplex_pipe_transport(
|
| 362 |
+
pipe, protocol, extra={'addr': address})
|
| 363 |
+
|
| 364 |
+
pipe = server._get_unconnected_pipe()
|
| 365 |
+
if pipe is None:
|
| 366 |
+
return
|
| 367 |
+
|
| 368 |
+
f = self._proactor.accept_pipe(pipe)
|
| 369 |
+
except BrokenPipeError:
|
| 370 |
+
if pipe and pipe.fileno() != -1:
|
| 371 |
+
pipe.close()
|
| 372 |
+
self.call_soon(loop_accept_pipe)
|
| 373 |
+
except OSError as exc:
|
| 374 |
+
if pipe and pipe.fileno() != -1:
|
| 375 |
+
self.call_exception_handler({
|
| 376 |
+
'message': 'Pipe accept failed',
|
| 377 |
+
'exception': exc,
|
| 378 |
+
'pipe': pipe,
|
| 379 |
+
})
|
| 380 |
+
pipe.close()
|
| 381 |
+
elif self._debug:
|
| 382 |
+
logger.warning("Accept pipe failed on pipe %r",
|
| 383 |
+
pipe, exc_info=True)
|
| 384 |
+
self.call_soon(loop_accept_pipe)
|
| 385 |
+
except exceptions.CancelledError:
|
| 386 |
+
if pipe:
|
| 387 |
+
pipe.close()
|
| 388 |
+
else:
|
| 389 |
+
server._accept_pipe_future = f
|
| 390 |
+
f.add_done_callback(loop_accept_pipe)
|
| 391 |
+
|
| 392 |
+
self.call_soon(loop_accept_pipe)
|
| 393 |
+
return [server]
|
| 394 |
+
|
| 395 |
+
async def _make_subprocess_transport(self, protocol, args, shell,
|
| 396 |
+
stdin, stdout, stderr, bufsize,
|
| 397 |
+
extra=None, **kwargs):
|
| 398 |
+
waiter = self.create_future()
|
| 399 |
+
transp = _WindowsSubprocessTransport(self, protocol, args, shell,
|
| 400 |
+
stdin, stdout, stderr, bufsize,
|
| 401 |
+
waiter=waiter, extra=extra,
|
| 402 |
+
**kwargs)
|
| 403 |
+
try:
|
| 404 |
+
await waiter
|
| 405 |
+
except (SystemExit, KeyboardInterrupt):
|
| 406 |
+
raise
|
| 407 |
+
except BaseException:
|
| 408 |
+
transp.close()
|
| 409 |
+
await transp._wait()
|
| 410 |
+
raise
|
| 411 |
+
|
| 412 |
+
return transp
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
class IocpProactor:
|
| 416 |
+
"""Proactor implementation using IOCP."""
|
| 417 |
+
|
| 418 |
+
def __init__(self, concurrency=0xffffffff):
|
| 419 |
+
self._loop = None
|
| 420 |
+
self._results = []
|
| 421 |
+
self._iocp = _overlapped.CreateIoCompletionPort(
|
| 422 |
+
_overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency)
|
| 423 |
+
self._cache = {}
|
| 424 |
+
self._registered = weakref.WeakSet()
|
| 425 |
+
self._unregistered = []
|
| 426 |
+
self._stopped_serving = weakref.WeakSet()
|
| 427 |
+
|
| 428 |
+
def _check_closed(self):
|
| 429 |
+
if self._iocp is None:
|
| 430 |
+
raise RuntimeError('IocpProactor is closed')
|
| 431 |
+
|
| 432 |
+
def __repr__(self):
|
| 433 |
+
info = ['overlapped#=%s' % len(self._cache),
|
| 434 |
+
'result#=%s' % len(self._results)]
|
| 435 |
+
if self._iocp is None:
|
| 436 |
+
info.append('closed')
|
| 437 |
+
return '<%s %s>' % (self.__class__.__name__, " ".join(info))
|
| 438 |
+
|
| 439 |
+
def set_loop(self, loop):
|
| 440 |
+
self._loop = loop
|
| 441 |
+
|
| 442 |
+
def select(self, timeout=None):
|
| 443 |
+
if not self._results:
|
| 444 |
+
self._poll(timeout)
|
| 445 |
+
tmp = self._results
|
| 446 |
+
self._results = []
|
| 447 |
+
try:
|
| 448 |
+
return tmp
|
| 449 |
+
finally:
|
| 450 |
+
# Needed to break cycles when an exception occurs.
|
| 451 |
+
tmp = None
|
| 452 |
+
|
| 453 |
+
def _result(self, value):
|
| 454 |
+
fut = self._loop.create_future()
|
| 455 |
+
fut.set_result(value)
|
| 456 |
+
return fut
|
| 457 |
+
|
| 458 |
+
def recv(self, conn, nbytes, flags=0):
|
| 459 |
+
self._register_with_iocp(conn)
|
| 460 |
+
ov = _overlapped.Overlapped(NULL)
|
| 461 |
+
try:
|
| 462 |
+
if isinstance(conn, socket.socket):
|
| 463 |
+
ov.WSARecv(conn.fileno(), nbytes, flags)
|
| 464 |
+
else:
|
| 465 |
+
ov.ReadFile(conn.fileno(), nbytes)
|
| 466 |
+
except BrokenPipeError:
|
| 467 |
+
return self._result(b'')
|
| 468 |
+
|
| 469 |
+
def finish_recv(trans, key, ov):
|
| 470 |
+
try:
|
| 471 |
+
return ov.getresult()
|
| 472 |
+
except OSError as exc:
|
| 473 |
+
if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
|
| 474 |
+
_overlapped.ERROR_OPERATION_ABORTED):
|
| 475 |
+
raise ConnectionResetError(*exc.args)
|
| 476 |
+
else:
|
| 477 |
+
raise
|
| 478 |
+
|
| 479 |
+
return self._register(ov, conn, finish_recv)
|
| 480 |
+
|
| 481 |
+
def recv_into(self, conn, buf, flags=0):
|
| 482 |
+
self._register_with_iocp(conn)
|
| 483 |
+
ov = _overlapped.Overlapped(NULL)
|
| 484 |
+
try:
|
| 485 |
+
if isinstance(conn, socket.socket):
|
| 486 |
+
ov.WSARecvInto(conn.fileno(), buf, flags)
|
| 487 |
+
else:
|
| 488 |
+
ov.ReadFileInto(conn.fileno(), buf)
|
| 489 |
+
except BrokenPipeError:
|
| 490 |
+
return self._result(0)
|
| 491 |
+
|
| 492 |
+
def finish_recv(trans, key, ov):
|
| 493 |
+
try:
|
| 494 |
+
return ov.getresult()
|
| 495 |
+
except OSError as exc:
|
| 496 |
+
if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
|
| 497 |
+
_overlapped.ERROR_OPERATION_ABORTED):
|
| 498 |
+
raise ConnectionResetError(*exc.args)
|
| 499 |
+
else:
|
| 500 |
+
raise
|
| 501 |
+
|
| 502 |
+
return self._register(ov, conn, finish_recv)
|
| 503 |
+
|
| 504 |
+
def recvfrom(self, conn, nbytes, flags=0):
|
| 505 |
+
self._register_with_iocp(conn)
|
| 506 |
+
ov = _overlapped.Overlapped(NULL)
|
| 507 |
+
try:
|
| 508 |
+
ov.WSARecvFrom(conn.fileno(), nbytes, flags)
|
| 509 |
+
except BrokenPipeError:
|
| 510 |
+
return self._result((b'', None))
|
| 511 |
+
|
| 512 |
+
def finish_recv(trans, key, ov):
|
| 513 |
+
try:
|
| 514 |
+
return ov.getresult()
|
| 515 |
+
except OSError as exc:
|
| 516 |
+
if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
|
| 517 |
+
_overlapped.ERROR_OPERATION_ABORTED):
|
| 518 |
+
raise ConnectionResetError(*exc.args)
|
| 519 |
+
else:
|
| 520 |
+
raise
|
| 521 |
+
|
| 522 |
+
return self._register(ov, conn, finish_recv)
|
| 523 |
+
|
| 524 |
+
def sendto(self, conn, buf, flags=0, addr=None):
|
| 525 |
+
self._register_with_iocp(conn)
|
| 526 |
+
ov = _overlapped.Overlapped(NULL)
|
| 527 |
+
|
| 528 |
+
ov.WSASendTo(conn.fileno(), buf, flags, addr)
|
| 529 |
+
|
| 530 |
+
def finish_send(trans, key, ov):
|
| 531 |
+
try:
|
| 532 |
+
return ov.getresult()
|
| 533 |
+
except OSError as exc:
|
| 534 |
+
if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
|
| 535 |
+
_overlapped.ERROR_OPERATION_ABORTED):
|
| 536 |
+
raise ConnectionResetError(*exc.args)
|
| 537 |
+
else:
|
| 538 |
+
raise
|
| 539 |
+
|
| 540 |
+
return self._register(ov, conn, finish_send)
|
| 541 |
+
|
| 542 |
+
def send(self, conn, buf, flags=0):
|
| 543 |
+
self._register_with_iocp(conn)
|
| 544 |
+
ov = _overlapped.Overlapped(NULL)
|
| 545 |
+
if isinstance(conn, socket.socket):
|
| 546 |
+
ov.WSASend(conn.fileno(), buf, flags)
|
| 547 |
+
else:
|
| 548 |
+
ov.WriteFile(conn.fileno(), buf)
|
| 549 |
+
|
| 550 |
+
def finish_send(trans, key, ov):
|
| 551 |
+
try:
|
| 552 |
+
return ov.getresult()
|
| 553 |
+
except OSError as exc:
|
| 554 |
+
if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
|
| 555 |
+
_overlapped.ERROR_OPERATION_ABORTED):
|
| 556 |
+
raise ConnectionResetError(*exc.args)
|
| 557 |
+
else:
|
| 558 |
+
raise
|
| 559 |
+
|
| 560 |
+
return self._register(ov, conn, finish_send)
|
| 561 |
+
|
| 562 |
+
def accept(self, listener):
|
| 563 |
+
self._register_with_iocp(listener)
|
| 564 |
+
conn = self._get_accept_socket(listener.family)
|
| 565 |
+
ov = _overlapped.Overlapped(NULL)
|
| 566 |
+
ov.AcceptEx(listener.fileno(), conn.fileno())
|
| 567 |
+
|
| 568 |
+
def finish_accept(trans, key, ov):
|
| 569 |
+
ov.getresult()
|
| 570 |
+
# Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.
|
| 571 |
+
buf = struct.pack('@P', listener.fileno())
|
| 572 |
+
conn.setsockopt(socket.SOL_SOCKET,
|
| 573 |
+
_overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf)
|
| 574 |
+
conn.settimeout(listener.gettimeout())
|
| 575 |
+
return conn, conn.getpeername()
|
| 576 |
+
|
| 577 |
+
async def accept_coro(future, conn):
|
| 578 |
+
# Coroutine closing the accept socket if the future is cancelled
|
| 579 |
+
try:
|
| 580 |
+
await future
|
| 581 |
+
except exceptions.CancelledError:
|
| 582 |
+
conn.close()
|
| 583 |
+
raise
|
| 584 |
+
|
| 585 |
+
future = self._register(ov, listener, finish_accept)
|
| 586 |
+
coro = accept_coro(future, conn)
|
| 587 |
+
tasks.ensure_future(coro, loop=self._loop)
|
| 588 |
+
return future
|
| 589 |
+
|
| 590 |
+
def connect(self, conn, address):
|
| 591 |
+
if conn.type == socket.SOCK_DGRAM:
|
| 592 |
+
# WSAConnect will complete immediately for UDP sockets so we don't
|
| 593 |
+
# need to register any IOCP operation
|
| 594 |
+
_overlapped.WSAConnect(conn.fileno(), address)
|
| 595 |
+
fut = self._loop.create_future()
|
| 596 |
+
fut.set_result(None)
|
| 597 |
+
return fut
|
| 598 |
+
|
| 599 |
+
self._register_with_iocp(conn)
|
| 600 |
+
# The socket needs to be locally bound before we call ConnectEx().
|
| 601 |
+
try:
|
| 602 |
+
_overlapped.BindLocal(conn.fileno(), conn.family)
|
| 603 |
+
except OSError as e:
|
| 604 |
+
if e.winerror != errno.WSAEINVAL:
|
| 605 |
+
raise
|
| 606 |
+
# Probably already locally bound; check using getsockname().
|
| 607 |
+
if conn.getsockname()[1] == 0:
|
| 608 |
+
raise
|
| 609 |
+
ov = _overlapped.Overlapped(NULL)
|
| 610 |
+
ov.ConnectEx(conn.fileno(), address)
|
| 611 |
+
|
| 612 |
+
def finish_connect(trans, key, ov):
|
| 613 |
+
ov.getresult()
|
| 614 |
+
# Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.
|
| 615 |
+
conn.setsockopt(socket.SOL_SOCKET,
|
| 616 |
+
_overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
|
| 617 |
+
return conn
|
| 618 |
+
|
| 619 |
+
return self._register(ov, conn, finish_connect)
|
| 620 |
+
|
| 621 |
+
def sendfile(self, sock, file, offset, count):
|
| 622 |
+
self._register_with_iocp(sock)
|
| 623 |
+
ov = _overlapped.Overlapped(NULL)
|
| 624 |
+
offset_low = offset & 0xffff_ffff
|
| 625 |
+
offset_high = (offset >> 32) & 0xffff_ffff
|
| 626 |
+
ov.TransmitFile(sock.fileno(),
|
| 627 |
+
msvcrt.get_osfhandle(file.fileno()),
|
| 628 |
+
offset_low, offset_high,
|
| 629 |
+
count, 0, 0)
|
| 630 |
+
|
| 631 |
+
def finish_sendfile(trans, key, ov):
|
| 632 |
+
try:
|
| 633 |
+
return ov.getresult()
|
| 634 |
+
except OSError as exc:
|
| 635 |
+
if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
|
| 636 |
+
_overlapped.ERROR_OPERATION_ABORTED):
|
| 637 |
+
raise ConnectionResetError(*exc.args)
|
| 638 |
+
else:
|
| 639 |
+
raise
|
| 640 |
+
return self._register(ov, sock, finish_sendfile)
|
| 641 |
+
|
| 642 |
+
def accept_pipe(self, pipe):
|
| 643 |
+
self._register_with_iocp(pipe)
|
| 644 |
+
ov = _overlapped.Overlapped(NULL)
|
| 645 |
+
connected = ov.ConnectNamedPipe(pipe.fileno())
|
| 646 |
+
|
| 647 |
+
if connected:
|
| 648 |
+
# ConnectNamePipe() failed with ERROR_PIPE_CONNECTED which means
|
| 649 |
+
# that the pipe is connected. There is no need to wait for the
|
| 650 |
+
# completion of the connection.
|
| 651 |
+
return self._result(pipe)
|
| 652 |
+
|
| 653 |
+
def finish_accept_pipe(trans, key, ov):
|
| 654 |
+
ov.getresult()
|
| 655 |
+
return pipe
|
| 656 |
+
|
| 657 |
+
return self._register(ov, pipe, finish_accept_pipe)
|
| 658 |
+
|
| 659 |
+
async def connect_pipe(self, address):
|
| 660 |
+
delay = CONNECT_PIPE_INIT_DELAY
|
| 661 |
+
while True:
|
| 662 |
+
# Unfortunately there is no way to do an overlapped connect to
|
| 663 |
+
# a pipe. Call CreateFile() in a loop until it doesn't fail with
|
| 664 |
+
# ERROR_PIPE_BUSY.
|
| 665 |
+
try:
|
| 666 |
+
handle = _overlapped.ConnectPipe(address)
|
| 667 |
+
break
|
| 668 |
+
except OSError as exc:
|
| 669 |
+
if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
|
| 670 |
+
raise
|
| 671 |
+
|
| 672 |
+
# ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
|
| 673 |
+
delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
|
| 674 |
+
await tasks.sleep(delay)
|
| 675 |
+
|
| 676 |
+
return windows_utils.PipeHandle(handle)
|
| 677 |
+
|
| 678 |
+
def wait_for_handle(self, handle, timeout=None):
|
| 679 |
+
"""Wait for a handle.
|
| 680 |
+
|
| 681 |
+
Return a Future object. The result of the future is True if the wait
|
| 682 |
+
completed, or False if the wait did not complete (on timeout).
|
| 683 |
+
"""
|
| 684 |
+
return self._wait_for_handle(handle, timeout, False)
|
| 685 |
+
|
| 686 |
+
def _wait_cancel(self, event, done_callback):
|
| 687 |
+
fut = self._wait_for_handle(event, None, True)
|
| 688 |
+
# add_done_callback() cannot be used because the wait may only complete
|
| 689 |
+
# in IocpProactor.close(), while the event loop is not running.
|
| 690 |
+
fut._done_callback = done_callback
|
| 691 |
+
return fut
|
| 692 |
+
|
| 693 |
+
def _wait_for_handle(self, handle, timeout, _is_cancel):
|
| 694 |
+
self._check_closed()
|
| 695 |
+
|
| 696 |
+
if timeout is None:
|
| 697 |
+
ms = _winapi.INFINITE
|
| 698 |
+
else:
|
| 699 |
+
# RegisterWaitForSingleObject() has a resolution of 1 millisecond,
|
| 700 |
+
# round away from zero to wait *at least* timeout seconds.
|
| 701 |
+
ms = math.ceil(timeout * 1e3)
|
| 702 |
+
|
| 703 |
+
# We only create ov so we can use ov.address as a key for the cache.
|
| 704 |
+
ov = _overlapped.Overlapped(NULL)
|
| 705 |
+
wait_handle = _overlapped.RegisterWaitWithQueue(
|
| 706 |
+
handle, self._iocp, ov.address, ms)
|
| 707 |
+
if _is_cancel:
|
| 708 |
+
f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop)
|
| 709 |
+
else:
|
| 710 |
+
f = _WaitHandleFuture(ov, handle, wait_handle, self,
|
| 711 |
+
loop=self._loop)
|
| 712 |
+
if f._source_traceback:
|
| 713 |
+
del f._source_traceback[-1]
|
| 714 |
+
|
| 715 |
+
def finish_wait_for_handle(trans, key, ov):
|
| 716 |
+
# Note that this second wait means that we should only use
|
| 717 |
+
# this with handles types where a successful wait has no
|
| 718 |
+
# effect. So events or processes are all right, but locks
|
| 719 |
+
# or semaphores are not. Also note if the handle is
|
| 720 |
+
# signalled and then quickly reset, then we may return
|
| 721 |
+
# False even though we have not timed out.
|
| 722 |
+
return f._poll()
|
| 723 |
+
|
| 724 |
+
self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
|
| 725 |
+
return f
|
| 726 |
+
|
| 727 |
+
def _register_with_iocp(self, obj):
|
| 728 |
+
# To get notifications of finished ops on this objects sent to the
|
| 729 |
+
# completion port, were must register the handle.
|
| 730 |
+
if obj not in self._registered:
|
| 731 |
+
self._registered.add(obj)
|
| 732 |
+
_overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)
|
| 733 |
+
# XXX We could also use SetFileCompletionNotificationModes()
|
| 734 |
+
# to avoid sending notifications to completion port of ops
|
| 735 |
+
# that succeed immediately.
|
| 736 |
+
|
| 737 |
+
def _register(self, ov, obj, callback):
|
| 738 |
+
self._check_closed()
|
| 739 |
+
|
| 740 |
+
# Return a future which will be set with the result of the
|
| 741 |
+
# operation when it completes. The future's value is actually
|
| 742 |
+
# the value returned by callback().
|
| 743 |
+
f = _OverlappedFuture(ov, loop=self._loop)
|
| 744 |
+
if f._source_traceback:
|
| 745 |
+
del f._source_traceback[-1]
|
| 746 |
+
if not ov.pending:
|
| 747 |
+
# The operation has completed, so no need to postpone the
|
| 748 |
+
# work. We cannot take this short cut if we need the
|
| 749 |
+
# NumberOfBytes, CompletionKey values returned by
|
| 750 |
+
# PostQueuedCompletionStatus().
|
| 751 |
+
try:
|
| 752 |
+
value = callback(None, None, ov)
|
| 753 |
+
except OSError as e:
|
| 754 |
+
f.set_exception(e)
|
| 755 |
+
else:
|
| 756 |
+
f.set_result(value)
|
| 757 |
+
# Even if GetOverlappedResult() was called, we have to wait for the
|
| 758 |
+
# notification of the completion in GetQueuedCompletionStatus().
|
| 759 |
+
# Register the overlapped operation to keep a reference to the
|
| 760 |
+
# OVERLAPPED object, otherwise the memory is freed and Windows may
|
| 761 |
+
# read uninitialized memory.
|
| 762 |
+
|
| 763 |
+
# Register the overlapped operation for later. Note that
|
| 764 |
+
# we only store obj to prevent it from being garbage
|
| 765 |
+
# collected too early.
|
| 766 |
+
self._cache[ov.address] = (f, ov, obj, callback)
|
| 767 |
+
return f
|
| 768 |
+
|
| 769 |
+
def _unregister(self, ov):
|
| 770 |
+
"""Unregister an overlapped object.
|
| 771 |
+
|
| 772 |
+
Call this method when its future has been cancelled. The event can
|
| 773 |
+
already be signalled (pending in the proactor event queue). It is also
|
| 774 |
+
safe if the event is never signalled (because it was cancelled).
|
| 775 |
+
"""
|
| 776 |
+
self._check_closed()
|
| 777 |
+
self._unregistered.append(ov)
|
| 778 |
+
|
| 779 |
+
def _get_accept_socket(self, family):
|
| 780 |
+
s = socket.socket(family)
|
| 781 |
+
s.settimeout(0)
|
| 782 |
+
return s
|
| 783 |
+
|
| 784 |
+
def _poll(self, timeout=None):
|
| 785 |
+
if timeout is None:
|
| 786 |
+
ms = INFINITE
|
| 787 |
+
elif timeout < 0:
|
| 788 |
+
raise ValueError("negative timeout")
|
| 789 |
+
else:
|
| 790 |
+
# GetQueuedCompletionStatus() has a resolution of 1 millisecond,
|
| 791 |
+
# round away from zero to wait *at least* timeout seconds.
|
| 792 |
+
ms = math.ceil(timeout * 1e3)
|
| 793 |
+
if ms >= INFINITE:
|
| 794 |
+
raise ValueError("timeout too big")
|
| 795 |
+
|
| 796 |
+
while True:
|
| 797 |
+
status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
|
| 798 |
+
if status is None:
|
| 799 |
+
break
|
| 800 |
+
ms = 0
|
| 801 |
+
|
| 802 |
+
err, transferred, key, address = status
|
| 803 |
+
try:
|
| 804 |
+
f, ov, obj, callback = self._cache.pop(address)
|
| 805 |
+
except KeyError:
|
| 806 |
+
if self._loop.get_debug():
|
| 807 |
+
self._loop.call_exception_handler({
|
| 808 |
+
'message': ('GetQueuedCompletionStatus() returned an '
|
| 809 |
+
'unexpected event'),
|
| 810 |
+
'status': ('err=%s transferred=%s key=%#x address=%#x'
|
| 811 |
+
% (err, transferred, key, address)),
|
| 812 |
+
})
|
| 813 |
+
|
| 814 |
+
# key is either zero, or it is used to return a pipe
|
| 815 |
+
# handle which should be closed to avoid a leak.
|
| 816 |
+
if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
|
| 817 |
+
_winapi.CloseHandle(key)
|
| 818 |
+
continue
|
| 819 |
+
|
| 820 |
+
if obj in self._stopped_serving:
|
| 821 |
+
f.cancel()
|
| 822 |
+
# Don't call the callback if _register() already read the result or
|
| 823 |
+
# if the overlapped has been cancelled
|
| 824 |
+
elif not f.done():
|
| 825 |
+
try:
|
| 826 |
+
value = callback(transferred, key, ov)
|
| 827 |
+
except OSError as e:
|
| 828 |
+
f.set_exception(e)
|
| 829 |
+
self._results.append(f)
|
| 830 |
+
else:
|
| 831 |
+
f.set_result(value)
|
| 832 |
+
self._results.append(f)
|
| 833 |
+
finally:
|
| 834 |
+
f = None
|
| 835 |
+
|
| 836 |
+
# Remove unregistered futures
|
| 837 |
+
for ov in self._unregistered:
|
| 838 |
+
self._cache.pop(ov.address, None)
|
| 839 |
+
self._unregistered.clear()
|
| 840 |
+
|
| 841 |
+
def _stop_serving(self, obj):
|
| 842 |
+
# obj is a socket or pipe handle. It will be closed in
|
| 843 |
+
# BaseProactorEventLoop._stop_serving() which will make any
|
| 844 |
+
# pending operations fail quickly.
|
| 845 |
+
self._stopped_serving.add(obj)
|
| 846 |
+
|
| 847 |
+
def close(self):
|
| 848 |
+
if self._iocp is None:
|
| 849 |
+
# already closed
|
| 850 |
+
return
|
| 851 |
+
|
| 852 |
+
# Cancel remaining registered operations.
|
| 853 |
+
for address, (fut, ov, obj, callback) in list(self._cache.items()):
|
| 854 |
+
if fut.cancelled():
|
| 855 |
+
# Nothing to do with cancelled futures
|
| 856 |
+
pass
|
| 857 |
+
elif isinstance(fut, _WaitCancelFuture):
|
| 858 |
+
# _WaitCancelFuture must not be cancelled
|
| 859 |
+
pass
|
| 860 |
+
else:
|
| 861 |
+
try:
|
| 862 |
+
fut.cancel()
|
| 863 |
+
except OSError as exc:
|
| 864 |
+
if self._loop is not None:
|
| 865 |
+
context = {
|
| 866 |
+
'message': 'Cancelling a future failed',
|
| 867 |
+
'exception': exc,
|
| 868 |
+
'future': fut,
|
| 869 |
+
}
|
| 870 |
+
if fut._source_traceback:
|
| 871 |
+
context['source_traceback'] = fut._source_traceback
|
| 872 |
+
self._loop.call_exception_handler(context)
|
| 873 |
+
|
| 874 |
+
# Wait until all cancelled overlapped complete: don't exit with running
|
| 875 |
+
# overlapped to prevent a crash. Display progress every second if the
|
| 876 |
+
# loop is still running.
|
| 877 |
+
msg_update = 1.0
|
| 878 |
+
start_time = time.monotonic()
|
| 879 |
+
next_msg = start_time + msg_update
|
| 880 |
+
while self._cache:
|
| 881 |
+
if next_msg <= time.monotonic():
|
| 882 |
+
logger.debug('%r is running after closing for %.1f seconds',
|
| 883 |
+
self, time.monotonic() - start_time)
|
| 884 |
+
next_msg = time.monotonic() + msg_update
|
| 885 |
+
|
| 886 |
+
# handle a few events, or timeout
|
| 887 |
+
self._poll(msg_update)
|
| 888 |
+
|
| 889 |
+
self._results = []
|
| 890 |
+
|
| 891 |
+
_winapi.CloseHandle(self._iocp)
|
| 892 |
+
self._iocp = None
|
| 893 |
+
|
| 894 |
+
def __del__(self):
|
| 895 |
+
self.close()
|
| 896 |
+
|
| 897 |
+
|
| 898 |
+
class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport):
|
| 899 |
+
|
| 900 |
+
def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
|
| 901 |
+
self._proc = windows_utils.Popen(
|
| 902 |
+
args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
|
| 903 |
+
bufsize=bufsize, **kwargs)
|
| 904 |
+
|
| 905 |
+
def callback(f):
|
| 906 |
+
returncode = self._proc.poll()
|
| 907 |
+
self._process_exited(returncode)
|
| 908 |
+
|
| 909 |
+
f = self._loop._proactor.wait_for_handle(int(self._proc._handle))
|
| 910 |
+
f.add_done_callback(callback)
|
| 911 |
+
|
| 912 |
+
|
| 913 |
+
SelectorEventLoop = _WindowsSelectorEventLoop
|
| 914 |
+
|
| 915 |
+
|
| 916 |
+
class WindowsSelectorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
|
| 917 |
+
_loop_factory = SelectorEventLoop
|
| 918 |
+
|
| 919 |
+
|
| 920 |
+
class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
|
| 921 |
+
_loop_factory = ProactorEventLoop
|
| 922 |
+
|
| 923 |
+
|
| 924 |
+
DefaultEventLoopPolicy = WindowsProactorEventLoopPolicy
|
omnilmm/lib/python3.10/asyncio/windows_utils.py
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Various Windows specific bits and pieces."""
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
|
| 5 |
+
if sys.platform != 'win32': # pragma: no cover
|
| 6 |
+
raise ImportError('win32 only')
|
| 7 |
+
|
| 8 |
+
import _winapi
|
| 9 |
+
import itertools
|
| 10 |
+
import msvcrt
|
| 11 |
+
import os
|
| 12 |
+
import subprocess
|
| 13 |
+
import tempfile
|
| 14 |
+
import warnings
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
__all__ = 'pipe', 'Popen', 'PIPE', 'PipeHandle'
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Constants/globals
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
BUFSIZE = 8192
|
| 24 |
+
PIPE = subprocess.PIPE
|
| 25 |
+
STDOUT = subprocess.STDOUT
|
| 26 |
+
_mmap_counter = itertools.count()
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Replacement for os.pipe() using handles instead of fds
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
|
| 33 |
+
"""Like os.pipe() but with overlapped support and using handles not fds."""
|
| 34 |
+
address = tempfile.mktemp(
|
| 35 |
+
prefix=r'\\.\pipe\python-pipe-{:d}-{:d}-'.format(
|
| 36 |
+
os.getpid(), next(_mmap_counter)))
|
| 37 |
+
|
| 38 |
+
if duplex:
|
| 39 |
+
openmode = _winapi.PIPE_ACCESS_DUPLEX
|
| 40 |
+
access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
|
| 41 |
+
obsize, ibsize = bufsize, bufsize
|
| 42 |
+
else:
|
| 43 |
+
openmode = _winapi.PIPE_ACCESS_INBOUND
|
| 44 |
+
access = _winapi.GENERIC_WRITE
|
| 45 |
+
obsize, ibsize = 0, bufsize
|
| 46 |
+
|
| 47 |
+
openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
|
| 48 |
+
|
| 49 |
+
if overlapped[0]:
|
| 50 |
+
openmode |= _winapi.FILE_FLAG_OVERLAPPED
|
| 51 |
+
|
| 52 |
+
if overlapped[1]:
|
| 53 |
+
flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
|
| 54 |
+
else:
|
| 55 |
+
flags_and_attribs = 0
|
| 56 |
+
|
| 57 |
+
h1 = h2 = None
|
| 58 |
+
try:
|
| 59 |
+
h1 = _winapi.CreateNamedPipe(
|
| 60 |
+
address, openmode, _winapi.PIPE_WAIT,
|
| 61 |
+
1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
|
| 62 |
+
|
| 63 |
+
h2 = _winapi.CreateFile(
|
| 64 |
+
address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
|
| 65 |
+
flags_and_attribs, _winapi.NULL)
|
| 66 |
+
|
| 67 |
+
ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
|
| 68 |
+
ov.GetOverlappedResult(True)
|
| 69 |
+
return h1, h2
|
| 70 |
+
except:
|
| 71 |
+
if h1 is not None:
|
| 72 |
+
_winapi.CloseHandle(h1)
|
| 73 |
+
if h2 is not None:
|
| 74 |
+
_winapi.CloseHandle(h2)
|
| 75 |
+
raise
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
# Wrapper for a pipe handle
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class PipeHandle:
|
| 82 |
+
"""Wrapper for an overlapped pipe handle which is vaguely file-object like.
|
| 83 |
+
|
| 84 |
+
The IOCP event loop can use these instead of socket objects.
|
| 85 |
+
"""
|
| 86 |
+
def __init__(self, handle):
|
| 87 |
+
self._handle = handle
|
| 88 |
+
|
| 89 |
+
def __repr__(self):
|
| 90 |
+
if self._handle is not None:
|
| 91 |
+
handle = f'handle={self._handle!r}'
|
| 92 |
+
else:
|
| 93 |
+
handle = 'closed'
|
| 94 |
+
return f'<{self.__class__.__name__} {handle}>'
|
| 95 |
+
|
| 96 |
+
@property
|
| 97 |
+
def handle(self):
|
| 98 |
+
return self._handle
|
| 99 |
+
|
| 100 |
+
def fileno(self):
|
| 101 |
+
if self._handle is None:
|
| 102 |
+
raise ValueError("I/O operation on closed pipe")
|
| 103 |
+
return self._handle
|
| 104 |
+
|
| 105 |
+
def close(self, *, CloseHandle=_winapi.CloseHandle):
|
| 106 |
+
if self._handle is not None:
|
| 107 |
+
CloseHandle(self._handle)
|
| 108 |
+
self._handle = None
|
| 109 |
+
|
| 110 |
+
def __del__(self, _warn=warnings.warn):
|
| 111 |
+
if self._handle is not None:
|
| 112 |
+
_warn(f"unclosed {self!r}", ResourceWarning, source=self)
|
| 113 |
+
self.close()
|
| 114 |
+
|
| 115 |
+
def __enter__(self):
|
| 116 |
+
return self
|
| 117 |
+
|
| 118 |
+
def __exit__(self, t, v, tb):
|
| 119 |
+
self.close()
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
# Replacement for subprocess.Popen using overlapped pipe handles
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class Popen(subprocess.Popen):
|
| 126 |
+
"""Replacement for subprocess.Popen using overlapped pipe handles.
|
| 127 |
+
|
| 128 |
+
The stdin, stdout, stderr are None or instances of PipeHandle.
|
| 129 |
+
"""
|
| 130 |
+
def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds):
|
| 131 |
+
assert not kwds.get('universal_newlines')
|
| 132 |
+
assert kwds.get('bufsize', 0) == 0
|
| 133 |
+
stdin_rfd = stdout_wfd = stderr_wfd = None
|
| 134 |
+
stdin_wh = stdout_rh = stderr_rh = None
|
| 135 |
+
if stdin == PIPE:
|
| 136 |
+
stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True)
|
| 137 |
+
stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY)
|
| 138 |
+
else:
|
| 139 |
+
stdin_rfd = stdin
|
| 140 |
+
if stdout == PIPE:
|
| 141 |
+
stdout_rh, stdout_wh = pipe(overlapped=(True, False))
|
| 142 |
+
stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0)
|
| 143 |
+
else:
|
| 144 |
+
stdout_wfd = stdout
|
| 145 |
+
if stderr == PIPE:
|
| 146 |
+
stderr_rh, stderr_wh = pipe(overlapped=(True, False))
|
| 147 |
+
stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0)
|
| 148 |
+
elif stderr == STDOUT:
|
| 149 |
+
stderr_wfd = stdout_wfd
|
| 150 |
+
else:
|
| 151 |
+
stderr_wfd = stderr
|
| 152 |
+
try:
|
| 153 |
+
super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd,
|
| 154 |
+
stderr=stderr_wfd, **kwds)
|
| 155 |
+
except:
|
| 156 |
+
for h in (stdin_wh, stdout_rh, stderr_rh):
|
| 157 |
+
if h is not None:
|
| 158 |
+
_winapi.CloseHandle(h)
|
| 159 |
+
raise
|
| 160 |
+
else:
|
| 161 |
+
if stdin_wh is not None:
|
| 162 |
+
self.stdin = PipeHandle(stdin_wh)
|
| 163 |
+
if stdout_rh is not None:
|
| 164 |
+
self.stdout = PipeHandle(stdout_rh)
|
| 165 |
+
if stderr_rh is not None:
|
| 166 |
+
self.stderr = PipeHandle(stderr_rh)
|
| 167 |
+
finally:
|
| 168 |
+
if stdin == PIPE:
|
| 169 |
+
os.close(stdin_rfd)
|
| 170 |
+
if stdout == PIPE:
|
| 171 |
+
os.close(stdout_wfd)
|
| 172 |
+
if stderr == PIPE:
|
| 173 |
+
os.close(stderr_wfd)
|
omnilmm/lib/python3.10/site-packages/lxml/__pycache__/ElementInclude.cpython-310.pyc
ADDED
|
Binary file (3.79 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (682 Bytes). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/__pycache__/builder.cpython-310.pyc
ADDED
|
Binary file (5.87 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/__pycache__/doctestcompare.cpython-310.pyc
ADDED
|
Binary file (13.7 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/__pycache__/pyclasslookup.cpython-310.pyc
ADDED
|
Binary file (221 Bytes). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/__pycache__/sax.cpython-310.pyc
ADDED
|
Binary file (8.12 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/ElementSoup.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__doc__ = """Legacy interface to the BeautifulSoup HTML parser.
|
| 2 |
+
"""
|
| 3 |
+
|
| 4 |
+
__all__ = ["parse", "convert_tree"]
|
| 5 |
+
|
| 6 |
+
from .soupparser import convert_tree, parse as _parse
|
| 7 |
+
|
| 8 |
+
def parse(file, beautifulsoup=None, makeelement=None):
|
| 9 |
+
root = _parse(file, beautifulsoup=beautifulsoup, makeelement=makeelement)
|
| 10 |
+
return root.getroot()
|
omnilmm/lib/python3.10/site-packages/lxml/html/__init__.py
ADDED
|
@@ -0,0 +1,1923 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2004 Ian Bicking. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Redistribution and use in source and binary forms, with or without
|
| 4 |
+
# modification, are permitted provided that the following conditions are
|
| 5 |
+
# met:
|
| 6 |
+
#
|
| 7 |
+
# 1. Redistributions of source code must retain the above copyright
|
| 8 |
+
# notice, this list of conditions and the following disclaimer.
|
| 9 |
+
#
|
| 10 |
+
# 2. Redistributions in binary form must reproduce the above copyright
|
| 11 |
+
# notice, this list of conditions and the following disclaimer in
|
| 12 |
+
# the documentation and/or other materials provided with the
|
| 13 |
+
# distribution.
|
| 14 |
+
#
|
| 15 |
+
# 3. Neither the name of Ian Bicking nor the names of its contributors may
|
| 16 |
+
# be used to endorse or promote products derived from this software
|
| 17 |
+
# without specific prior written permission.
|
| 18 |
+
#
|
| 19 |
+
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 20 |
+
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 21 |
+
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 22 |
+
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IAN BICKING OR
|
| 23 |
+
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
| 24 |
+
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
| 25 |
+
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
| 26 |
+
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
| 27 |
+
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 28 |
+
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 29 |
+
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
|
| 31 |
+
"""The ``lxml.html`` tool set for HTML handling.
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
__all__ = [
|
| 36 |
+
'document_fromstring', 'fragment_fromstring', 'fragments_fromstring', 'fromstring',
|
| 37 |
+
'tostring', 'Element', 'defs', 'open_in_browser', 'submit_form',
|
| 38 |
+
'find_rel_links', 'find_class', 'make_links_absolute',
|
| 39 |
+
'resolve_base_href', 'iterlinks', 'rewrite_links', 'parse']
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
import copy
|
| 43 |
+
import re
|
| 44 |
+
|
| 45 |
+
from collections.abc import MutableMapping, MutableSet
|
| 46 |
+
from functools import partial
|
| 47 |
+
from urllib.parse import urljoin
|
| 48 |
+
|
| 49 |
+
from .. import etree
|
| 50 |
+
from . import defs
|
| 51 |
+
from ._setmixin import SetMixin
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def __fix_docstring(s):
|
| 55 |
+
# TODO: remove and clean up doctests
|
| 56 |
+
if not s:
|
| 57 |
+
return s
|
| 58 |
+
sub = re.compile(r"^(\s*)u'", re.M).sub
|
| 59 |
+
return sub(r"\1'", s)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
|
| 63 |
+
|
| 64 |
+
_rel_links_xpath = etree.XPath("descendant-or-self::a[@rel]|descendant-or-self::x:a[@rel]",
|
| 65 |
+
namespaces={'x':XHTML_NAMESPACE})
|
| 66 |
+
_options_xpath = etree.XPath("descendant-or-self::option|descendant-or-self::x:option",
|
| 67 |
+
namespaces={'x':XHTML_NAMESPACE})
|
| 68 |
+
_forms_xpath = etree.XPath("descendant-or-self::form|descendant-or-self::x:form",
|
| 69 |
+
namespaces={'x':XHTML_NAMESPACE})
|
| 70 |
+
#_class_xpath = etree.XPath(r"descendant-or-self::*[regexp:match(@class, concat('\b', $class_name, '\b'))]", {'regexp': 'http://exslt.org/regular-expressions'})
|
| 71 |
+
_class_xpath = etree.XPath("descendant-or-self::*[@class and contains(concat(' ', normalize-space(@class), ' '), concat(' ', $class_name, ' '))]")
|
| 72 |
+
_id_xpath = etree.XPath("descendant-or-self::*[@id=$id]")
|
| 73 |
+
_collect_string_content = etree.XPath("string()")
|
| 74 |
+
_iter_css_urls = re.compile(r'url\(('+'["][^"]*["]|'+"['][^']*[']|"+r'[^)]*)\)', re.I).finditer
|
| 75 |
+
_iter_css_imports = re.compile(r'@import "(.*?)"').finditer
|
| 76 |
+
_label_xpath = etree.XPath("//label[@for=$id]|//x:label[@for=$id]",
|
| 77 |
+
namespaces={'x':XHTML_NAMESPACE})
|
| 78 |
+
_archive_re = re.compile(r'[^ ]+')
|
| 79 |
+
_parse_meta_refresh_url = re.compile(
|
| 80 |
+
r'[^;=]*;\s*(?:url\s*=\s*)?(?P<url>.*)$', re.I).search
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def _unquote_match(s, pos):
|
| 84 |
+
if s[:1] == '"' and s[-1:] == '"' or s[:1] == "'" and s[-1:] == "'":
|
| 85 |
+
return s[1:-1], pos+1
|
| 86 |
+
else:
|
| 87 |
+
return s,pos
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _transform_result(typ, result):
|
| 91 |
+
"""Convert the result back into the input type.
|
| 92 |
+
"""
|
| 93 |
+
if issubclass(typ, bytes):
|
| 94 |
+
return tostring(result, encoding='utf-8')
|
| 95 |
+
elif issubclass(typ, str):
|
| 96 |
+
return tostring(result, encoding='unicode')
|
| 97 |
+
else:
|
| 98 |
+
return result
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _nons(tag):
|
| 102 |
+
if isinstance(tag, str):
|
| 103 |
+
if tag[0] == '{' and tag[1:len(XHTML_NAMESPACE)+1] == XHTML_NAMESPACE:
|
| 104 |
+
return tag.split('}')[-1]
|
| 105 |
+
return tag
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
class Classes(MutableSet):
|
| 109 |
+
"""Provides access to an element's class attribute as a set-like collection.
|
| 110 |
+
Usage::
|
| 111 |
+
|
| 112 |
+
>>> el = fromstring('<p class="hidden large">Text</p>')
|
| 113 |
+
>>> classes = el.classes # or: classes = Classes(el.attrib)
|
| 114 |
+
>>> classes |= ['block', 'paragraph']
|
| 115 |
+
>>> el.get('class')
|
| 116 |
+
'hidden large block paragraph'
|
| 117 |
+
>>> classes.toggle('hidden')
|
| 118 |
+
False
|
| 119 |
+
>>> el.get('class')
|
| 120 |
+
'large block paragraph'
|
| 121 |
+
>>> classes -= ('some', 'classes', 'block')
|
| 122 |
+
>>> el.get('class')
|
| 123 |
+
'large paragraph'
|
| 124 |
+
"""
|
| 125 |
+
def __init__(self, attributes):
|
| 126 |
+
self._attributes = attributes
|
| 127 |
+
self._get_class_value = partial(attributes.get, 'class', '')
|
| 128 |
+
|
| 129 |
+
def add(self, value):
|
| 130 |
+
"""
|
| 131 |
+
Add a class.
|
| 132 |
+
|
| 133 |
+
This has no effect if the class is already present.
|
| 134 |
+
"""
|
| 135 |
+
if not value or re.search(r'\s', value):
|
| 136 |
+
raise ValueError("Invalid class name: %r" % value)
|
| 137 |
+
classes = self._get_class_value().split()
|
| 138 |
+
if value in classes:
|
| 139 |
+
return
|
| 140 |
+
classes.append(value)
|
| 141 |
+
self._attributes['class'] = ' '.join(classes)
|
| 142 |
+
|
| 143 |
+
def discard(self, value):
|
| 144 |
+
"""
|
| 145 |
+
Remove a class if it is currently present.
|
| 146 |
+
|
| 147 |
+
If the class is not present, do nothing.
|
| 148 |
+
"""
|
| 149 |
+
if not value or re.search(r'\s', value):
|
| 150 |
+
raise ValueError("Invalid class name: %r" % value)
|
| 151 |
+
classes = [name for name in self._get_class_value().split()
|
| 152 |
+
if name != value]
|
| 153 |
+
if classes:
|
| 154 |
+
self._attributes['class'] = ' '.join(classes)
|
| 155 |
+
elif 'class' in self._attributes:
|
| 156 |
+
del self._attributes['class']
|
| 157 |
+
|
| 158 |
+
def remove(self, value):
|
| 159 |
+
"""
|
| 160 |
+
Remove a class; it must currently be present.
|
| 161 |
+
|
| 162 |
+
If the class is not present, raise a KeyError.
|
| 163 |
+
"""
|
| 164 |
+
if not value or re.search(r'\s', value):
|
| 165 |
+
raise ValueError("Invalid class name: %r" % value)
|
| 166 |
+
super().remove(value)
|
| 167 |
+
|
| 168 |
+
def __contains__(self, name):
|
| 169 |
+
classes = self._get_class_value()
|
| 170 |
+
return name in classes and name in classes.split()
|
| 171 |
+
|
| 172 |
+
def __iter__(self):
|
| 173 |
+
return iter(self._get_class_value().split())
|
| 174 |
+
|
| 175 |
+
def __len__(self):
|
| 176 |
+
return len(self._get_class_value().split())
|
| 177 |
+
|
| 178 |
+
# non-standard methods
|
| 179 |
+
|
| 180 |
+
def update(self, values):
|
| 181 |
+
"""
|
| 182 |
+
Add all names from 'values'.
|
| 183 |
+
"""
|
| 184 |
+
classes = self._get_class_value().split()
|
| 185 |
+
extended = False
|
| 186 |
+
for value in values:
|
| 187 |
+
if value not in classes:
|
| 188 |
+
classes.append(value)
|
| 189 |
+
extended = True
|
| 190 |
+
if extended:
|
| 191 |
+
self._attributes['class'] = ' '.join(classes)
|
| 192 |
+
|
| 193 |
+
def toggle(self, value):
|
| 194 |
+
"""
|
| 195 |
+
Add a class name if it isn't there yet, or remove it if it exists.
|
| 196 |
+
|
| 197 |
+
Returns true if the class was added (and is now enabled) and
|
| 198 |
+
false if it was removed (and is now disabled).
|
| 199 |
+
"""
|
| 200 |
+
if not value or re.search(r'\s', value):
|
| 201 |
+
raise ValueError("Invalid class name: %r" % value)
|
| 202 |
+
classes = self._get_class_value().split()
|
| 203 |
+
try:
|
| 204 |
+
classes.remove(value)
|
| 205 |
+
enabled = False
|
| 206 |
+
except ValueError:
|
| 207 |
+
classes.append(value)
|
| 208 |
+
enabled = True
|
| 209 |
+
if classes:
|
| 210 |
+
self._attributes['class'] = ' '.join(classes)
|
| 211 |
+
else:
|
| 212 |
+
del self._attributes['class']
|
| 213 |
+
return enabled
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
class HtmlMixin:
|
| 217 |
+
|
| 218 |
+
def set(self, key, value=None):
|
| 219 |
+
"""set(self, key, value=None)
|
| 220 |
+
|
| 221 |
+
Sets an element attribute. If no value is provided, or if the value is None,
|
| 222 |
+
creates a 'boolean' attribute without value, e.g. "<form novalidate></form>"
|
| 223 |
+
for ``form.set('novalidate')``.
|
| 224 |
+
"""
|
| 225 |
+
super().set(key, value)
|
| 226 |
+
|
| 227 |
+
@property
|
| 228 |
+
def classes(self):
|
| 229 |
+
"""
|
| 230 |
+
A set-like wrapper around the 'class' attribute.
|
| 231 |
+
"""
|
| 232 |
+
return Classes(self.attrib)
|
| 233 |
+
|
| 234 |
+
@classes.setter
|
| 235 |
+
def classes(self, classes):
|
| 236 |
+
assert isinstance(classes, Classes) # only allow "el.classes |= ..." etc.
|
| 237 |
+
value = classes._get_class_value()
|
| 238 |
+
if value:
|
| 239 |
+
self.set('class', value)
|
| 240 |
+
elif self.get('class') is not None:
|
| 241 |
+
del self.attrib['class']
|
| 242 |
+
|
| 243 |
+
@property
|
| 244 |
+
def base_url(self):
|
| 245 |
+
"""
|
| 246 |
+
Returns the base URL, given when the page was parsed.
|
| 247 |
+
|
| 248 |
+
Use with ``urlparse.urljoin(el.base_url, href)`` to get
|
| 249 |
+
absolute URLs.
|
| 250 |
+
"""
|
| 251 |
+
return self.getroottree().docinfo.URL
|
| 252 |
+
|
| 253 |
+
@property
|
| 254 |
+
def forms(self):
|
| 255 |
+
"""
|
| 256 |
+
Return a list of all the forms
|
| 257 |
+
"""
|
| 258 |
+
return _forms_xpath(self)
|
| 259 |
+
|
| 260 |
+
@property
|
| 261 |
+
def body(self):
|
| 262 |
+
"""
|
| 263 |
+
Return the <body> element. Can be called from a child element
|
| 264 |
+
to get the document's head.
|
| 265 |
+
"""
|
| 266 |
+
return self.xpath('//body|//x:body', namespaces={'x':XHTML_NAMESPACE})[0]
|
| 267 |
+
|
| 268 |
+
@property
|
| 269 |
+
def head(self):
|
| 270 |
+
"""
|
| 271 |
+
Returns the <head> element. Can be called from a child
|
| 272 |
+
element to get the document's head.
|
| 273 |
+
"""
|
| 274 |
+
return self.xpath('//head|//x:head', namespaces={'x':XHTML_NAMESPACE})[0]
|
| 275 |
+
|
| 276 |
+
@property
|
| 277 |
+
def label(self):
|
| 278 |
+
"""
|
| 279 |
+
Get or set any <label> element associated with this element.
|
| 280 |
+
"""
|
| 281 |
+
id = self.get('id')
|
| 282 |
+
if not id:
|
| 283 |
+
return None
|
| 284 |
+
result = _label_xpath(self, id=id)
|
| 285 |
+
if not result:
|
| 286 |
+
return None
|
| 287 |
+
else:
|
| 288 |
+
return result[0]
|
| 289 |
+
|
| 290 |
+
@label.setter
|
| 291 |
+
def label(self, label):
|
| 292 |
+
id = self.get('id')
|
| 293 |
+
if not id:
|
| 294 |
+
raise TypeError(
|
| 295 |
+
"You cannot set a label for an element (%r) that has no id"
|
| 296 |
+
% self)
|
| 297 |
+
if _nons(label.tag) != 'label':
|
| 298 |
+
raise TypeError(
|
| 299 |
+
"You can only assign label to a label element (not %r)"
|
| 300 |
+
% label)
|
| 301 |
+
label.set('for', id)
|
| 302 |
+
|
| 303 |
+
@label.deleter
|
| 304 |
+
def label(self):
|
| 305 |
+
label = self.label
|
| 306 |
+
if label is not None:
|
| 307 |
+
del label.attrib['for']
|
| 308 |
+
|
| 309 |
+
def drop_tree(self):
|
| 310 |
+
"""
|
| 311 |
+
Removes this element from the tree, including its children and
|
| 312 |
+
text. The tail text is joined to the previous element or
|
| 313 |
+
parent.
|
| 314 |
+
"""
|
| 315 |
+
parent = self.getparent()
|
| 316 |
+
assert parent is not None
|
| 317 |
+
if self.tail:
|
| 318 |
+
previous = self.getprevious()
|
| 319 |
+
if previous is None:
|
| 320 |
+
parent.text = (parent.text or '') + self.tail
|
| 321 |
+
else:
|
| 322 |
+
previous.tail = (previous.tail or '') + self.tail
|
| 323 |
+
parent.remove(self)
|
| 324 |
+
|
| 325 |
+
def drop_tag(self):
    """
    Remove the tag, but not its children or text.  The children and text
    are merged into the parent.

    Example::

        >>> h = fragment_fromstring('<div>Hello <b>World!</b></div>')
        >>> h.find('.//b').drop_tag()
        >>> print(tostring(h, encoding='unicode'))
        <div>Hello World!</div>
    """
    parent = self.getparent()
    assert parent is not None
    prev = self.getprevious()
    # Merge our leading text into the parent/previous sibling.
    # isinstance(self.tag, str) excludes comments, PIs, etc.
    if self.text and isinstance(self.tag, str):
        if prev is None:
            parent.text = (parent.text or '') + self.text
        else:
            prev.tail = (prev.tail or '') + self.text
    # Merge our tail text: after our last child if we have children,
    # otherwise onto the previous sibling or the parent's text.
    if self.tail:
        if len(self):
            last_child = self[-1]
            last_child.tail = (last_child.tail or '') + self.tail
        elif prev is None:
            parent.text = (parent.text or '') + self.tail
        else:
            prev.tail = (prev.tail or '') + self.tail
    # Finally splice our children into our former slot in the parent.
    pos = parent.index(self)
    parent[pos:pos + 1] = self[:]
def find_rel_links(self, rel):
    """
    Find any links like ``<a rel="{rel}">...</a>``; returns a list of elements.
    """
    wanted = rel.lower()
    # rel values are compared case-insensitively on both sides.
    return [el for el in _rel_links_xpath(self)
            if el.get('rel').lower() == wanted]
def find_class(self, class_name):
    """
    Find any elements with the given class name.
    """
    # Delegates to a precompiled XPath expression.
    return _class_xpath(self, class_name=class_name)
def get_element_by_id(self, id, *default):
    """
    Get the first element in a document with the given id.  If none is
    found, return the default argument if provided or raise KeyError
    otherwise.

    Note that there can be more than one element with the same id,
    and this isn't uncommon in HTML documents found in the wild.
    Browsers return only the first match, and this function does
    the same.
    """
    # FIXME: should this check for multiple matches?
    # browsers just return the first one
    matches = _id_xpath(self, id=id)
    if matches:
        return matches[0]
    if default:
        return default[0]
    raise KeyError(id)
def text_content(self):
    """
    Return the text content of the tag (and the text in any children).
    """
    # _collect_string_content is a precompiled XPath string() evaluation.
    return _collect_string_content(self)
def cssselect(self, expr, translator='html'):
    """
    Run the CSS expression on this element and its children,
    returning a list of the results.

    Equivalent to lxml.cssselect.CSSSelect(expr, translator='html')(self)
    -- note that pre-compiling the expression can provide a substantial
    speedup.
    """
    # Do the import here to make the dependency optional.
    from lxml.cssselect import CSSSelector
    selector = CSSSelector(expr, translator=translator)
    return selector(self)
########################################
|
| 412 |
+
## Link functions
|
| 413 |
+
########################################
|
| 414 |
+
|
| 415 |
+
def make_links_absolute(self, base_url=None, resolve_base_href=True,
                        handle_failures=None):
    """
    Make all links in the document absolute, given the
    ``base_url`` for the document (the full URL where the document
    came from), or if no ``base_url`` is given, then the ``.base_url``
    of the document.

    If ``resolve_base_href`` is true, then any ``<base href>``
    tags in the document are used *and* removed from the document.
    If it is false then any such tag is ignored.

    If ``handle_failures`` is None (default), a failure to process
    a URL will abort the processing.  If set to 'ignore', errors
    are ignored.  If set to 'discard', failing URLs will be removed.
    """
    if base_url is None:
        base_url = self.base_url
        if base_url is None:
            raise TypeError(
                "No base_url given, and the document has no base_url")
    if resolve_base_href:
        self.resolve_base_href()

    # Build the replacement callback according to the failure policy.
    if handle_failures == 'ignore':
        def link_repl(href):
            try:
                return urljoin(base_url, href)
            except ValueError:
                # keep the original, unresolved link
                return href
    elif handle_failures == 'discard':
        def link_repl(href):
            try:
                return urljoin(base_url, href)
            except ValueError:
                # None tells rewrite_links() to drop the link entirely
                return None
    elif handle_failures is None:
        def link_repl(href):
            return urljoin(base_url, href)
    else:
        raise ValueError(
            "unexpected value for handle_failures: %r" % handle_failures)

    self.rewrite_links(link_repl)
def resolve_base_href(self, handle_failures=None):
    """
    Find any ``<base href>`` tag in the document, and apply its
    values to all links found in the document.  Also remove the
    tag once it has been applied.

    If ``handle_failures`` is None (default), a failure to process
    a URL will abort the processing.  If set to 'ignore', errors
    are ignored.  If set to 'discard', failing URLs will be removed.
    """
    base_href = None
    basetags = self.xpath('//base[@href]|//x:base[@href]',
                          namespaces={'x': XHTML_NAMESPACE})
    # If several <base> tags exist, the last one wins; all are removed.
    for b in basetags:
        base_href = b.get('href')
        b.drop_tree()
    if not base_href:
        return
    self.make_links_absolute(base_href, resolve_base_href=False,
                             handle_failures=handle_failures)
def iterlinks(self):
    """
    Yield (element, attribute, link, pos), where attribute may be None
    (indicating the link is in the text).  ``pos`` is the position
    where the link occurs; often 0, but sometimes something else in
    the case of links in stylesheets or style tags.

    Note: <base href> is *not* taken into account in any way.  The
    link you get is exactly the link in the document.

    Note: multiple links inside of a single text string or
    attribute value are returned in reversed order.  This makes it
    possible to replace or delete them from the text string value
    based on their reported text positions.  Otherwise, a
    modification at one text position can change the positions of
    links reported later on.
    """
    link_attrs = defs.link_attrs
    for el in self.iter(etree.Element):
        attribs = el.attrib
        tag = _nons(el.tag)
        if tag == 'object':
            # <object> tags have attributes that are relative to codebase
            codebase = None
            if 'codebase' in attribs:
                codebase = el.get('codebase')
                yield (el, 'codebase', codebase, 0)
            for attrib in ('classid', 'data'):
                if attrib in attribs:
                    value = el.get(attrib)
                    if codebase is not None:
                        value = urljoin(codebase, value)
                    yield (el, attrib, value, 0)
            if 'archive' in attribs:
                # archive is a whitespace-separated list of URLs
                for match in _archive_re.finditer(el.get('archive')):
                    value = match.group(0)
                    if codebase is not None:
                        value = urljoin(codebase, value)
                    yield (el, 'archive', value, match.start())
        else:
            for attrib in link_attrs:
                if attrib in attribs:
                    yield (el, attrib, attribs[attrib], 0)
            if tag == 'meta':
                http_equiv = attribs.get('http-equiv', '').lower()
                if http_equiv == 'refresh':
                    content = attribs.get('content', '')
                    match = _parse_meta_refresh_url(content)
                    url = (match.group('url') if match else content).strip()
                    # unexpected content means the redirect won't work, but we might
                    # as well be permissive and return the entire string.
                    if url:
                        url, pos = _unquote_match(
                            url, match.start('url') if match else content.find(url))
                        yield (el, 'content', url, pos)
            elif tag == 'param':
                valuetype = el.get('valuetype') or ''
                if valuetype.lower() == 'ref':
                    ## FIXME: while it's fine we *find* this link,
                    ## according to the spec we aren't supposed to
                    ## actually change the value, including resolving
                    ## it.  It can also still be a link, even if it
                    ## doesn't have a valuetype="ref" (which seems to be the norm)
                    ## http://www.w3.org/TR/html401/struct/objects.html#adef-valuetype
                    yield (el, 'value', el.get('value'), 0)
            elif tag == 'style' and el.text:
                # Collect url(...) references and @import targets from
                # inline stylesheet text.
                urls = [
                    # (start_pos, url)
                    _unquote_match(match.group(1), match.start(1))[::-1]
                    for match in _iter_css_urls(el.text)
                ] + [
                    (match.start(1), match.group(1))
                    for match in _iter_css_imports(el.text)
                ]
                if urls:
                    # sort by start pos to bring both match sets back into order
                    # and reverse the list to report correct positions despite
                    # modifications
                    urls.sort(reverse=True)
                    for start, url in urls:
                        yield (el, None, url, start)
        if 'style' in attribs:
            urls = list(_iter_css_urls(attribs['style']))
            if urls:
                # return in reversed order to simplify in-place modifications
                for match in urls[::-1]:
                    url, start = _unquote_match(match.group(1), match.start(1))
                    yield (el, 'style', url, start)
def rewrite_links(self, link_repl_func, resolve_base_href=True,
                  base_href=None):
    """
    Rewrite all the links in the document.  For each link
    ``link_repl_func(link)`` will be called, and the return value
    will replace the old link.

    Note that links may not be absolute (unless you first called
    ``make_links_absolute()``), and may be internal (e.g.,
    ``'#anchor'``).  They can also be values like
    ``'mailto:email'`` or ``'javascript:expr'``.

    If you give ``base_href`` then all links passed to
    ``link_repl_func()`` will take that into account.

    If the ``link_repl_func`` returns None, the attribute or
    tag text will be removed completely.
    """
    if base_href is not None:
        # FIXME: this can be done in one pass with a wrapper
        # around link_repl_func
        self.make_links_absolute(
            base_href, resolve_base_href=resolve_base_href)
    elif resolve_base_href:
        self.resolve_base_href()

    for el, attrib, link, pos in self.iterlinks():
        new_link = link_repl_func(link.strip())
        if new_link == link:
            continue
        if new_link is None:
            # Remove the attribute or element content
            if attrib is None:
                el.text = ''
            else:
                del el.attrib[attrib]
            continue

        if attrib is None:
            # Link lives in the element's text (e.g. a <style> body).
            el.text = el.text[:pos] + new_link + el.text[pos + len(link):]
        else:
            cur = el.get(attrib)
            if not pos and len(cur) == len(link):
                new = new_link  # most common case
            else:
                new = cur[:pos] + new_link + cur[pos + len(link):]
            el.set(attrib, new)
class _MethodFunc:
    """
    An object that represents a method on an element as a function;
    the function takes either an element or an HTML string.  It
    returns whatever the function normally returns, or if the function
    works in-place (and so returns None) it returns a serialized form
    of the resulting document.
    """

    def __init__(self, name, copy=False, source_class=HtmlMixin):
        self.name = name
        self.copy = copy
        # Reuse the wrapped method's docstring for introspection.
        self.__doc__ = getattr(source_class, self.name).__doc__

    def __call__(self, doc, *args, **kw):
        result_type = type(doc)
        if isinstance(doc, (str, bytes)):
            if 'copy' in kw:
                raise TypeError(
                    "The keyword 'copy' can only be used with element inputs to %s, not a string input" % self.name)
            doc = fromstring(doc, **kw)
        else:
            # Element input: honour a per-call 'copy' override, falling
            # back to the default chosen at construction time.
            make_a_copy = kw.pop('copy') if 'copy' in kw else self.copy
            if make_a_copy:
                doc = copy.deepcopy(doc)
        meth = getattr(doc, self.name)
        result = meth(*args, **kw)
        # FIXME: this None test is a bit sloppy
        if result is None:
            # Then return what we got in
            return _transform_result(result_type, doc)
        return result
# Module-level convenience wrappers around the HtmlMixin methods of the
# same names.  Each accepts either an element or an HTML string; for the
# in-place operations, copy=True makes element inputs deep-copied by
# default before modification.
find_rel_links = _MethodFunc('find_rel_links', copy=False)
find_class = _MethodFunc('find_class', copy=False)
make_links_absolute = _MethodFunc('make_links_absolute', copy=True)
resolve_base_href = _MethodFunc('resolve_base_href', copy=True)
iterlinks = _MethodFunc('iterlinks', copy=False)
rewrite_links = _MethodFunc('rewrite_links', copy=True)
class HtmlComment(HtmlMixin, etree.CommentBase):
    """Comment node carrying the HtmlMixin API."""


class HtmlElement(HtmlMixin, etree.ElementBase):
    """Default element class for HTML documents."""


class HtmlProcessingInstruction(HtmlMixin, etree.PIBase):
    """Processing-instruction node carrying the HtmlMixin API."""


class HtmlEntity(HtmlMixin, etree.EntityBase):
    """Entity-reference node carrying the HtmlMixin API."""
class HtmlElementClassLookup(etree.CustomElementClassLookup):
    """A lookup scheme for HTML Element classes.

    To create a lookup instance with different Element classes, pass a tag
    name mapping of Element classes in the ``classes`` keyword argument and/or
    a tag name mapping of Mixin classes in the ``mixins`` keyword argument.
    The special key '*' denotes a Mixin class that should be mixed into all
    Element classes.
    """
    # Populated elsewhere in the module, e.g. 'form' -> FormElement.
    _default_element_classes = {}

    def __init__(self, classes=None, mixins=None):
        etree.CustomElementClassLookup.__init__(self)
        if classes is None:
            classes = self._default_element_classes.copy()
        if mixins:
            # Gather the mixin classes requested for each tag name;
            # '*' spreads a mixin over every currently known tag.
            mixers = {}
            for name, value in mixins:
                if name == '*':
                    for n in classes.keys():
                        mixers.setdefault(n, []).append(value)
                else:
                    mixers.setdefault(name, []).append(value)
            # Synthesize a subclass per tag combining mixins + base class.
            for name, mix_bases in mixers.items():
                cur = classes.get(name, HtmlElement)
                bases = tuple(mix_bases + [cur])
                classes[name] = type(cur.__name__, bases, {})
        self._element_classes = classes

    def lookup(self, node_type, document, namespace, name):
        if node_type == 'element':
            return self._element_classes.get(name.lower(), HtmlElement)
        if node_type == 'comment':
            return HtmlComment
        if node_type == 'PI':
            return HtmlProcessingInstruction
        if node_type == 'entity':
            return HtmlEntity
        # Otherwise normal lookup
        return None
################################################################################
|
| 724 |
+
# parsing
|
| 725 |
+
################################################################################
|
| 726 |
+
|
| 727 |
+
_looks_like_full_html_unicode = re.compile(
|
| 728 |
+
r'^\s*<(?:html|!doctype)', re.I).match
|
| 729 |
+
_looks_like_full_html_bytes = re.compile(
|
| 730 |
+
br'^\s*<(?:html|!doctype)', re.I).match
|
| 731 |
+
|
| 732 |
+
|
| 733 |
+
def document_fromstring(html, parser=None, ensure_head_body=False, **kw):
    """Parse a complete HTML document from a string, returning the root.

    If *ensure_head_body* is true, a missing <head> is inserted first
    and a missing <body> is appended, so the result always has the
    standard document structure.
    """
    if parser is None:
        parser = html_parser
    root = etree.fromstring(html, parser, **kw)
    if root is None:
        raise etree.ParserError(
            "Document is empty")
    if ensure_head_body:
        if root.find('head') is None:
            root.insert(0, Element('head'))
        if root.find('body') is None:
            root.append(Element('body'))
    return root
def fragments_fromstring(html, no_leading_text=False, base_url=None,
                         parser=None, **kw):
    """Parses several HTML elements, returning a list of elements.

    The first item in the list may be a string.
    If no_leading_text is true, then it will be an error if there is
    leading text, and it will always be a list of only elements.

    base_url will set the document's base_url attribute
    (and the tree's docinfo.URL).
    """
    if parser is None:
        parser = html_parser
    # FIXME: check what happens when you give html with a body, head, etc.
    # Wrap bare fragments in a minimal document so the parser accepts them.
    if isinstance(html, bytes):
        if not _looks_like_full_html_bytes(html):
            html = b'<html><body>' + html + b'</body></html>'
    else:
        if not _looks_like_full_html_unicode(html):
            html = '<html><body>%s</body></html>' % html
    doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
    assert _nons(doc.tag) == 'html'
    bodies = [e for e in doc if _nons(e.tag) == 'body']
    assert len(bodies) == 1, ("too many bodies: %r in %r" % (bodies, html))
    body = bodies[0]
    elements = []
    has_leading_text = body.text and body.text.strip()
    if no_leading_text and has_leading_text:
        raise etree.ParserError(
            "There is leading text: %r" % body.text)
    if has_leading_text:
        elements.append(body.text)
    elements.extend(body)
    # FIXME: removing the reference to the parent artificial document
    # would be nice
    return elements
def fragment_fromstring(html, create_parent=False, base_url=None,
                        parser=None, **kw):
    """
    Parses a single HTML element; it is an error if there is more than
    one element, or if anything but whitespace precedes or follows the
    element.

    If ``create_parent`` is true (or is a tag name) then a parent node
    will be created to encapsulate the HTML in a single element.  In this
    case, leading or trailing text is also allowed, as are multiple elements
    as result of the parsing.

    Passing a ``base_url`` will set the document's ``base_url`` attribute
    (and the tree's docinfo.URL).
    """
    if parser is None:
        parser = html_parser

    # With an explicit parent, leading text is legal and kept.
    accept_leading_text = bool(create_parent)

    elements = fragments_fromstring(
        html, parser=parser, no_leading_text=not accept_leading_text,
        base_url=base_url, **kw)

    if create_parent:
        # create_parent=True means "use a <div>"; a string names the tag.
        if not isinstance(create_parent, str):
            create_parent = 'div'
        new_root = Element(create_parent)
        if elements:
            if isinstance(elements[0], str):
                new_root.text = elements[0]
                del elements[0]
            new_root.extend(elements)
        return new_root

    if not elements:
        raise etree.ParserError('No elements found')
    if len(elements) > 1:
        raise etree.ParserError(
            "Multiple elements found (%s)"
            % ', '.join([_element_name(e) for e in elements]))
    el = elements[0]
    if el.tail and el.tail.strip():
        raise etree.ParserError(
            "Element followed by text: %r" % el.tail)
    # Whitespace-only tails are silently dropped.
    el.tail = None
    return el
def fromstring(html, base_url=None, parser=None, **kw):
    """
    Parse the html, returning a single element/document.

    This tries to minimally parse the chunk of text, without knowing if it
    is a fragment or a document.

    base_url will set the document's base_url attribute (and the tree's docinfo.URL)
    """
    if parser is None:
        parser = html_parser
    if isinstance(html, bytes):
        is_full_html = _looks_like_full_html_bytes(html)
    else:
        is_full_html = _looks_like_full_html_unicode(html)
    doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
    if is_full_html:
        return doc
    # otherwise, lets parse it out...
    bodies = doc.findall('body')
    if not bodies:
        bodies = doc.findall('{%s}body' % XHTML_NAMESPACE)
    if bodies:
        body = bodies[0]
        if len(bodies) > 1:
            # Somehow there are multiple bodies, which is bad, but just
            # smash them into one body
            for other_body in bodies[1:]:
                if other_body.text:
                    if len(body):
                        body[-1].tail = (body[-1].tail or '') + other_body.text
                    else:
                        body.text = (body.text or '') + other_body.text
                body.extend(other_body)
                # We'll ignore tail
                # I guess we are ignoring attributes too
                other_body.drop_tree()
    else:
        body = None
    heads = doc.findall('head')
    if not heads:
        heads = doc.findall('{%s}head' % XHTML_NAMESPACE)
    if heads:
        # Well, we have some sort of structure, so lets keep it all
        head = heads[0]
        if len(heads) > 1:
            for other_head in heads[1:]:
                head.extend(other_head)
                # We don't care about text or tail in a head
                other_head.drop_tree()
        return doc
    if body is None:
        return doc
    if (len(body) == 1 and (not body.text or not body.text.strip())
            and (not body[-1].tail or not body[-1].tail.strip())):
        # The body has just one element, so it was probably a single
        # element passed in
        return body[0]
    # Now we have a body which represents a bunch of tags which have the
    # content that was passed in.  We will create a fake container, which
    # is the body tag, except <body> implies too much structure.
    if _contains_block_level_tag(body):
        body.tag = 'div'
    else:
        body.tag = 'span'
    return body
def parse(filename_or_url, parser=None, base_url=None, **kw):
    """
    Parse a filename, URL, or file-like object into an HTML document
    tree.  Note: this returns a tree, not an element.  Use
    ``parse(...).getroot()`` to get the document root.

    You can override the base URL with the ``base_url`` keyword.  This
    is most useful when parsing from a file-like object.
    """
    if parser is None:
        parser = html_parser
    return etree.parse(filename_or_url, parser, base_url=base_url, **kw)
def _contains_block_level_tag(el):
    """Return True if *el* or any descendant element is a block-level tag."""
    # FIXME: I could do this with XPath, but would that just be
    # unnecessarily slow?
    # Note: el.iter() includes el itself.
    return any(_nons(child.tag) in defs.block_tags
               for child in el.iter(etree.Element))
def _element_name(el):
    """Describe *el* for error messages: 'comment', 'string', or its tag name."""
    if isinstance(el, etree.CommentBase):
        return 'comment'
    if isinstance(el, str):
        return 'string'
    return _nons(el.tag)
################################################################################
|
| 936 |
+
# form handling
|
| 937 |
+
################################################################################
|
| 938 |
+
|
| 939 |
+
class FormElement(HtmlElement):
    """
    Represents a <form> element.
    """

    @property
    def inputs(self):
        """
        Returns an accessor for all the input elements in the form.

        See `InputGetter` for more information about the object.
        """
        return InputGetter(self)

    @property
    def fields(self):
        """
        Dictionary-like object that represents all the fields in this
        form.  You can set values in this dictionary to effect the
        form.
        """
        return FieldsDict(self.inputs)

    @fields.setter
    def fields(self, value):
        fields = self.fields
        # NOTE(review): assumes fields.keys() returns a mutable list
        # (so .remove() works) — confirm against InputGetter.keys().
        prev_keys = fields.keys()
        for key, new_value in value.items():
            if key in prev_keys:
                prev_keys.remove(key)
            fields[key] = new_value
        # Fields not mentioned in the assigned mapping are cleared.
        for key in prev_keys:
            if key is None:
                # Case of an unnamed input; these aren't really
                # expressed in form_values() anyway.
                continue
            fields[key] = None

    def _name(self):
        # Best identifier available: name attribute, then '#id', then
        # the form's positional index among all forms in the document.
        if self.get('name'):
            return self.get('name')
        elif self.get('id'):
            return '#' + self.get('id')
        iter_tags = self.body.iter
        forms = list(iter_tags('form'))
        if not forms:
            forms = list(iter_tags('{%s}form' % XHTML_NAMESPACE))
        return str(forms.index(self))

    def form_values(self):
        """
        Return a list of tuples of the field values for the form.
        This is suitable to be passed to ``urllib.urlencode()``.
        """
        results = []
        for el in self.inputs:
            name = el.name
            if not name or 'disabled' in el.attrib:
                continue
            tag = _nons(el.tag)
            if tag == 'textarea':
                results.append((name, el.value))
            elif tag == 'select':
                value = el.value
                if el.multiple:
                    # A multi-select contributes one pair per selection.
                    for v in value:
                        results.append((name, v))
                elif value is not None:
                    results.append((name, el.value))
            else:
                assert tag == 'input', (
                    "Unexpected tag: %r" % el)
                # Unchecked checkboxes/radios and button-like inputs
                # don't contribute to the submitted values.
                if el.checkable and not el.checked:
                    continue
                if el.type in ('submit', 'image', 'reset', 'file'):
                    continue
                value = el.value
                if value is not None:
                    results.append((name, el.value))
        return results

    @property
    def action(self):
        """
        Get/set the form's ``action`` attribute.
        """
        base_url = self.base_url
        action = self.get('action')
        if base_url and action is not None:
            # Resolve relative actions against the document's base URL.
            return urljoin(base_url, action)
        return action

    @action.setter
    def action(self, value):
        self.set('action', value)

    @action.deleter
    def action(self):
        attrib = self.attrib
        if 'action' in attrib:
            del attrib['action']

    @property
    def method(self):
        """
        Get/set the form's method.  Always returns a capitalized
        string, and defaults to ``'GET'``
        """
        return self.get('method', 'GET').upper()

    @method.setter
    def method(self, value):
        self.set('method', value.upper())


# Make the parser produce FormElement instances for <form> tags.
HtmlElementClassLookup._default_element_classes['form'] = FormElement
def submit_form(form, extra_values=None, open_http=None):
    """
    Helper function to submit a form.  Returns a file-like object, as from
    ``urllib.urlopen()``.  This object also has a ``.geturl()`` function,
    which shows the URL if there were any redirects.

    You can use this like::

        form = doc.forms[0]
        form.inputs['foo'].value = 'bar' # etc
        response = form.submit()
        doc = parse(response)
        doc.make_links_absolute(response.geturl())

    To change the HTTP requester, pass a function as ``open_http`` keyword
    argument that opens the URL for you.  The function must have the following
    signature::

        open_http(method, URL, values)

    The action is one of 'GET' or 'POST', the URL is the target URL as a
    string, and the values are a sequence of ``(name, value)`` tuples with the
    form data.
    """
    values = form.form_values()
    if extra_values:
        # Accept either a mapping or a sequence of (name, value) pairs.
        if hasattr(extra_values, 'items'):
            extra_values = extra_values.items()
        values.extend(extra_values)
    if open_http is None:
        open_http = open_http_urllib
    # Fall back to the document's base URL when the form has no action.
    url = form.action if form.action else form.base_url
    return open_http(form.method, url, values)
def open_http_urllib(method, url, values):
    """Default HTTP opener used by ``submit_form()``.

    *method* is 'GET' or 'POST' (anything other than 'GET' is submitted
    as a request body), *url* is the target, and *values* is a sequence
    of ``(name, value)`` pairs.  Returns the file-like object from
    ``urllib.request.urlopen()``.

    Raises ValueError if no URL is given.
    """
    if not url:
        raise ValueError("cannot submit, no URL provided")
    ## FIXME: should test that it's not a relative URL or something
    # The old Python-2 fallback (`from urllib import urlencode, urlopen`)
    # was dead code on Python 3; import the py3 locations directly.
    from urllib.request import urlopen
    from urllib.parse import urlencode
    if method == 'GET':
        # Append the form data to the query string.
        if '?' in url:
            url += '&'
        else:
            url += '?'
        url += urlencode(values)
        data = None
    else:
        data = urlencode(values)
        if not isinstance(data, bytes):
            # urlopen() requires a bytes request body.
            data = data.encode('ASCII')
    return urlopen(url, data)
class FieldsDict(MutableMapping):
    """Mapping view over a form's fields, backed by an `InputGetter`.

    Reading a key returns the corresponding input's ``.value``; writing
    a key sets it.  Keys cannot be deleted — clear a field by assigning
    ``None`` instead.
    """

    def __init__(self, inputs):
        # inputs: an InputGetter (or any mapping of name -> input element)
        self.inputs = inputs

    def __getitem__(self, item):
        return self.inputs[item].value

    def __setitem__(self, item, value):
        self.inputs[item].value = value

    def __delitem__(self, item):
        # Fix: the message used to say "ElementDict", a stale name that
        # doesn't match this class.
        raise KeyError(
            "You cannot remove keys from FieldsDict")

    def keys(self):
        return self.inputs.keys()

    def __contains__(self, item):
        return item in self.inputs

    def __iter__(self):
        return iter(self.inputs.keys())

    def __len__(self):
        return len(self.inputs)

    def __repr__(self):
        return '<%s for form %s>' % (
            self.__class__.__name__,
            self.inputs.form._name())
class InputGetter:
    """
    An accessor that represents all the input fields in a form.

    You can get fields by name from this, with
    ``form.inputs['field_name']``.  If there are a set of checkboxes
    with the same name, they are returned as a list (a `CheckboxGroup`
    which also allows value setting).  Radio inputs are handled
    similarly.  Use ``.keys()`` and ``.items()`` to process all fields
    in this way.

    You can also iterate over this to get all input elements.  This
    won't return the same thing as if you get all the names, as
    checkboxes and radio elements are returned individually.
    """

    def __init__(self, form):
        self.form = form

    def __repr__(self):
        return '<%s for form %s>' % (
            self.__class__.__name__, self.form._name())

    ## FIXME: there should be more methods, and it's unclear if this is
    ## a dictionary-like object or list-like object

    def __getitem__(self, name):
        matches = [el for el in self if el.name == name]
        if not matches:
            raise KeyError("No input element with the name %r" % name)

        first_type = matches[0].get('type')
        if len(matches) > 1 and first_type == 'radio':
            group = RadioGroup(matches)
            group.name = name
            return group
        if len(matches) > 1 and first_type == 'checkbox':
            group = CheckboxGroup(matches)
            group.name = name
            return group
        # I don't like throwing away elements like this
        return matches[0]

    def __contains__(self, name):
        return any(el.name == name for el in self)

    def keys(self):
        """
        Returns all unique field names, in document order.

        :return: A list of all unique field names.
        """
        # Seeding with None excludes unnamed fields from the key list.
        seen = {None}
        ordered = []
        for field in self:
            if field.name not in seen:
                seen.add(field.name)
                ordered.append(field.name)
        return ordered

    def items(self):
        """
        Returns all fields with their names, similar to dict.items().

        :return: A list of (name, field) tuples.
        """
        seen = set()
        pairs = []
        for field in self:
            if field.name not in seen:
                seen.add(field.name)
                pairs.append((field.name, self[field.name]))
        return pairs

    def __iter__(self):
        return self.form.iter('select', 'input', 'textarea')

    def __len__(self):
        count = 0
        for _ in self:
            count += 1
        return count
|
| 1232 |
+
|
| 1233 |
+
|
| 1234 |
+
class InputMixin:
    """
    Mix-in for all input elements (input, select, and textarea).

    Provides the ``name`` property backed by the element's ``name``
    attribute, plus a debugging ``__repr__``.
    """

    @property
    def name(self):
        """
        Get/set the name of the element
        """
        return self.get('name')

    @name.setter
    def name(self, value):
        self.set('name', value)

    @name.deleter
    def name(self):
        # Silently a no-op when the attribute is absent.
        self.attrib.pop('name', None)

    def __repr__(self):
        kind = getattr(self, 'type', None)
        suffix = ' type=%r' % kind if kind else ''
        return '<%s %x name=%r%s>' % (
            self.__class__.__name__, id(self), self.name, suffix)
|
| 1263 |
+
|
| 1264 |
+
|
| 1265 |
+
class TextareaElement(InputMixin, HtmlElement):
    """
    ``<textarea>`` element.  You can get the name with ``.name`` and
    get/set the value with ``.value``
    """
    @property
    def value(self):
        """
        Get/set the value (which is the contents of this element)
        """
        # Namespaced (XHTML) textareas serialise children as XML,
        # plain HTML ones as HTML.
        if self.tag.startswith("{%s}" % XHTML_NAMESPACE):
            method = 'xml'
        else:
            method = 'html'
        parts = [self.text or '']
        # it's rare that there are any child elements at all
        for child in self:
            parts.append(
                etree.tostring(child, method=method, encoding='unicode'))
        return ''.join(parts)

    @value.setter
    def value(self, value):
        del self[:]
        self.text = value

    @value.deleter
    def value(self):
        self.text = ''
        del self[:]
|
| 1295 |
+
|
| 1296 |
+
|
| 1297 |
+
# Register TextareaElement as the default class for parsed <textarea> tags.
HtmlElementClassLookup._default_element_classes['textarea'] = TextareaElement
|
| 1298 |
+
|
| 1299 |
+
|
| 1300 |
+
class SelectElement(InputMixin, HtmlElement):
    """
    ``<select>`` element.  You can get the name with ``.name``.

    ``.value`` will be the value of the selected option, unless this
    is a multi-select element (``<select multiple>``), in which case
    it will be a set-like object.  In either case ``.value_options``
    gives the possible values.

    The boolean attribute ``.multiple`` shows if this is a
    multi-select.
    """
    @property
    def value(self):
        """
        Get/set the value of this select (the selected option).

        If this is a multi-select, this is a set-like object that
        represents all the selected options.
        """
        if self.multiple:
            return MultipleSelectOptions(self)
        options = _options_xpath(self)

        try:
            # If several options carry 'selected', the last one wins.
            selected_option = next(el for el in reversed(options) if el.get('selected') is not None)
        except StopIteration:
            try:
                # No explicit selection: fall back to the first non-disabled option.
                selected_option = next(el for el in options if el.get('disabled') is None)
            except StopIteration:
                # All options disabled (or there are none): no value.
                return None
        value = selected_option.get('value')
        if value is None:
            # An <option> without a value attribute uses its stripped text.
            value = (selected_option.text or '').strip()
        return value

    @value.setter
    def value(self, value):
        if self.multiple:
            # Multi-selects take a sequence of values, not a single string.
            if isinstance(value, str):
                raise TypeError("You must pass in a sequence")
            values = self.value
            values.clear()
            values.update(value)
            return
        checked_option = None
        if value is not None:
            for el in _options_xpath(self):
                opt_value = el.get('value')
                if opt_value is None:
                    opt_value = (el.text or '').strip()
                if opt_value == value:
                    checked_option = el
                    break
            else:
                raise ValueError(
                    "There is no option with the value of %r" % value)
        # Clear any existing selection before applying the new one.
        for el in _options_xpath(self):
            if 'selected' in el.attrib:
                del el.attrib['selected']
        if checked_option is not None:
            checked_option.set('selected', '')

    @value.deleter
    def value(self):
        # FIXME: should del be allowed at all?
        if self.multiple:
            self.value.clear()
        else:
            self.value = None

    @property
    def value_options(self):
        """
        All the possible values this select can have (the ``value``
        attribute of all the ``<option>`` elements.
        """
        options = []
        for el in _options_xpath(self):
            value = el.get('value')
            if value is None:
                value = (el.text or '').strip()
            options.append(value)
        return options

    @property
    def multiple(self):
        """
        Boolean attribute: is there a ``multiple`` attribute on this element.
        """
        return 'multiple' in self.attrib

    @multiple.setter
    def multiple(self, value):
        if value:
            self.set('multiple', '')
        elif 'multiple' in self.attrib:
            del self.attrib['multiple']
|
| 1398 |
+
|
| 1399 |
+
|
| 1400 |
+
# Register SelectElement as the default class for parsed <select> tags.
HtmlElementClassLookup._default_element_classes['select'] = SelectElement
|
| 1401 |
+
|
| 1402 |
+
|
| 1403 |
+
class MultipleSelectOptions(SetMixin):
    """
    Represents all the selected options in a ``<select multiple>`` element.

    You can add to this set-like option to select an option, or remove
    to unselect the option.
    """

    def __init__(self, select):
        self.select = select

    @property
    def options(self):
        """
        Iterator of all the ``<option>`` elements.
        """
        return iter(_options_xpath(self.select))

    @staticmethod
    def _option_value(option):
        # An <option> without a value attribute uses its stripped text.
        value = option.get('value')
        if value is None:
            value = (option.text or '').strip()
        return value

    def __iter__(self):
        for option in self.options:
            if 'selected' in option.attrib:
                yield self._option_value(option)

    def add(self, item):
        """Select the option whose value is *item*.

        :raises ValueError: if no option has that value.
        """
        for option in self.options:
            if self._option_value(option) == item:
                option.set('selected', '')
                break
        else:
            raise ValueError(
                "There is no option with the value %r" % item)

    def remove(self, item):
        """Unselect the option whose value is *item*.

        :raises ValueError: if no option has that value, or if it is
            not currently selected.
        """
        for option in self.options:
            if self._option_value(option) == item:
                if 'selected' in option.attrib:
                    del option.attrib['selected']
                else:
                    raise ValueError(
                        "The option %r is not currently selected" % item)
                break
        else:
            # BUG FIX: message previously read "There is not option ...".
            raise ValueError(
                "There is no option with the value %r" % item)

    def __repr__(self):
        return '<%s {%s} for select name=%r>' % (
            self.__class__.__name__,
            ', '.join([repr(v) for v in self]),
            self.select.name)
|
| 1462 |
+
|
| 1463 |
+
|
| 1464 |
+
class RadioGroup(list):
    """
    This object represents several ``<input type=radio>`` elements
    that have the same name.

    You can use this like a list, but also use the property
    ``.value`` to check/uncheck inputs.  Also you can use
    ``.value_options`` to get the possible values.
    """

    @property
    def value(self):
        """
        Get/set the value, which checks the radio with that value (and
        unchecks any other value).
        """
        for radio in self:
            if 'checked' in radio.attrib:
                return radio.get('value')
        return None

    @value.setter
    def value(self, value):
        target = None
        if value is not None:
            target = next(
                (radio for radio in self if radio.get('value') == value),
                None)
            if target is None:
                raise ValueError("There is no radio input with the value %r" % value)
        # Uncheck everything, then check the requested one (if any).
        for radio in self:
            if 'checked' in radio.attrib:
                del radio.attrib['checked']
        if target is not None:
            target.set('checked', '')

    @value.deleter
    def value(self):
        self.value = None

    @property
    def value_options(self):
        """
        Returns a list of all the possible values.
        """
        return [radio.get('value') for radio in self]

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__, list.__repr__(self))
|
| 1515 |
+
|
| 1516 |
+
|
| 1517 |
+
class CheckboxGroup(list):
    """
    Represents a group of checkboxes (``<input type=checkbox>``) that
    have the same name.

    In addition to using this like a list, the ``.value`` attribute
    returns a set-like object that you can add to or remove from to
    check and uncheck checkboxes.  You can also use ``.value_options``
    to get the possible values.
    """

    @property
    def value(self):
        """
        Return a set-like object that can be modified to check or
        uncheck individual checkboxes according to their value.
        """
        return CheckboxValues(self)

    @value.setter
    def value(self, value):
        current = self.value
        current.clear()
        if not hasattr(value, '__iter__'):
            raise ValueError(
                "A CheckboxGroup (name=%r) must be set to a sequence (not %r)"
                % (self[0].name, value))
        current.update(value)

    @value.deleter
    def value(self):
        self.value.clear()

    @property
    def value_options(self):
        """
        Returns a list of all the possible values.
        """
        return [checkbox.get('value') for checkbox in self]

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__, list.__repr__(self))
|
| 1559 |
+
|
| 1560 |
+
|
| 1561 |
+
class CheckboxValues(SetMixin):
    """
    Represents the values of the checked checkboxes in a group of
    checkboxes with the same name.
    """

    def __init__(self, group):
        self.group = group

    def __iter__(self):
        checked = [
            checkbox.get('value')
            for checkbox in self.group
            if 'checked' in checkbox.attrib]
        return iter(checked)

    def add(self, value):
        """Check the checkbox whose value is *value*."""
        for checkbox in self.group:
            if checkbox.get('value') == value:
                checkbox.set('checked', '')
                break
        else:
            raise KeyError("No checkbox with value %r" % value)

    def remove(self, value):
        """Uncheck the checkbox whose value is *value*.

        :raises KeyError: if no such checkbox exists, or if it was
            already unchecked.
        """
        for checkbox in self.group:
            if checkbox.get('value') == value:
                if 'checked' in checkbox.attrib:
                    del checkbox.attrib['checked']
                else:
                    raise KeyError(
                        "The checkbox with value %r was already unchecked" % value)
                break
        else:
            raise KeyError(
                "No checkbox with value %r" % value)

    def __repr__(self):
        return '<%s {%s} for checkboxes name=%r>' % (
            self.__class__.__name__,
            ', '.join([repr(v) for v in self]),
            self.group.name)
|
| 1602 |
+
|
| 1603 |
+
|
| 1604 |
+
class InputElement(InputMixin, HtmlElement):
    """
    Represents an ``<input>`` element.

    You can get the type with ``.type`` (which is lower-cased and
    defaults to ``'text'``).

    Also you can get and set the value with ``.value``

    Checkboxes and radios have the attribute ``input.checkable ==
    True`` (for all others it is false) and a boolean attribute
    ``.checked``.

    """

    ## FIXME: I'm a little uncomfortable with the use of .checked
    @property
    def value(self):
        """
        Get/set the value of this element, using the ``value`` attribute.

        Also, if this is a checkbox and it has no value, this defaults
        to ``'on'``.  If it is a checkbox or radio that is not
        checked, this returns None.
        """
        if self.checkable:
            if self.checked:
                # Checked boxes without an explicit value submit 'on'.
                return self.get('value') or 'on'
            else:
                return None
        return self.get('value')

    @value.setter
    def value(self, value):
        if self.checkable:
            if not value:
                self.checked = False
            else:
                self.checked = True
                # NOTE(review): only a *string* value also updates the
                # 'value' attribute of a checkable input; a truthy
                # non-string merely checks it — confirm this nesting
                # against upstream, the original indentation was ambiguous.
                if isinstance(value, str):
                    self.set('value', value)
        else:
            self.set('value', value)

    @value.deleter
    def value(self):
        if self.checkable:
            self.checked = False
        else:
            if 'value' in self.attrib:
                del self.attrib['value']

    @property
    def type(self):
        """
        Return the type of this element (using the type attribute).
        """
        return self.get('type', 'text').lower()

    @type.setter
    def type(self, value):
        self.set('type', value)

    @property
    def checkable(self):
        """
        Boolean: can this element be checked?
        """
        return self.type in ('checkbox', 'radio')

    @property
    def checked(self):
        """
        Boolean attribute to get/set the presence of the ``checked``
        attribute.

        You can only use this on checkable input types.
        """
        if not self.checkable:
            raise AttributeError('Not a checkable input type')
        return 'checked' in self.attrib

    @checked.setter
    def checked(self, value):
        if not self.checkable:
            raise AttributeError('Not a checkable input type')
        if value:
            self.set('checked', '')
        else:
            attrib = self.attrib
            if 'checked' in attrib:
                del attrib['checked']
|
| 1696 |
+
|
| 1697 |
+
|
| 1698 |
+
# Register InputElement as the default class for parsed <input> tags.
HtmlElementClassLookup._default_element_classes['input'] = InputElement
|
| 1699 |
+
|
| 1700 |
+
|
| 1701 |
+
class LabelElement(HtmlElement):
    """
    Represents a ``<label>`` element.

    Label elements are linked to other elements with their ``for``
    attribute.  You can access this element with ``label.for_element``.
    """
    @property
    def for_element(self):
        """
        Get/set the element this label points to.  Return None if it
        can't be found.
        """
        id = self.get('for')
        if not id:
            return None
        return self.body.get_element_by_id(id)

    @for_element.setter
    def for_element(self, other):
        id = other.get('id')
        if not id:
            raise TypeError(
                "Element %r has no id attribute" % other)
        self.set('for', id)

    @for_element.deleter
    def for_element(self):
        attrib = self.attrib
        # BUG FIX: this previously deleted the label's own 'id' attribute.
        # Removing the label/element association means dropping 'for'.
        if 'for' in attrib:
            del attrib['for']
|
| 1732 |
+
|
| 1733 |
+
|
| 1734 |
+
# Register LabelElement as the default class for parsed <label> tags.
HtmlElementClassLookup._default_element_classes['label'] = LabelElement
|
| 1735 |
+
|
| 1736 |
+
|
| 1737 |
+
############################################################
|
| 1738 |
+
## Serialization
|
| 1739 |
+
############################################################
|
| 1740 |
+
|
| 1741 |
+
def html_to_xhtml(html):
    """Convert all tags in an HTML tree to XHTML by moving them to the
    XHTML namespace.
    """
    try:
        html = html.getroot()
    except AttributeError:
        # Already an element, not an ElementTree.
        pass
    ns_prefix = "{%s}" % XHTML_NAMESPACE
    for element in html.iter(etree.Element):
        # Only move tags that are not already in some namespace.
        if element.tag[0] != '{':
            element.tag = ns_prefix + element.tag
|
| 1754 |
+
|
| 1755 |
+
|
| 1756 |
+
def xhtml_to_html(xhtml):
    """Convert all tags in an XHTML tree to HTML by removing their
    XHTML namespace.
    """
    try:
        xhtml = xhtml.getroot()
    except AttributeError:
        # Already an element, not an ElementTree.
        pass
    ns_prefix = "{%s}" % XHTML_NAMESPACE
    strip = len(ns_prefix)
    for element in xhtml.iter(ns_prefix + "*"):
        element.tag = element.tag[strip:]
|
| 1768 |
+
|
| 1769 |
+
|
| 1770 |
+
# This isn't a general match, but it's a match for what libxml2
# specifically serialises; used by tostring() below to strip the
# auto-inserted <meta http-equiv="Content-Type"> tag from both str
# and bytes output.
__str_replace_meta_content_type = re.compile(
    r'<meta http-equiv="Content-Type"[^>]*>').sub
__bytes_replace_meta_content_type = re.compile(
    br'<meta http-equiv="Content-Type"[^>]*>').sub
|
| 1776 |
+
|
| 1777 |
+
|
| 1778 |
+
def tostring(doc, pretty_print=False, include_meta_content_type=False,
             encoding=None, method="html", with_tail=True, doctype=None):
    """Return an HTML string representation of the document.

    Note: if include_meta_content_type is true this will create a
    ``<meta http-equiv="Content-Type" ...>`` tag in the head;
    regardless of the value of include_meta_content_type any existing
    ``<meta http-equiv="Content-Type" ...>`` tag will be removed

    The ``encoding`` argument controls the output encoding (defaults to
    ASCII, with &#...; character references for any characters outside
    of ASCII).  Note that you can pass the name ``'unicode'`` as
    ``encoding`` argument to serialise to a Unicode string.

    The ``method`` argument defines the output method.  It defaults to
    'html', but can also be 'xml' for xhtml output, or 'text' to
    serialise to plain text without markup.

    To leave out the tail text of the top-level element that is being
    serialised, pass ``with_tail=False``.

    The ``doctype`` option allows passing in a plain string that will
    be serialised before the XML tree.  Note that passing in non
    well-formed content here will make the XML output non well-formed.
    Also, an existing doctype in the document tree will not be removed
    when serialising an ElementTree instance.

    Example::

        >>> from lxml import html
        >>> root = html.fragment_fromstring('<p>Hello<br>world!</p>')

        >>> html.tostring(root)
        b'<p>Hello<br>world!</p>'
        >>> html.tostring(root, method='html')
        b'<p>Hello<br>world!</p>'

        >>> html.tostring(root, method='xml')
        b'<p>Hello<br/>world!</p>'

        >>> html.tostring(root, method='text')
        b'Helloworld!'

        >>> html.tostring(root, method='text', encoding='unicode')
        u'Helloworld!'

        >>> root = html.fragment_fromstring('<div><p>Hello<br>world!</p>TAIL</div>')
        >>> html.tostring(root[0], method='text', encoding='unicode')
        u'Helloworld!TAIL'

        >>> html.tostring(root[0], method='text', encoding='unicode', with_tail=False)
        u'Helloworld!'

        >>> doc = html.document_fromstring('<p>Hello<br>world!</p>')
        >>> html.tostring(doc, method='html', encoding='unicode')
        u'<html><body><p>Hello<br>world!</p></body></html>'

        >>> print(html.tostring(doc, method='html', encoding='unicode',
        ...         doctype='<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"'
        ...                 ' "http://www.w3.org/TR/html4/strict.dtd">'))
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
        <html><body><p>Hello<br>world!</p></body></html>
    """
    html = etree.tostring(doc, method=method, pretty_print=pretty_print,
                          encoding=encoding, with_tail=with_tail,
                          doctype=doctype)
    if method == 'html' and not include_meta_content_type:
        # etree.tostring() returns str or bytes depending on 'encoding';
        # pick the matching pre-compiled substitution accordingly.
        if isinstance(html, str):
            html = __str_replace_meta_content_type('', html)
        else:
            html = __bytes_replace_meta_content_type(b'', html)
    return html
|
| 1850 |
+
|
| 1851 |
+
|
| 1852 |
+
# Post-process the docstring (e.g. for the doctest examples above).
tostring.__doc__ = __fix_docstring(tostring.__doc__)
|
| 1853 |
+
|
| 1854 |
+
|
| 1855 |
+
def open_in_browser(doc, encoding=None):
    """
    Open the HTML document in a web browser, saving it to a temporary
    file to open it.  Note that this does not delete the file after
    use.  This is mainly meant for debugging.

    :param doc: an element or ElementTree to display.
    :param encoding: output encoding; defaults to the document's own
        declared encoding, or UTF-8.
    """
    import os
    import webbrowser
    import tempfile
    if not isinstance(doc, etree._ElementTree):
        doc = etree.ElementTree(doc)
    handle, fn = tempfile.mkstemp(suffix='.html')
    # We leak the file itself here, but the context manager at least
    # guarantees it is closed (and flushed) before the browser opens it.
    with os.fdopen(handle, 'wb') as f:
        doc.write(f, method="html",
                  encoding=encoding or doc.docinfo.encoding or "UTF-8")
    url = 'file://' + fn.replace(os.path.sep, '/')
    print(url)
    webbrowser.open(url)
|
| 1876 |
+
|
| 1877 |
+
|
| 1878 |
+
################################################################################
|
| 1879 |
+
# configure Element class lookup
|
| 1880 |
+
################################################################################
|
| 1881 |
+
|
| 1882 |
+
class HTMLParser(etree.HTMLParser):
    """An HTML parser that is configured to return lxml.html Element
    objects.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Route created elements through the lxml.html class lookup so
        # parsed tags become the specialised element classes above.
        self.set_element_class_lookup(HtmlElementClassLookup())
|
| 1889 |
+
|
| 1890 |
+
|
| 1891 |
+
class XHTMLParser(etree.XMLParser):
    """An XML parser that is configured to return lxml.html Element
    objects.

    Note that this parser is not really XHTML aware unless you let it
    load a DTD that declares the HTML entities.  To do this, make sure
    you have the XHTML DTDs installed in your catalogs, and create the
    parser like this::

        >>> parser = XHTMLParser(load_dtd=True)

    If you additionally want to validate the document, use this::

        >>> parser = XHTMLParser(dtd_validation=True)

    For catalog support, see http://www.xmlsoft.org/catalog.html.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Route created elements through the lxml.html class lookup so
        # parsed tags become the specialised element classes above.
        self.set_element_class_lookup(HtmlElementClassLookup())
|
| 1911 |
+
|
| 1912 |
+
|
| 1913 |
+
def Element(*args, **kw):
    """Create a new HTML Element.

    This can also be used for XHTML documents.
    """
    return html_parser.makeelement(*args, **kw)
|
| 1920 |
+
|
| 1921 |
+
|
| 1922 |
+
# Module-level parser instances shared by the factory functions above.
html_parser = HTMLParser()
xhtml_parser = XHTMLParser()
|
omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/ElementSoup.cpython-310.pyc
ADDED
|
Binary file (507 Bytes). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/_diffcommand.cpython-310.pyc
ADDED
|
Binary file (2.24 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/_html5builder.cpython-310.pyc
ADDED
|
Binary file (3.59 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/_setmixin.cpython-310.pyc
ADDED
|
Binary file (2.08 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/builder.cpython-310.pyc
ADDED
|
Binary file (2.93 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/clean.cpython-310.pyc
ADDED
|
Binary file (538 Bytes). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/defs.cpython-310.pyc
ADDED
|
Binary file (2.81 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/diff.cpython-310.pyc
ADDED
|
Binary file (23.9 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/formfill.cpython-310.pyc
ADDED
|
Binary file (7.37 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/html5parser.cpython-310.pyc
ADDED
|
Binary file (6.41 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/soupparser.cpython-310.pyc
ADDED
|
Binary file (8 kB). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/__pycache__/usedoctest.cpython-310.pyc
ADDED
|
Binary file (433 Bytes). View file
|
|
|
omnilmm/lib/python3.10/site-packages/lxml/html/_diffcommand.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import optparse
|
| 2 |
+
import sys
|
| 3 |
+
import re
|
| 4 |
+
import os
|
| 5 |
+
from .diff import htmldiff
|
| 6 |
+
|
| 7 |
+
#: Help text displayed by ``--help`` (intentionally empty for now).
description = """\
"""

#: Command-line interface definition for the htmldiff tool.
parser = optparse.OptionParser(
    usage=("%prog [OPTIONS] FILE1 FILE2\n"
           "%prog --annotate [OPTIONS] INFO1 FILE1 INFO2 FILE2 ..."),
    description=description,
)

parser.add_option(
    '-o', '--output',
    metavar="FILE",
    dest="output",
    default="-",
    help="File to write the difference to",
)

parser.add_option(
    '-a', '--annotation',
    action="store_true",
    dest="annotation",
    help="Do an annotation")
|
| 29 |
+
|
| 30 |
+
def main(args=None):
    """Command-line entry point: diff the bodies of two HTML files.

    Parses *args* (defaults to ``sys.argv[1:]``), reads both inputs,
    diffs only the ``<body>`` content, and writes FILE2's surrounding
    markup with the diffed body to ``--output`` ('-' means stdout).
    Exits with status 1 on usage errors.
    """
    if args is None:
        args = sys.argv[1:]
    options, args = parser.parse_args(args)
    if options.annotation:
        return annotate(options, args)
    if len(args) != 2:
        print('Error: you must give two files')
        parser.print_help()
        sys.exit(1)
    file1, file2 = args
    input1 = read_file(file1)
    input2 = read_file(file2)
    body1 = split_body(input1)[1]
    # Keep FILE2's pre/post markup; only the two bodies are diffed.
    pre, body2, post = split_body(input2)
    result = htmldiff(body1, body2)
    result = pre + result + post
    if options.output == '-':
        if not result.endswith('\n'):
            result += '\n'
        sys.stdout.write(result)
    else:
        # BUG FIX: ``result`` is text (built with str operations above),
        # but the output file was opened in binary mode ('wb'), which
        # raises TypeError on Python 3.  Open in text mode instead.
        with open(options.output, 'w') as f:
            f.write(result)
|
| 54 |
+
|
| 55 |
+
def read_file(filename):
    """Return the contents of *filename* as text.

    ``'-'`` reads from stdin instead.  Raises OSError when the named
    file does not exist.
    """
    if filename == '-':
        c = sys.stdin.read()
    elif not os.path.exists(filename):
        raise OSError(
            "Input file %s does not exist" % filename)
    else:
        # BUG FIX: this previously opened the file in binary mode
        # ('rb') and returned bytes, while split_body()'s str regexes
        # and the str concatenation in main() require text on Python 3
        # (TypeError otherwise).  Text mode also matches the stdin
        # branch above.
        with open(filename, 'r') as f:
            c = f.read()
    return c
|
| 65 |
+
|
| 66 |
+
# Locate the opening and closing <body> tags: case-insensitive, any
# attributes allowed, DOTALL so tags spanning lines still match.
body_start_re = re.compile(
    r"<body.*?>", re.I|re.S)
body_end_re = re.compile(
    r"</body.*?>", re.I|re.S)

def split_body(html):
    """Split *html* into ``(pre, body, post)`` around the <body> element.

    ``pre`` ends just after the opening <body> tag; ``post`` starts at
    the closing </body> tag.  Either is '' when its tag is absent.
    """
    prefix = suffix = ''
    start = body_start_re.search(html)
    if start:
        prefix = html[:start.end()]
        html = html[start.end():]
    end = body_end_re.search(html)
    if end:
        suffix = html[end.start():]
        html = html[:end.start()]
    return prefix, html, suffix
|
| 82 |
+
|
| 83 |
+
def annotate(options, args):
    """Annotation mode (``--annotate``): not implemented yet.

    Prints a notice and terminates the program with exit status 1.
    """
    print("Not yet implemented")
    raise SystemExit(1)
|
| 86 |
+
|
omnilmm/lib/python3.10/site-packages/lxml/html/_html5builder.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Legacy module - don't use in new code!
|
| 3 |
+
|
| 4 |
+
html5lib now has its own proper implementation.
|
| 5 |
+
|
| 6 |
+
This module implements a tree builder for html5lib that generates lxml
|
| 7 |
+
html element trees. This module uses camelCase as it follows the
|
| 8 |
+
html5lib style guide.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from html5lib.treebuilders import _base, etree as etree_builders
|
| 12 |
+
from lxml import html, etree
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class DocumentType:
    """Minimal DOM-like doctype record (camelCase per html5lib style)."""

    def __init__(self, name, publicId, systemId):
        # Store the three doctype components verbatim.
        self.name, self.publicId, self.systemId = name, publicId, systemId
|
| 21 |
+
|
| 22 |
+
class Document:
    """Minimal DOM-like document wrapper around an lxml ElementTree."""

    def __init__(self):
        self.childNodes = []
        # Set later by TreeBuilder.insertRoot().
        self._elementTree = None

    def appendChild(self, element):
        # Document-level appendChild: place the element as a sibling
        # after the current root.
        self._elementTree.getroot().addnext(element._element)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class TreeBuilder(_base.TreeBuilder):
    """html5lib tree builder that produces lxml html element trees."""

    documentClass = Document
    doctypeClass = DocumentType
    elementClass = None    # assigned per-instance in __init__
    commentClass = None    # assigned per-instance in __init__
    fragmentClass = Document

    def __init__(self, *args, **kwargs):
        # Elements come from lxml.html, comments from lxml.etree.
        html_mod = etree_builders.getETreeModule(html, fullTree=False)
        etree_mod = etree_builders.getETreeModule(etree, fullTree=False)
        self.elementClass = html_mod.Element
        self.commentClass = etree_mod.Comment
        _base.TreeBuilder.__init__(self, *args, **kwargs)

    def reset(self):
        """Reset parser state for a fresh document."""
        _base.TreeBuilder.reset(self)
        self.rootInserted = False
        self.initialComments = []
        self.doctype = None

    def getDocument(self):
        """Return the finished document as an lxml ElementTree."""
        return self.document._elementTree

    def getFragment(self):
        """Return the parsed fragment as a list of text and elements."""
        top = self.openElements[0]._element
        pieces = []
        if top.text:
            pieces.append(top.text)
        pieces.extend(top.getchildren())
        if top.tail:
            pieces.append(top.tail)
        return pieces

    def insertDoctype(self, name, publicId, systemId):
        # Remember the doctype; it is serialized later by insertRoot().
        self.doctype = self.doctypeClass(name, publicId, systemId)

    def insertComment(self, data, parent=None):
        # Comments before the root cannot be attached yet; queue them.
        if self.rootInserted:
            _base.TreeBuilder.insertComment(self, data, parent)
        else:
            self.initialComments.append(data)

    def insertRoot(self, name):
        """Create the root <html> element, attaching doctype and queued comments."""
        # Serialize the doctype (if any) plus an empty html element and
        # let lxml.html parse it so we get a proper root with doctype.
        pieces = []
        if self.doctype and self.doctype.name:
            pieces.append('<!DOCTYPE %s' % self.doctype.name)
            if self.doctype.publicId is not None or self.doctype.systemId is not None:
                pieces.append(' PUBLIC "%s" "%s"' % (self.doctype.publicId,
                                                     self.doctype.systemId))
            pieces.append('>')
        pieces.append('<html></html>')
        root = html.fromstring(''.join(pieces))

        # Attach the comments that arrived before the root:
        for data in self.initialComments:
            root.addprevious(etree.Comment(data))

        # Wrap the tree in our Document class.
        self.document = self.documentClass()
        self.document._elementTree = root.getroottree()

        # Register the root in the builder's child/open-element stacks.
        wrapper = self.elementClass(name)
        wrapper._element = root
        self.document.childNodes.append(wrapper)
        self.openElements.append(wrapper)

        self.rootInserted = True
|
omnilmm/lib/python3.10/site-packages/lxml/html/_setmixin.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Historically this fell back to ``from collections import MutableSet``
# for old Python versions; here both the try and the except branch
# import the identical name, so the try/except is dead code and a
# plain import suffices.
from collections.abc import MutableSet
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class SetMixin(MutableSet):
    """Mix-in providing the full ``set`` API on top of a minimal core.

    Subclasses must define ``__iter__``, ``add`` and ``remove``; all
    remaining set-style methods are derived from those.
    """

    def __len__(self):
        # Count by iteration; subclasses only guarantee __iter__.
        return sum(1 for _ in self)

    def __contains__(self, item):
        # Keep *item* on the left of ``==`` so its __eq__ is preferred.
        return any(item == member for member in self)

    issubset = MutableSet.__le__
    issuperset = MutableSet.__ge__

    union = MutableSet.__or__
    intersection = MutableSet.__and__
    difference = MutableSet.__sub__
    symmetric_difference = MutableSet.__xor__

    def copy(self):
        """Return a plain ``set`` with the same members."""
        return set(self)

    def update(self, other):
        self |= other

    def intersection_update(self, other):
        self &= other

    def difference_update(self, other):
        self -= other

    def symmetric_difference_update(self, other):
        self ^= other

    def discard(self, item):
        """Remove *item* if present; unlike remove(), never raises."""
        try:
            self.remove(item)
        except KeyError:
            pass

    @classmethod
    def _from_iterable(cls, it):
        # MutableSet's binary operators build their results through
        # this hook; a plain set is a fine result type.
        return set(it)
|
omnilmm/lib/python3.10/site-packages/lxml/html/builder.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# --------------------------------------------------------------------
|
| 2 |
+
# The ElementTree toolkit is
|
| 3 |
+
# Copyright (c) 1999-2004 by Fredrik Lundh
|
| 4 |
+
# --------------------------------------------------------------------
|
| 5 |
+
|
| 6 |
+
"""
|
| 7 |
+
A set of HTML generator tags for building HTML documents.
|
| 8 |
+
|
| 9 |
+
Usage::
|
| 10 |
+
|
| 11 |
+
>>> from lxml.html.builder import *
|
| 12 |
+
>>> html = HTML(
|
| 13 |
+
... HEAD( TITLE("Hello World") ),
|
| 14 |
+
... BODY( CLASS("main"),
|
| 15 |
+
... H1("Hello World !")
|
| 16 |
+
... )
|
| 17 |
+
... )
|
| 18 |
+
|
| 19 |
+
>>> import lxml.etree
|
| 20 |
+
>>> print(lxml.etree.tostring(html, pretty_print=True))
|
| 21 |
+
<html>
|
| 22 |
+
<head>
|
| 23 |
+
<title>Hello World</title>
|
| 24 |
+
</head>
|
| 25 |
+
<body class="main">
|
| 26 |
+
<h1>Hello World !</h1>
|
| 27 |
+
</body>
|
| 28 |
+
</html>
|
| 29 |
+
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
from lxml.builder import ElementMaker
|
| 33 |
+
from lxml.html import html_parser
|
| 34 |
+
|
| 35 |
+
# HTML element factory: elements are created through lxml.html's
# parser makeelement, so generated trees use lxml.html element classes.
E = ElementMaker(makeelement=html_parser.makeelement)

# elements
A = E.a  #: anchor
ABBR = E.abbr  #: abbreviated form (e.g., WWW, HTTP, etc.)
ACRONYM = E.acronym  #: acronym
ADDRESS = E.address  #: information on author
APPLET = E.applet  #: Java applet (DEPRECATED)
AREA = E.area  #: client-side image map area
B = E.b  #: bold text style
BASE = E.base  #: document base URI
BASEFONT = E.basefont  #: base font size (DEPRECATED)
BDO = E.bdo  #: I18N BiDi over-ride
BIG = E.big  #: large text style
BLOCKQUOTE = E.blockquote  #: long quotation
BODY = E.body  #: document body
BR = E.br  #: forced line break
BUTTON = E.button  #: push button
CAPTION = E.caption  #: table caption
CENTER = E.center  #: shorthand for DIV align=center (DEPRECATED)
CITE = E.cite  #: citation
CODE = E.code  #: computer code fragment
COL = E.col  #: table column
COLGROUP = E.colgroup  #: table column group
DD = E.dd  #: definition description
DEL = getattr(E, 'del')  #: deleted text ('del' is a Python keyword, hence getattr)
DFN = E.dfn  #: instance definition
DIR = E.dir  #: directory list (DEPRECATED)
DIV = E.div  #: generic language/style container
DL = E.dl  #: definition list
DT = E.dt  #: definition term
EM = E.em  #: emphasis
FIELDSET = E.fieldset  #: form control group
FONT = E.font  #: local change to font (DEPRECATED)
FORM = E.form  #: interactive form
FRAME = E.frame  #: subwindow
FRAMESET = E.frameset  #: window subdivision
H1 = E.h1  #: heading
H2 = E.h2  #: heading
H3 = E.h3  #: heading
H4 = E.h4  #: heading
H5 = E.h5  #: heading
H6 = E.h6  #: heading
HEAD = E.head  #: document head
HR = E.hr  #: horizontal rule
HTML = E.html  #: document root element
I = E.i  #: italic text style
IFRAME = E.iframe  #: inline subwindow
IMG = E.img  #: Embedded image
INPUT = E.input  #: form control
INS = E.ins  #: inserted text
ISINDEX = E.isindex  #: single line prompt (DEPRECATED)
KBD = E.kbd  #: text to be entered by the user
LABEL = E.label  #: form field label text
LEGEND = E.legend  #: fieldset legend
LI = E.li  #: list item
LINK = E.link  #: a media-independent link
MAP = E.map  #: client-side image map
MENU = E.menu  #: menu list (DEPRECATED)
META = E.meta  #: generic metainformation
NOFRAMES = E.noframes  #: alternate content container for non frame-based rendering
NOSCRIPT = E.noscript  #: alternate content container for non script-based rendering
OBJECT = E.object  #: generic embedded object
OL = E.ol  #: ordered list
OPTGROUP = E.optgroup  #: option group
OPTION = E.option  #: selectable choice
P = E.p  #: paragraph
PARAM = E.param  #: named property value
PRE = E.pre  #: preformatted text
Q = E.q  #: short inline quotation
S = E.s  #: strike-through text style (DEPRECATED)
SAMP = E.samp  #: sample program output, scripts, etc.
SCRIPT = E.script  #: script statements
SELECT = E.select  #: option selector
SMALL = E.small  #: small text style
SPAN = E.span  #: generic language/style container
STRIKE = E.strike  #: strike-through text (DEPRECATED)
STRONG = E.strong  #: strong emphasis
STYLE = E.style  #: style info
SUB = E.sub  #: subscript
SUP = E.sup  #: superscript
TABLE = E.table  #: table
TBODY = E.tbody  #: table body
TD = E.td  #: table data cell
TEXTAREA = E.textarea  #: multi-line text field
TFOOT = E.tfoot  #: table footer
TH = E.th  #: table header cell
THEAD = E.thead  #: table header
TITLE = E.title  #: document title
TR = E.tr  #: table row
TT = E.tt  #: teletype or monospaced text style
U = E.u  #: underlined text style (DEPRECATED)
UL = E.ul  #: unordered list
VAR = E.var  #: instance of a variable or program argument

# attributes (only reserved words are included here)
ATTR = dict  #: plain dict doubles as a generic attribute mapping
|
| 132 |
+
def CLASS(v):
    """Return an attribute dict for the reserved word ``class``."""
    return {'class': v}
|
| 133 |
+
def FOR(v):
    """Return an attribute dict for the reserved word ``for``."""
    return {'for': v}
|