Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- parrot/lib/python3.10/asyncio/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/__main__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/base_events.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/base_futures.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/base_subprocess.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/base_tasks.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/coroutines.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/exceptions.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/format_helpers.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/futures.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/locks.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/log.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/protocols.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/queues.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/selector_events.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/sslproto.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/staggered.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/subprocess.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/tasks.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/threads.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/transports.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/trsock.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/unix_events.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/windows_events.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/__pycache__/windows_utils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/asyncio/base_events.py +1934 -0
- parrot/lib/python3.10/asyncio/base_subprocess.py +285 -0
- parrot/lib/python3.10/asyncio/constants.py +27 -0
- parrot/lib/python3.10/asyncio/events.py +819 -0
- parrot/lib/python3.10/asyncio/log.py +7 -0
- parrot/lib/python3.10/asyncio/proactor_events.py +875 -0
- parrot/lib/python3.10/asyncio/queues.py +245 -0
- parrot/lib/python3.10/asyncio/sslproto.py +739 -0
- parrot/lib/python3.10/asyncio/staggered.py +149 -0
- parrot/lib/python3.10/asyncio/streams.py +726 -0
- parrot/lib/python3.10/asyncio/trsock.py +206 -0
- parrot/lib/python3.10/asyncio/unix_events.py +1466 -0
- parrot/lib/python3.10/html/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/pydoc_data/__init__.py +0 -0
- parrot/lib/python3.10/pydoc_data/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/pydoc_data/_pydoc.css +6 -0
- parrot/lib/python3.10/unittest/__main__.py +18 -0
- parrot/lib/python3.10/unittest/__pycache__/__main__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/unittest/__pycache__/async_case.cpython-310.pyc +0 -0
- parrot/lib/python3.10/unittest/__pycache__/loader.cpython-310.pyc +0 -0
- parrot/lib/python3.10/unittest/__pycache__/mock.cpython-310.pyc +0 -0
- parrot/lib/python3.10/unittest/__pycache__/result.cpython-310.pyc +0 -0
- parrot/lib/python3.10/unittest/__pycache__/runner.cpython-310.pyc +0 -0
- parrot/lib/python3.10/unittest/__pycache__/signals.cpython-310.pyc +0 -0
- parrot/lib/python3.10/unittest/async_case.py +170 -0
parrot/lib/python3.10/asyncio/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (739 Bytes). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/__main__.cpython-310.pyc
ADDED
|
Binary file (3.46 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/base_events.cpython-310.pyc
ADDED
|
Binary file (52.1 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/base_futures.cpython-310.pyc
ADDED
|
Binary file (2.17 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/base_subprocess.cpython-310.pyc
ADDED
|
Binary file (9.38 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/base_tasks.cpython-310.pyc
ADDED
|
Binary file (2.24 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/coroutines.cpython-310.pyc
ADDED
|
Binary file (6.62 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/exceptions.cpython-310.pyc
ADDED
|
Binary file (2.72 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/format_helpers.cpython-310.pyc
ADDED
|
Binary file (2.59 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/futures.cpython-310.pyc
ADDED
|
Binary file (12.1 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/locks.cpython-310.pyc
ADDED
|
Binary file (14.4 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/log.cpython-310.pyc
ADDED
|
Binary file (483 Bytes). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/protocols.cpython-310.pyc
ADDED
|
Binary file (8.29 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/queues.cpython-310.pyc
ADDED
|
Binary file (8.3 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/selector_events.cpython-310.pyc
ADDED
|
Binary file (29.8 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/sslproto.cpython-310.pyc
ADDED
|
Binary file (22.2 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/staggered.cpython-310.pyc
ADDED
|
Binary file (4.44 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/subprocess.cpython-310.pyc
ADDED
|
Binary file (7.32 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/tasks.cpython-310.pyc
ADDED
|
Binary file (24.2 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/threads.cpython-310.pyc
ADDED
|
Binary file (1.24 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/transports.cpython-310.pyc
ADDED
|
Binary file (12.6 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/trsock.cpython-310.pyc
ADDED
|
Binary file (7.84 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/unix_events.cpython-310.pyc
ADDED
|
Binary file (41.9 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/windows_events.cpython-310.pyc
ADDED
|
Binary file (24.4 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/__pycache__/windows_utils.cpython-310.pyc
ADDED
|
Binary file (4.75 kB). View file
|
|
|
parrot/lib/python3.10/asyncio/base_events.py
ADDED
|
@@ -0,0 +1,1934 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Base implementation of event loop.
|
| 2 |
+
|
| 3 |
+
The event loop can be broken up into a multiplexer (the part
|
| 4 |
+
responsible for notifying us of I/O events) and the event loop proper,
|
| 5 |
+
which wraps a multiplexer with functionality for scheduling callbacks,
|
| 6 |
+
immediately or at a given time in the future.
|
| 7 |
+
|
| 8 |
+
Whenever a public API takes a callback, subsequent positional
|
| 9 |
+
arguments will be passed to the callback if/when it is called. This
|
| 10 |
+
avoids the proliferation of trivial lambdas implementing closures.
|
| 11 |
+
Keyword arguments for the callback are not supported; this is a
|
| 12 |
+
conscious design decision, leaving the door open for keyword arguments
|
| 13 |
+
to modify the meaning of the API call itself.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import collections
|
| 17 |
+
import collections.abc
|
| 18 |
+
import concurrent.futures
|
| 19 |
+
import functools
|
| 20 |
+
import heapq
|
| 21 |
+
import itertools
|
| 22 |
+
import os
|
| 23 |
+
import socket
|
| 24 |
+
import stat
|
| 25 |
+
import subprocess
|
| 26 |
+
import threading
|
| 27 |
+
import time
|
| 28 |
+
import traceback
|
| 29 |
+
import sys
|
| 30 |
+
import warnings
|
| 31 |
+
import weakref
|
| 32 |
+
|
| 33 |
+
try:
|
| 34 |
+
import ssl
|
| 35 |
+
except ImportError: # pragma: no cover
|
| 36 |
+
ssl = None
|
| 37 |
+
|
| 38 |
+
from . import constants
|
| 39 |
+
from . import coroutines
|
| 40 |
+
from . import events
|
| 41 |
+
from . import exceptions
|
| 42 |
+
from . import futures
|
| 43 |
+
from . import protocols
|
| 44 |
+
from . import sslproto
|
| 45 |
+
from . import staggered
|
| 46 |
+
from . import tasks
|
| 47 |
+
from . import transports
|
| 48 |
+
from . import trsock
|
| 49 |
+
from .log import logger
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
__all__ = 'BaseEventLoop','Server',
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# Minimum number of _scheduled timer handles before cleanup of
|
| 56 |
+
# cancelled handles is performed.
|
| 57 |
+
_MIN_SCHEDULED_TIMER_HANDLES = 100
|
| 58 |
+
|
| 59 |
+
# Minimum fraction of _scheduled timer handles that are cancelled
|
| 60 |
+
# before cleanup of cancelled handles is performed.
|
| 61 |
+
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
|
| 65 |
+
|
| 66 |
+
# Maximum timeout passed to select to avoid OS limitations
|
| 67 |
+
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
|
| 68 |
+
|
| 69 |
+
# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
|
| 70 |
+
# *reuse_address* parameter
|
| 71 |
+
_unset = object()
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def _format_handle(handle):
|
| 75 |
+
cb = handle._callback
|
| 76 |
+
if isinstance(getattr(cb, '__self__', None), tasks.Task):
|
| 77 |
+
# format the task
|
| 78 |
+
return repr(cb.__self__)
|
| 79 |
+
else:
|
| 80 |
+
return str(handle)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def _format_pipe(fd):
|
| 84 |
+
if fd == subprocess.PIPE:
|
| 85 |
+
return '<pipe>'
|
| 86 |
+
elif fd == subprocess.STDOUT:
|
| 87 |
+
return '<stdout>'
|
| 88 |
+
else:
|
| 89 |
+
return repr(fd)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _set_reuseport(sock):
|
| 93 |
+
if not hasattr(socket, 'SO_REUSEPORT'):
|
| 94 |
+
raise ValueError('reuse_port not supported by socket module')
|
| 95 |
+
else:
|
| 96 |
+
try:
|
| 97 |
+
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
|
| 98 |
+
except OSError:
|
| 99 |
+
raise ValueError('reuse_port not supported by socket module, '
|
| 100 |
+
'SO_REUSEPORT defined but not implemented.')
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
|
| 104 |
+
# Try to skip getaddrinfo if "host" is already an IP. Users might have
|
| 105 |
+
# handled name resolution in their own code and pass in resolved IPs.
|
| 106 |
+
if not hasattr(socket, 'inet_pton'):
|
| 107 |
+
return
|
| 108 |
+
|
| 109 |
+
if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
|
| 110 |
+
host is None:
|
| 111 |
+
return None
|
| 112 |
+
|
| 113 |
+
if type == socket.SOCK_STREAM:
|
| 114 |
+
proto = socket.IPPROTO_TCP
|
| 115 |
+
elif type == socket.SOCK_DGRAM:
|
| 116 |
+
proto = socket.IPPROTO_UDP
|
| 117 |
+
else:
|
| 118 |
+
return None
|
| 119 |
+
|
| 120 |
+
if port is None:
|
| 121 |
+
port = 0
|
| 122 |
+
elif isinstance(port, bytes) and port == b'':
|
| 123 |
+
port = 0
|
| 124 |
+
elif isinstance(port, str) and port == '':
|
| 125 |
+
port = 0
|
| 126 |
+
else:
|
| 127 |
+
# If port's a service name like "http", don't skip getaddrinfo.
|
| 128 |
+
try:
|
| 129 |
+
port = int(port)
|
| 130 |
+
except (TypeError, ValueError):
|
| 131 |
+
return None
|
| 132 |
+
|
| 133 |
+
if family == socket.AF_UNSPEC:
|
| 134 |
+
afs = [socket.AF_INET]
|
| 135 |
+
if _HAS_IPv6:
|
| 136 |
+
afs.append(socket.AF_INET6)
|
| 137 |
+
else:
|
| 138 |
+
afs = [family]
|
| 139 |
+
|
| 140 |
+
if isinstance(host, bytes):
|
| 141 |
+
host = host.decode('idna')
|
| 142 |
+
if '%' in host:
|
| 143 |
+
# Linux's inet_pton doesn't accept an IPv6 zone index after host,
|
| 144 |
+
# like '::1%lo0'.
|
| 145 |
+
return None
|
| 146 |
+
|
| 147 |
+
for af in afs:
|
| 148 |
+
try:
|
| 149 |
+
socket.inet_pton(af, host)
|
| 150 |
+
# The host has already been resolved.
|
| 151 |
+
if _HAS_IPv6 and af == socket.AF_INET6:
|
| 152 |
+
return af, type, proto, '', (host, port, flowinfo, scopeid)
|
| 153 |
+
else:
|
| 154 |
+
return af, type, proto, '', (host, port)
|
| 155 |
+
except OSError:
|
| 156 |
+
pass
|
| 157 |
+
|
| 158 |
+
# "host" is not an IP address.
|
| 159 |
+
return None
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
|
| 163 |
+
"""Interleave list of addrinfo tuples by family."""
|
| 164 |
+
# Group addresses by family
|
| 165 |
+
addrinfos_by_family = collections.OrderedDict()
|
| 166 |
+
for addr in addrinfos:
|
| 167 |
+
family = addr[0]
|
| 168 |
+
if family not in addrinfos_by_family:
|
| 169 |
+
addrinfos_by_family[family] = []
|
| 170 |
+
addrinfos_by_family[family].append(addr)
|
| 171 |
+
addrinfos_lists = list(addrinfos_by_family.values())
|
| 172 |
+
|
| 173 |
+
reordered = []
|
| 174 |
+
if first_address_family_count > 1:
|
| 175 |
+
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
|
| 176 |
+
del addrinfos_lists[0][:first_address_family_count - 1]
|
| 177 |
+
reordered.extend(
|
| 178 |
+
a for a in itertools.chain.from_iterable(
|
| 179 |
+
itertools.zip_longest(*addrinfos_lists)
|
| 180 |
+
) if a is not None)
|
| 181 |
+
return reordered
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def _run_until_complete_cb(fut):
|
| 185 |
+
if not fut.cancelled():
|
| 186 |
+
exc = fut.exception()
|
| 187 |
+
if isinstance(exc, (SystemExit, KeyboardInterrupt)):
|
| 188 |
+
# Issue #22429: run_forever() already finished, no need to
|
| 189 |
+
# stop it.
|
| 190 |
+
return
|
| 191 |
+
futures._get_loop(fut).stop()
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
if hasattr(socket, 'TCP_NODELAY'):
|
| 195 |
+
def _set_nodelay(sock):
|
| 196 |
+
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
|
| 197 |
+
sock.type == socket.SOCK_STREAM and
|
| 198 |
+
sock.proto == socket.IPPROTO_TCP):
|
| 199 |
+
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
| 200 |
+
else:
|
| 201 |
+
def _set_nodelay(sock):
|
| 202 |
+
pass
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def _check_ssl_socket(sock):
|
| 206 |
+
if ssl is not None and isinstance(sock, ssl.SSLSocket):
|
| 207 |
+
raise TypeError("Socket cannot be of type SSLSocket")
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
class _SendfileFallbackProtocol(protocols.Protocol):
|
| 211 |
+
def __init__(self, transp):
|
| 212 |
+
if not isinstance(transp, transports._FlowControlMixin):
|
| 213 |
+
raise TypeError("transport should be _FlowControlMixin instance")
|
| 214 |
+
self._transport = transp
|
| 215 |
+
self._proto = transp.get_protocol()
|
| 216 |
+
self._should_resume_reading = transp.is_reading()
|
| 217 |
+
self._should_resume_writing = transp._protocol_paused
|
| 218 |
+
transp.pause_reading()
|
| 219 |
+
transp.set_protocol(self)
|
| 220 |
+
if self._should_resume_writing:
|
| 221 |
+
self._write_ready_fut = self._transport._loop.create_future()
|
| 222 |
+
else:
|
| 223 |
+
self._write_ready_fut = None
|
| 224 |
+
|
| 225 |
+
async def drain(self):
|
| 226 |
+
if self._transport.is_closing():
|
| 227 |
+
raise ConnectionError("Connection closed by peer")
|
| 228 |
+
fut = self._write_ready_fut
|
| 229 |
+
if fut is None:
|
| 230 |
+
return
|
| 231 |
+
await fut
|
| 232 |
+
|
| 233 |
+
def connection_made(self, transport):
|
| 234 |
+
raise RuntimeError("Invalid state: "
|
| 235 |
+
"connection should have been established already.")
|
| 236 |
+
|
| 237 |
+
def connection_lost(self, exc):
|
| 238 |
+
if self._write_ready_fut is not None:
|
| 239 |
+
# Never happens if peer disconnects after sending the whole content
|
| 240 |
+
# Thus disconnection is always an exception from user perspective
|
| 241 |
+
if exc is None:
|
| 242 |
+
self._write_ready_fut.set_exception(
|
| 243 |
+
ConnectionError("Connection is closed by peer"))
|
| 244 |
+
else:
|
| 245 |
+
self._write_ready_fut.set_exception(exc)
|
| 246 |
+
self._proto.connection_lost(exc)
|
| 247 |
+
|
| 248 |
+
def pause_writing(self):
|
| 249 |
+
if self._write_ready_fut is not None:
|
| 250 |
+
return
|
| 251 |
+
self._write_ready_fut = self._transport._loop.create_future()
|
| 252 |
+
|
| 253 |
+
def resume_writing(self):
|
| 254 |
+
if self._write_ready_fut is None:
|
| 255 |
+
return
|
| 256 |
+
self._write_ready_fut.set_result(False)
|
| 257 |
+
self._write_ready_fut = None
|
| 258 |
+
|
| 259 |
+
def data_received(self, data):
|
| 260 |
+
raise RuntimeError("Invalid state: reading should be paused")
|
| 261 |
+
|
| 262 |
+
def eof_received(self):
|
| 263 |
+
raise RuntimeError("Invalid state: reading should be paused")
|
| 264 |
+
|
| 265 |
+
async def restore(self):
|
| 266 |
+
self._transport.set_protocol(self._proto)
|
| 267 |
+
if self._should_resume_reading:
|
| 268 |
+
self._transport.resume_reading()
|
| 269 |
+
if self._write_ready_fut is not None:
|
| 270 |
+
# Cancel the future.
|
| 271 |
+
# Basically it has no effect because protocol is switched back,
|
| 272 |
+
# no code should wait for it anymore.
|
| 273 |
+
self._write_ready_fut.cancel()
|
| 274 |
+
if self._should_resume_writing:
|
| 275 |
+
self._proto.resume_writing()
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
class Server(events.AbstractServer):
|
| 279 |
+
|
| 280 |
+
def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
|
| 281 |
+
ssl_handshake_timeout):
|
| 282 |
+
self._loop = loop
|
| 283 |
+
self._sockets = sockets
|
| 284 |
+
self._active_count = 0
|
| 285 |
+
self._waiters = []
|
| 286 |
+
self._protocol_factory = protocol_factory
|
| 287 |
+
self._backlog = backlog
|
| 288 |
+
self._ssl_context = ssl_context
|
| 289 |
+
self._ssl_handshake_timeout = ssl_handshake_timeout
|
| 290 |
+
self._serving = False
|
| 291 |
+
self._serving_forever_fut = None
|
| 292 |
+
|
| 293 |
+
def __repr__(self):
|
| 294 |
+
return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
|
| 295 |
+
|
| 296 |
+
def _attach(self):
|
| 297 |
+
assert self._sockets is not None
|
| 298 |
+
self._active_count += 1
|
| 299 |
+
|
| 300 |
+
def _detach(self):
|
| 301 |
+
assert self._active_count > 0
|
| 302 |
+
self._active_count -= 1
|
| 303 |
+
if self._active_count == 0 and self._sockets is None:
|
| 304 |
+
self._wakeup()
|
| 305 |
+
|
| 306 |
+
def _wakeup(self):
|
| 307 |
+
waiters = self._waiters
|
| 308 |
+
self._waiters = None
|
| 309 |
+
for waiter in waiters:
|
| 310 |
+
if not waiter.done():
|
| 311 |
+
waiter.set_result(waiter)
|
| 312 |
+
|
| 313 |
+
def _start_serving(self):
|
| 314 |
+
if self._serving:
|
| 315 |
+
return
|
| 316 |
+
self._serving = True
|
| 317 |
+
for sock in self._sockets:
|
| 318 |
+
sock.listen(self._backlog)
|
| 319 |
+
self._loop._start_serving(
|
| 320 |
+
self._protocol_factory, sock, self._ssl_context,
|
| 321 |
+
self, self._backlog, self._ssl_handshake_timeout)
|
| 322 |
+
|
| 323 |
+
def get_loop(self):
|
| 324 |
+
return self._loop
|
| 325 |
+
|
| 326 |
+
def is_serving(self):
|
| 327 |
+
return self._serving
|
| 328 |
+
|
| 329 |
+
@property
|
| 330 |
+
def sockets(self):
|
| 331 |
+
if self._sockets is None:
|
| 332 |
+
return ()
|
| 333 |
+
return tuple(trsock.TransportSocket(s) for s in self._sockets)
|
| 334 |
+
|
| 335 |
+
def close(self):
|
| 336 |
+
sockets = self._sockets
|
| 337 |
+
if sockets is None:
|
| 338 |
+
return
|
| 339 |
+
self._sockets = None
|
| 340 |
+
|
| 341 |
+
for sock in sockets:
|
| 342 |
+
self._loop._stop_serving(sock)
|
| 343 |
+
|
| 344 |
+
self._serving = False
|
| 345 |
+
|
| 346 |
+
if (self._serving_forever_fut is not None and
|
| 347 |
+
not self._serving_forever_fut.done()):
|
| 348 |
+
self._serving_forever_fut.cancel()
|
| 349 |
+
self._serving_forever_fut = None
|
| 350 |
+
|
| 351 |
+
if self._active_count == 0:
|
| 352 |
+
self._wakeup()
|
| 353 |
+
|
| 354 |
+
async def start_serving(self):
|
| 355 |
+
self._start_serving()
|
| 356 |
+
# Skip one loop iteration so that all 'loop.add_reader'
|
| 357 |
+
# go through.
|
| 358 |
+
await tasks.sleep(0)
|
| 359 |
+
|
| 360 |
+
async def serve_forever(self):
|
| 361 |
+
if self._serving_forever_fut is not None:
|
| 362 |
+
raise RuntimeError(
|
| 363 |
+
f'server {self!r} is already being awaited on serve_forever()')
|
| 364 |
+
if self._sockets is None:
|
| 365 |
+
raise RuntimeError(f'server {self!r} is closed')
|
| 366 |
+
|
| 367 |
+
self._start_serving()
|
| 368 |
+
self._serving_forever_fut = self._loop.create_future()
|
| 369 |
+
|
| 370 |
+
try:
|
| 371 |
+
await self._serving_forever_fut
|
| 372 |
+
except exceptions.CancelledError:
|
| 373 |
+
try:
|
| 374 |
+
self.close()
|
| 375 |
+
await self.wait_closed()
|
| 376 |
+
finally:
|
| 377 |
+
raise
|
| 378 |
+
finally:
|
| 379 |
+
self._serving_forever_fut = None
|
| 380 |
+
|
| 381 |
+
async def wait_closed(self):
    """Wait until the server is fully closed."""
    # NOTE(review): returns immediately once close() has run
    # (self._sockets is None) — this matches this version's
    # semantics; confirm before relying on it to wait for active
    # connection handlers.
    if self._sockets is None or self._waiters is None:
        return
    waiter = self._loop.create_future()
    self._waiters.append(waiter)
    await waiter
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
class BaseEventLoop(events.AbstractEventLoop):
|
| 390 |
+
|
| 391 |
+
def __init__(self):
    """Initialize internal loop state (no I/O is performed here)."""
    self._timer_cancelled_count = 0
    self._closed = False
    self._stopping = False
    # FIFO of Handle objects ready to run on the next iteration.
    self._ready = collections.deque()
    # Heap of TimerHandle objects ordered by scheduled time.
    self._scheduled = []
    self._default_executor = None
    self._internal_fds = 0
    # Identifier of the thread running the event loop, or None if the
    # event loop is not running
    self._thread_id = None
    self._clock_resolution = time.get_clock_info('monotonic').resolution
    self._exception_handler = None
    self.set_debug(coroutines._is_debug_mode())
    # In debug mode, if the execution of a callback or a step of a task
    # exceed this duration in seconds, the slow callback/task is logged.
    self.slow_callback_duration = 0.1
    self._current_handle = None
    self._task_factory = None
    self._coroutine_origin_tracking_enabled = False
    self._coroutine_origin_tracking_saved_depth = None

    # A weak set of all asynchronous generators that are
    # being iterated by the loop.
    self._asyncgens = weakref.WeakSet()
    # Set to True when `loop.shutdown_asyncgens` is called.
    self._asyncgens_shutdown_called = False
    # Set to True when `loop.shutdown_default_executor` is called.
    self._executor_shutdown_called = False
|
| 420 |
+
|
| 421 |
+
def __repr__(self):
    """Return a debug representation with running/closed/debug state."""
    return (
        f'<{self.__class__.__name__} running={self.is_running()} '
        f'closed={self.is_closed()} debug={self.get_debug()}>'
    )
|
| 426 |
+
|
| 427 |
+
def create_future(self):
    """Create a Future object attached to the loop."""
    return futures.Future(loop=self)
|
| 430 |
+
|
| 431 |
+
def create_task(self, coro, *, name=None):
    """Schedule a coroutine object.

    Return a task object.  Uses the task factory set via
    set_task_factory() when one is installed.
    """
    self._check_closed()
    if self._task_factory is None:
        task = tasks.Task(coro, loop=self, name=name)
        if task._source_traceback:
            # Drop this frame from the captured creation traceback.
            del task._source_traceback[-1]
    else:
        task = self._task_factory(self, coro)
        tasks._set_task_name(task, name)

    return task
|
| 446 |
+
|
| 447 |
+
def set_task_factory(self, factory):
    """Set a task factory that will be used by loop.create_task().

    If factory is None the default task factory will be set.

    If factory is a callable, it should have a signature matching
    '(loop, coro)', where 'loop' will be a reference to the active
    event loop, 'coro' will be a coroutine object. The callable
    must return a Future.
    """
    # Accept only None or a callable; reject everything else.
    if not (factory is None or callable(factory)):
        raise TypeError('task factory must be a callable or None')
    self._task_factory = factory
|
| 460 |
+
|
| 461 |
+
def get_task_factory(self):
    """Return a task factory, or None if the default one is in use."""
    return self._task_factory
|
| 464 |
+
|
| 465 |
+
def _make_socket_transport(self, sock, protocol, waiter=None, *,
                           extra=None, server=None):
    """Create socket transport.

    Implemented by concrete event loop subclasses.
    """
    raise NotImplementedError
|
| 469 |
+
|
| 470 |
+
def _make_ssl_transport(
        self, rawsock, protocol, sslcontext, waiter=None,
        *, server_side=False, server_hostname=None,
        extra=None, server=None,
        ssl_handshake_timeout=None,
        call_connection_made=True):
    """Create SSL transport.

    Implemented by concrete event loop subclasses.
    """
    raise NotImplementedError
|
| 478 |
+
|
| 479 |
+
def _make_datagram_transport(self, sock, protocol,
                             address=None, waiter=None, extra=None):
    """Create datagram transport.

    Implemented by concrete event loop subclasses.
    """
    raise NotImplementedError
|
| 483 |
+
|
| 484 |
+
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                              extra=None):
    """Create read pipe transport.

    Implemented by concrete event loop subclasses.
    """
    raise NotImplementedError
|
| 488 |
+
|
| 489 |
+
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                               extra=None):
    """Create write pipe transport.

    Implemented by concrete event loop subclasses.
    """
    raise NotImplementedError
|
| 493 |
+
|
| 494 |
+
async def _make_subprocess_transport(self, protocol, args, shell,
                                     stdin, stdout, stderr, bufsize,
                                     extra=None, **kwargs):
    """Create subprocess transport.

    Implemented by concrete event loop subclasses.
    """
    raise NotImplementedError
|
| 499 |
+
|
| 500 |
+
def _write_to_self(self):
    """Write a byte to self-pipe, to wake up the event loop.

    This may be called from a different thread.

    The subclass is responsible for implementing the self-pipe.
    """
    raise NotImplementedError
|
| 508 |
+
|
| 509 |
+
def _process_events(self, event_list):
    """Process selector events.

    Implemented by concrete event loop subclasses.
    """
    raise NotImplementedError
|
| 512 |
+
|
| 513 |
+
def _check_closed(self):
    """Raise RuntimeError if the loop has been closed."""
    if self._closed:
        raise RuntimeError('Event loop is closed')
|
| 516 |
+
|
| 517 |
+
def _check_default_executor(self):
    """Raise RuntimeError if shutdown_default_executor() was called."""
    if self._executor_shutdown_called:
        raise RuntimeError('Executor shutdown has been called')
|
| 520 |
+
|
| 521 |
+
def _asyncgen_finalizer_hook(self, agen):
    """Finalizer hook: schedule aclose() for a GC'd async generator.

    Installed via sys.set_asyncgen_hooks() in run_forever(); may be
    invoked from another thread, hence call_soon_threadsafe.
    """
    self._asyncgens.discard(agen)
    if not self.is_closed():
        self.call_soon_threadsafe(self.create_task, agen.aclose())
|
| 525 |
+
|
| 526 |
+
def _asyncgen_firstiter_hook(self, agen):
    """First-iteration hook: track a newly iterated async generator.

    Warns when a generator starts after shutdown_asyncgens() was
    already called, since it can no longer be closed by the loop.
    """
    if self._asyncgens_shutdown_called:
        warnings.warn(
            f"asynchronous generator {agen!r} was scheduled after "
            f"loop.shutdown_asyncgens() call",
            ResourceWarning, source=self)

    self._asyncgens.add(agen)
|
| 534 |
+
|
| 535 |
+
async def shutdown_asyncgens(self):
    """Shutdown all active asynchronous generators.

    Closes every tracked generator concurrently; errors raised while
    closing are reported via the loop's exception handler rather
    than propagated.
    """
    self._asyncgens_shutdown_called = True

    if not len(self._asyncgens):
        # If Python version is <3.6 or we don't have any asynchronous
        # generators alive.
        return

    # Snapshot then clear: closing may trigger the hooks above.
    closing_agens = list(self._asyncgens)
    self._asyncgens.clear()

    results = await tasks.gather(
        *[ag.aclose() for ag in closing_agens],
        return_exceptions=True)

    for result, agen in zip(results, closing_agens):
        if isinstance(result, Exception):
            self.call_exception_handler({
                'message': f'an error occurred during closing of '
                           f'asynchronous generator {agen!r}',
                'exception': result,
                'asyncgen': agen
            })
|
| 559 |
+
|
| 560 |
+
async def shutdown_default_executor(self):
    """Schedule the shutdown of the default executor.

    Runs the blocking executor shutdown in a helper thread so the
    event loop stays responsive while waiting.
    """
    self._executor_shutdown_called = True
    if self._default_executor is None:
        # Nothing was ever submitted to the default executor.
        return
    future = self.create_future()
    thread = threading.Thread(target=self._do_shutdown, args=(future,))
    thread.start()
    try:
        await future
    finally:
        # Always reap the helper thread.
        thread.join()
|
| 572 |
+
|
| 573 |
+
def _do_shutdown(self, future):
    """Thread target: shut down the default executor, resolve *future*.

    Runs in a helper thread, so results/exceptions are delivered back
    to the loop via call_soon_threadsafe.
    """
    try:
        self._default_executor.shutdown(wait=True)
        if not self.is_closed():
            self.call_soon_threadsafe(future.set_result, None)
    except Exception as ex:
        if not self.is_closed():
            self.call_soon_threadsafe(future.set_exception, ex)
|
| 581 |
+
|
| 582 |
+
def _check_running(self):
    """Raise if this loop, or any loop in this thread, is running."""
    if self.is_running():
        raise RuntimeError('This event loop is already running')
    if events._get_running_loop() is not None:
        raise RuntimeError(
            'Cannot run the event loop while another loop is running')
|
| 588 |
+
|
| 589 |
+
def run_forever(self):
    """Run until stop() is called.

    Installs this loop as the running loop for the current thread and
    the async-generator hooks, then iterates _run_once() until
    stop() sets the stopping flag.  All global state is restored on
    exit.
    """
    self._check_closed()
    self._check_running()
    self._set_coroutine_origin_tracking(self._debug)

    # Save hooks so nested/sequential loop runs restore them intact.
    old_agen_hooks = sys.get_asyncgen_hooks()
    try:
        self._thread_id = threading.get_ident()
        sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
                               finalizer=self._asyncgen_finalizer_hook)

        events._set_running_loop(self)
        while True:
            self._run_once()
            if self._stopping:
                break
    finally:
        self._stopping = False
        self._thread_id = None
        events._set_running_loop(None)
        self._set_coroutine_origin_tracking(False)
        sys.set_asyncgen_hooks(*old_agen_hooks)
|
| 612 |
+
|
| 613 |
+
def run_until_complete(self, future):
    """Run until the Future is done.

    If the argument is a coroutine, it is wrapped in a Task.

    WARNING: It would be disastrous to call run_until_complete()
    with the same coroutine twice -- it would wrap it in two
    different Tasks and that can't be good.

    Return the Future's result, or raise its exception.
    """
    self._check_closed()
    self._check_running()

    new_task = not futures.isfuture(future)
    future = tasks.ensure_future(future, loop=self)
    if new_task:
        # An exception is raised if the future didn't complete, so there
        # is no need to log the "destroy pending task" message
        future._log_destroy_pending = False

    # The callback stops the loop as soon as the future completes.
    future.add_done_callback(_run_until_complete_cb)
    try:
        self.run_forever()
    except:
        # Bare except is deliberate: BaseException (e.g.
        # KeyboardInterrupt) must also trigger this cleanup.
        if new_task and future.done() and not future.cancelled():
            # The coroutine raised a BaseException. Consume the exception
            # to not log a warning, the caller doesn't have access to the
            # local task.
            future.exception()
        raise
    finally:
        future.remove_done_callback(_run_until_complete_cb)
    if not future.done():
        raise RuntimeError('Event loop stopped before Future completed.')

    return future.result()
|
| 650 |
+
|
| 651 |
+
def stop(self):
    """Stop running the event loop.

    Every callback already scheduled will still run. This simply informs
    run_forever to stop looping after a complete iteration.
    """
    self._stopping = True
|
| 658 |
+
|
| 659 |
+
def close(self):
    """Close the event loop.

    This clears the queues and shuts down the executor,
    but does not wait for the executor to finish.

    The event loop must not be running.
    """
    if self.is_running():
        raise RuntimeError("Cannot close a running event loop")
    if self._closed:
        # Idempotent: closing twice is a no-op.
        return
    if self._debug:
        logger.debug("Close %r", self)
    self._closed = True
    self._ready.clear()
    self._scheduled.clear()
    self._executor_shutdown_called = True
    executor = self._default_executor
    if executor is not None:
        self._default_executor = None
        # Non-blocking shutdown: pending work may still finish later.
        executor.shutdown(wait=False)
|
| 681 |
+
|
| 682 |
+
def is_closed(self):
    """Returns True if the event loop was closed."""
    return self._closed
|
| 685 |
+
|
| 686 |
+
def __del__(self, _warn=warnings.warn):
    """Warn about (and best-effort close) an unclosed loop at GC time."""
    # _warn is bound as a default so it survives interpreter shutdown.
    if not self.is_closed():
        _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
        if not self.is_running():
            self.close()
|
| 691 |
+
|
| 692 |
+
def is_running(self):
    """Returns True if the event loop is running."""
    # _thread_id is set for the duration of run_forever() only.
    return (self._thread_id is not None)
|
| 695 |
+
|
| 696 |
+
def time(self):
    """Return the time according to the event loop's clock.

    This is a float expressed in seconds since an epoch, but the
    epoch, precision, accuracy and drift are unspecified and may
    differ per event loop.
    """
    return time.monotonic()
|
| 704 |
+
|
| 705 |
+
def call_later(self, delay, callback, *args, context=None):
    """Arrange for a callback to be called at a given time.

    Return a Handle: an opaque object with a cancel() method that
    can be used to cancel the call.

    The delay can be an int or float, expressed in seconds. It is
    always relative to the current time.

    Each callback will be called exactly once. If two callbacks
    are scheduled for exactly the same time, it undefined which
    will be called first.

    Any positional arguments after the callback will be passed to
    the callback when it is called.
    """
    timer = self.call_at(self.time() + delay, callback, *args,
                         context=context)
    if timer._source_traceback:
        # Drop this wrapper frame from the captured traceback.
        del timer._source_traceback[-1]
    return timer
|
| 726 |
+
|
| 727 |
+
def call_at(self, when, callback, *args, context=None):
    """Like call_later(), but uses an absolute time.

    Absolute time corresponds to the event loop's time() method.
    """
    self._check_closed()
    if self._debug:
        self._check_thread()
        self._check_callback(callback, 'call_at')
    timer = events.TimerHandle(when, callback, args, self, context)
    if timer._source_traceback:
        del timer._source_traceback[-1]
    # Timers are kept in a heap ordered by their scheduled time.
    heapq.heappush(self._scheduled, timer)
    timer._scheduled = True
    return timer
|
| 742 |
+
|
| 743 |
+
def call_soon(self, callback, *args, context=None):
    """Arrange for a callback to be called as soon as possible.

    This operates as a FIFO queue: callbacks are called in the
    order in which they are registered. Each callback will be
    called exactly once.

    Any positional arguments after the callback will be passed to
    the callback when it is called.
    """
    self._check_closed()
    if self._debug:
        self._check_thread()
        self._check_callback(callback, 'call_soon')
    handle = self._call_soon(callback, args, context)
    if handle._source_traceback:
        del handle._source_traceback[-1]
    return handle
|
| 761 |
+
|
| 762 |
+
def _check_callback(self, callback, method):
    """Reject coroutines and non-callables passed to *method* (debug)."""
    if (coroutines.iscoroutine(callback) or
            coroutines.iscoroutinefunction(callback)):
        raise TypeError(
            f"coroutines cannot be used with {method}()")
    if not callable(callback):
        raise TypeError(
            f'a callable object was expected by {method}(), '
            f'got {callback!r}')
|
| 771 |
+
|
| 772 |
+
def _call_soon(self, callback, args, context):
    """Create a Handle for *callback* and append it to the ready queue."""
    handle = events.Handle(callback, args, self, context)
    if handle._source_traceback:
        # Drop this helper frame from the captured traceback.
        del handle._source_traceback[-1]
    self._ready.append(handle)
    return handle
|
| 778 |
+
|
| 779 |
+
def _check_thread(self):
    """Check that the current thread is the thread running the event loop.

    Non-thread-safe methods of this class make this assumption and will
    likely behave incorrectly when the assumption is violated.

    Should only be called when (self._debug == True). The caller is
    responsible for checking this condition for performance reasons.
    """
    if self._thread_id is None:
        # Loop is not running: any thread may schedule callbacks.
        return
    thread_id = threading.get_ident()
    if thread_id != self._thread_id:
        raise RuntimeError(
            "Non-thread-safe operation invoked on an event loop other "
            "than the current one")
|
| 795 |
+
|
| 796 |
+
def call_soon_threadsafe(self, callback, *args, context=None):
    """Like call_soon(), but thread-safe."""
    self._check_closed()
    if self._debug:
        self._check_callback(callback, 'call_soon_threadsafe')
    handle = self._call_soon(callback, args, context)
    if handle._source_traceback:
        del handle._source_traceback[-1]
    # Wake the loop's selector so the callback runs promptly even if
    # this was called from another thread while the loop is polling.
    self._write_to_self()
    return handle
|
| 806 |
+
|
| 807 |
+
def run_in_executor(self, executor, func, *args):
    """Run *func(*args)* in *executor*; return a Future for the result.

    When *executor* is None the loop's default ThreadPoolExecutor is
    used, creating it lazily on first use.
    """
    self._check_closed()
    if self._debug:
        self._check_callback(func, 'run_in_executor')
    if executor is None:
        executor = self._default_executor
        # Only check when the default executor is being used
        self._check_default_executor()
        if executor is None:
            executor = concurrent.futures.ThreadPoolExecutor(
                thread_name_prefix='asyncio'
            )
            self._default_executor = executor
    return futures.wrap_future(
        executor.submit(func, *args), loop=self)
|
| 822 |
+
|
| 823 |
+
def set_default_executor(self, executor):
    """Set the default executor used by run_in_executor().

    Non-ThreadPoolExecutor instances are accepted but deprecated.
    """
    if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
        warnings.warn(
            'Using the default executor that is not an instance of '
            'ThreadPoolExecutor is deprecated and will be prohibited '
            'in Python 3.9',
            DeprecationWarning, 2)
    self._default_executor = executor
|
| 831 |
+
|
| 832 |
+
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
    """Debug wrapper around socket.getaddrinfo() that logs timing."""
    msg = [f"{host}:{port!r}"]
    if family:
        msg.append(f'family={family!r}')
    if type:
        msg.append(f'type={type!r}')
    if proto:
        msg.append(f'proto={proto!r}')
    if flags:
        msg.append(f'flags={flags!r}')
    msg = ', '.join(msg)
    logger.debug('Get address info %s', msg)

    t0 = self.time()
    addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
    dt = self.time() - t0

    msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
    # Promote to INFO level when resolution was slow.
    if dt >= self.slow_callback_duration:
        logger.info(msg)
    else:
        logger.debug(msg)
    return addrinfo
|
| 855 |
+
|
| 856 |
+
async def getaddrinfo(self, host, port, *,
                      family=0, type=0, proto=0, flags=0):
    """Asynchronous socket.getaddrinfo(), run in the default executor."""
    if self._debug:
        getaddr_func = self._getaddrinfo_debug
    else:
        getaddr_func = socket.getaddrinfo

    return await self.run_in_executor(
        None, getaddr_func, host, port, family, type, proto, flags)
|
| 865 |
+
|
| 866 |
+
async def getnameinfo(self, sockaddr, flags=0):
    """Asynchronous socket.getnameinfo(), run in the default executor."""
    return await self.run_in_executor(
        None, socket.getnameinfo, sockaddr, flags)
|
| 869 |
+
|
| 870 |
+
async def sock_sendfile(self, sock, file, offset=0, count=None,
                        *, fallback=True):
    """Send *file* over *sock* using os.sendfile when possible.

    Falls back to a read/sendall loop when the native syscall is not
    available, unless *fallback* is False.
    """
    if self._debug and sock.gettimeout() != 0:
        raise ValueError("the socket must be non-blocking")
    _check_ssl_socket(sock)
    self._check_sendfile_params(sock, file, offset, count)
    try:
        return await self._sock_sendfile_native(sock, file,
                                                offset, count)
    except exceptions.SendfileNotAvailableError as exc:
        if not fallback:
            raise
    return await self._sock_sendfile_fallback(sock, file,
                                              offset, count)
|
| 884 |
+
|
| 885 |
+
async def _sock_sendfile_native(self, sock, file, offset, count):
    """Default implementation: native sendfile is unavailable.

    Overridden by event loops that can use the syscall.
    """
    # NB: sendfile syscall is not supported for SSL sockets and
    # non-mmap files even if sendfile is supported by OS
    raise exceptions.SendfileNotAvailableError(
        f"syscall sendfile is not available for socket {sock!r} "
        f"and file {file!r} combination")
|
| 891 |
+
|
| 892 |
+
async def _sock_sendfile_fallback(self, sock, file, offset, count):
    """Userspace sendfile: read chunks in the executor, sendall on sock.

    Returns the total number of bytes sent; on exit the file position
    is advanced past the data that was actually transmitted.
    """
    if offset:
        file.seek(offset)
    blocksize = (
        min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
        if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
    )
    buf = bytearray(blocksize)
    total_sent = 0
    try:
        while True:
            if count:
                # Never read past the requested byte count.
                blocksize = min(count - total_sent, blocksize)
                if blocksize <= 0:
                    break
            view = memoryview(buf)[:blocksize]
            # readinto may block, so it runs in the default executor.
            read = await self.run_in_executor(None, file.readinto, view)
            if not read:
                break  # EOF
            await self.sock_sendall(sock, view[:read])
            total_sent += read
        return total_sent
    finally:
        if total_sent > 0 and hasattr(file, 'seek'):
            file.seek(offset + total_sent)
|
| 917 |
+
|
| 918 |
+
def _check_sendfile_params(self, sock, file, offset, count):
    """Validate sock_sendfile() arguments; raise TypeError/ValueError."""
    # Files without a 'mode' attribute (e.g. BytesIO) are accepted.
    if 'b' not in getattr(file, 'mode', 'b'):
        raise ValueError("file should be opened in binary mode")
    if not sock.type == socket.SOCK_STREAM:
        raise ValueError("only SOCK_STREAM type sockets are supported")
    if count is not None:
        if not isinstance(count, int):
            raise TypeError(
                "count must be a positive integer (got {!r})".format(count))
        if count <= 0:
            raise ValueError(
                "count must be a positive integer (got {!r})".format(count))
    if not isinstance(offset, int):
        raise TypeError(
            "offset must be a non-negative integer (got {!r})".format(
                offset))
    if offset < 0:
        raise ValueError(
            "offset must be a non-negative integer (got {!r})".format(
                offset))
|
| 938 |
+
|
| 939 |
+
async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
    """Create, bind and connect one socket.

    Errors for this attempt are appended (as a sub-list) to the shared
    *exceptions* accumulator so the caller can report all failures.
    """
    my_exceptions = []
    exceptions.append(my_exceptions)
    family, type_, proto, _, address = addr_info
    sock = None
    try:
        sock = socket.socket(family=family, type=type_, proto=proto)
        sock.setblocking(False)
        if local_addr_infos is not None:
            for lfamily, _, _, _, laddr in local_addr_infos:
                # skip local addresses of different family
                if lfamily != family:
                    continue
                try:
                    sock.bind(laddr)
                    break
                except OSError as exc:
                    msg = (
                        f'error while attempting to bind on '
                        f'address {laddr!r}: '
                        f'{exc.strerror.lower()}'
                    )
                    exc = OSError(exc.errno, msg)
                    my_exceptions.append(exc)
            else:  # all bind attempts failed
                if my_exceptions:
                    raise my_exceptions.pop()
                else:
                    raise OSError(f"no matching local address with {family=} found")
        await self.sock_connect(sock, address)
        return sock
    except OSError as exc:
        my_exceptions.append(exc)
        if sock is not None:
            sock.close()
        raise
    except:
        # Any other error (including CancelledError): close and re-raise.
        if sock is not None:
            sock.close()
        raise
    finally:
        # Break reference cycles with the accumulated exceptions.
        exceptions = my_exceptions = None
|
| 982 |
+
|
| 983 |
+
async def create_connection(
        self, protocol_factory, host=None, port=None,
        *, ssl=None, family=0,
        proto=0, flags=0, sock=None,
        local_addr=None, server_hostname=None,
        ssl_handshake_timeout=None,
        happy_eyeballs_delay=None, interleave=None):
    """Connect to a TCP server.

    Create a streaming transport connection to a given internet host and
    port: socket family AF_INET or socket.AF_INET6 depending on host (or
    family if specified), socket type SOCK_STREAM. protocol_factory must be
    a callable returning a protocol instance.

    This method is a coroutine which will try to establish the connection
    in the background. When successful, the coroutine returns a
    (transport, protocol) pair.
    """
    if server_hostname is not None and not ssl:
        raise ValueError('server_hostname is only meaningful with ssl')

    if server_hostname is None and ssl:
        # Use host as default for server_hostname. It is an error
        # if host is empty or not set, e.g. when an
        # already-connected socket was passed or when only a port
        # is given. To avoid this error, you can pass
        # server_hostname='' -- this will bypass the hostname
        # check. (This also means that if host is a numeric
        # IP/IPv6 address, we will attempt to verify that exact
        # address; this will probably fail, but it is possible to
        # create a certificate for a specific IP address, so we
        # don't judge it here.)
        if not host:
            raise ValueError('You must set server_hostname '
                             'when using ssl without a host')
        server_hostname = host

    if ssl_handshake_timeout is not None and not ssl:
        raise ValueError(
            'ssl_handshake_timeout is only meaningful with ssl')

    if sock is not None:
        _check_ssl_socket(sock)

    if happy_eyeballs_delay is not None and interleave is None:
        # If using happy eyeballs, default to interleave addresses by family
        interleave = 1

    if host is not None or port is not None:
        if sock is not None:
            raise ValueError(
                'host/port and sock can not be specified at the same time')

        # Resolve the remote (and optionally local) addresses first.
        infos = await self._ensure_resolved(
            (host, port), family=family,
            type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
        if not infos:
            raise OSError('getaddrinfo() returned empty list')

        if local_addr is not None:
            laddr_infos = await self._ensure_resolved(
                local_addr, family=family,
                type=socket.SOCK_STREAM, proto=proto,
                flags=flags, loop=self)
            if not laddr_infos:
                raise OSError('getaddrinfo() returned empty list')
        else:
            laddr_infos = None

        if interleave:
            infos = _interleave_addrinfos(infos, interleave)

        exceptions = []
        if happy_eyeballs_delay is None:
            # not using happy eyeballs
            for addrinfo in infos:
                try:
                    sock = await self._connect_sock(
                        exceptions, addrinfo, laddr_infos)
                    break
                except OSError:
                    continue
        else:  # using happy eyeballs
            # Race connection attempts, staggered by the given delay.
            sock, _, _ = await staggered.staggered_race(
                (functools.partial(self._connect_sock,
                                   exceptions, addrinfo, laddr_infos)
                 for addrinfo in infos),
                happy_eyeballs_delay, loop=self)

        if sock is None:
            # Every attempt failed; flatten the per-attempt sub-lists.
            exceptions = [exc for sub in exceptions for exc in sub]
            try:
                if len(exceptions) == 1:
                    raise exceptions[0]
                else:
                    # If they all have the same str(), raise one.
                    model = str(exceptions[0])
                    if all(str(exc) == model for exc in exceptions):
                        raise exceptions[0]
                    # Raise a combined exception so the user can see all
                    # the various error messages.
                    raise OSError('Multiple exceptions: {}'.format(
                        ', '.join(str(exc) for exc in exceptions)))
            finally:
                exceptions = None

    else:
        if sock is None:
            raise ValueError(
                'host and port was not specified and no sock specified')
        if sock.type != socket.SOCK_STREAM:
            # We allow AF_INET, AF_INET6, AF_UNIX as long as they
            # are SOCK_STREAM.
            # We support passing AF_UNIX sockets even though we have
            # a dedicated API for that: create_unix_connection.
            # Disallowing AF_UNIX in this method, breaks backwards
            # compatibility.
            raise ValueError(
                f'A Stream Socket was expected, got {sock!r}')

    transport, protocol = await self._create_connection_transport(
        sock, protocol_factory, ssl, server_hostname,
        ssl_handshake_timeout=ssl_handshake_timeout)
    if self._debug:
        # Get the socket from the transport because SSL transport closes
        # the old socket and creates a new SSL socket
        sock = transport.get_extra_info('socket')
        logger.debug("%r connected to %s:%r: (%r, %r)",
                     sock, host, port, transport, protocol)
    return transport, protocol
|
| 1113 |
+
|
| 1114 |
+
async def _create_connection_transport(
        self, sock, protocol_factory, ssl,
        server_hostname, server_side=False,
        ssl_handshake_timeout=None):
    """Wrap a connected socket in a (possibly SSL) transport.

    Waits for the transport to signal readiness via *waiter* and
    closes it if connection setup fails.
    """
    sock.setblocking(False)

    protocol = protocol_factory()
    waiter = self.create_future()
    if ssl:
        # ssl=True means "use a default context"; otherwise the given
        # SSLContext is used as-is.
        sslcontext = None if isinstance(ssl, bool) else ssl
        transport = self._make_ssl_transport(
            sock, protocol, sslcontext, waiter,
            server_side=server_side, server_hostname=server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
    else:
        transport = self._make_socket_transport(sock, protocol, waiter)

    try:
        await waiter
    except:
        # Setup failed (including cancellation): release the transport.
        transport.close()
        raise

    return transport, protocol
|
| 1139 |
+
|
| 1140 |
+
async def sendfile(self, transport, file, offset=0, count=None,
|
| 1141 |
+
*, fallback=True):
|
| 1142 |
+
"""Send a file to transport.
|
| 1143 |
+
|
| 1144 |
+
Return the total number of bytes which were sent.
|
| 1145 |
+
|
| 1146 |
+
The method uses high-performance os.sendfile if available.
|
| 1147 |
+
|
| 1148 |
+
file must be a regular file object opened in binary mode.
|
| 1149 |
+
|
| 1150 |
+
offset tells from where to start reading the file. If specified,
|
| 1151 |
+
count is the total number of bytes to transmit as opposed to
|
| 1152 |
+
sending the file until EOF is reached. File position is updated on
|
| 1153 |
+
return or also in case of error in which case file.tell()
|
| 1154 |
+
can be used to figure out the number of bytes
|
| 1155 |
+
which were sent.
|
| 1156 |
+
|
| 1157 |
+
fallback set to True makes asyncio to manually read and send
|
| 1158 |
+
the file when the platform does not support the sendfile syscall
|
| 1159 |
+
(e.g. Windows or SSL socket on Unix).
|
| 1160 |
+
|
| 1161 |
+
Raise SendfileNotAvailableError if the system does not support
|
| 1162 |
+
sendfile syscall and fallback is False.
|
| 1163 |
+
"""
|
| 1164 |
+
if transport.is_closing():
|
| 1165 |
+
raise RuntimeError("Transport is closing")
|
| 1166 |
+
mode = getattr(transport, '_sendfile_compatible',
|
| 1167 |
+
constants._SendfileMode.UNSUPPORTED)
|
| 1168 |
+
if mode is constants._SendfileMode.UNSUPPORTED:
|
| 1169 |
+
raise RuntimeError(
|
| 1170 |
+
f"sendfile is not supported for transport {transport!r}")
|
| 1171 |
+
if mode is constants._SendfileMode.TRY_NATIVE:
|
| 1172 |
+
try:
|
| 1173 |
+
return await self._sendfile_native(transport, file,
|
| 1174 |
+
offset, count)
|
| 1175 |
+
except exceptions.SendfileNotAvailableError as exc:
|
| 1176 |
+
if not fallback:
|
| 1177 |
+
raise
|
| 1178 |
+
|
| 1179 |
+
if not fallback:
|
| 1180 |
+
raise RuntimeError(
|
| 1181 |
+
f"fallback is disabled and native sendfile is not "
|
| 1182 |
+
f"supported for transport {transport!r}")
|
| 1183 |
+
|
| 1184 |
+
return await self._sendfile_fallback(transport, file,
|
| 1185 |
+
offset, count)
|
| 1186 |
+
|
| 1187 |
+
async def _sendfile_native(self, transp, file, offset, count):
|
| 1188 |
+
raise exceptions.SendfileNotAvailableError(
|
| 1189 |
+
"sendfile syscall is not supported")
|
| 1190 |
+
|
| 1191 |
+
async def _sendfile_fallback(self, transp, file, offset, count):
|
| 1192 |
+
if offset:
|
| 1193 |
+
file.seek(offset)
|
| 1194 |
+
blocksize = min(count, 16384) if count else 16384
|
| 1195 |
+
buf = bytearray(blocksize)
|
| 1196 |
+
total_sent = 0
|
| 1197 |
+
proto = _SendfileFallbackProtocol(transp)
|
| 1198 |
+
try:
|
| 1199 |
+
while True:
|
| 1200 |
+
if count:
|
| 1201 |
+
blocksize = min(count - total_sent, blocksize)
|
| 1202 |
+
if blocksize <= 0:
|
| 1203 |
+
return total_sent
|
| 1204 |
+
view = memoryview(buf)[:blocksize]
|
| 1205 |
+
read = await self.run_in_executor(None, file.readinto, view)
|
| 1206 |
+
if not read:
|
| 1207 |
+
return total_sent # EOF
|
| 1208 |
+
await proto.drain()
|
| 1209 |
+
transp.write(view[:read])
|
| 1210 |
+
total_sent += read
|
| 1211 |
+
finally:
|
| 1212 |
+
if total_sent > 0 and hasattr(file, 'seek'):
|
| 1213 |
+
file.seek(offset + total_sent)
|
| 1214 |
+
await proto.restore()
|
| 1215 |
+
|
| 1216 |
+
async def start_tls(self, transport, protocol, sslcontext, *,
|
| 1217 |
+
server_side=False,
|
| 1218 |
+
server_hostname=None,
|
| 1219 |
+
ssl_handshake_timeout=None):
|
| 1220 |
+
"""Upgrade transport to TLS.
|
| 1221 |
+
|
| 1222 |
+
Return a new transport that *protocol* should start using
|
| 1223 |
+
immediately.
|
| 1224 |
+
"""
|
| 1225 |
+
if ssl is None:
|
| 1226 |
+
raise RuntimeError('Python ssl module is not available')
|
| 1227 |
+
|
| 1228 |
+
if not isinstance(sslcontext, ssl.SSLContext):
|
| 1229 |
+
raise TypeError(
|
| 1230 |
+
f'sslcontext is expected to be an instance of ssl.SSLContext, '
|
| 1231 |
+
f'got {sslcontext!r}')
|
| 1232 |
+
|
| 1233 |
+
if not getattr(transport, '_start_tls_compatible', False):
|
| 1234 |
+
raise TypeError(
|
| 1235 |
+
f'transport {transport!r} is not supported by start_tls()')
|
| 1236 |
+
|
| 1237 |
+
waiter = self.create_future()
|
| 1238 |
+
ssl_protocol = sslproto.SSLProtocol(
|
| 1239 |
+
self, protocol, sslcontext, waiter,
|
| 1240 |
+
server_side, server_hostname,
|
| 1241 |
+
ssl_handshake_timeout=ssl_handshake_timeout,
|
| 1242 |
+
call_connection_made=False)
|
| 1243 |
+
|
| 1244 |
+
# Pause early so that "ssl_protocol.data_received()" doesn't
|
| 1245 |
+
# have a chance to get called before "ssl_protocol.connection_made()".
|
| 1246 |
+
transport.pause_reading()
|
| 1247 |
+
|
| 1248 |
+
transport.set_protocol(ssl_protocol)
|
| 1249 |
+
conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
|
| 1250 |
+
resume_cb = self.call_soon(transport.resume_reading)
|
| 1251 |
+
|
| 1252 |
+
try:
|
| 1253 |
+
await waiter
|
| 1254 |
+
except BaseException:
|
| 1255 |
+
transport.close()
|
| 1256 |
+
conmade_cb.cancel()
|
| 1257 |
+
resume_cb.cancel()
|
| 1258 |
+
raise
|
| 1259 |
+
|
| 1260 |
+
return ssl_protocol._app_transport
|
| 1261 |
+
|
| 1262 |
+
async def create_datagram_endpoint(self, protocol_factory,
|
| 1263 |
+
local_addr=None, remote_addr=None, *,
|
| 1264 |
+
family=0, proto=0, flags=0,
|
| 1265 |
+
reuse_address=_unset, reuse_port=None,
|
| 1266 |
+
allow_broadcast=None, sock=None):
|
| 1267 |
+
"""Create datagram connection."""
|
| 1268 |
+
if sock is not None:
|
| 1269 |
+
if sock.type != socket.SOCK_DGRAM:
|
| 1270 |
+
raise ValueError(
|
| 1271 |
+
f'A UDP Socket was expected, got {sock!r}')
|
| 1272 |
+
if (local_addr or remote_addr or
|
| 1273 |
+
family or proto or flags or
|
| 1274 |
+
reuse_port or allow_broadcast):
|
| 1275 |
+
# show the problematic kwargs in exception msg
|
| 1276 |
+
opts = dict(local_addr=local_addr, remote_addr=remote_addr,
|
| 1277 |
+
family=family, proto=proto, flags=flags,
|
| 1278 |
+
reuse_address=reuse_address, reuse_port=reuse_port,
|
| 1279 |
+
allow_broadcast=allow_broadcast)
|
| 1280 |
+
problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
|
| 1281 |
+
raise ValueError(
|
| 1282 |
+
f'socket modifier keyword arguments can not be used '
|
| 1283 |
+
f'when sock is specified. ({problems})')
|
| 1284 |
+
sock.setblocking(False)
|
| 1285 |
+
r_addr = None
|
| 1286 |
+
else:
|
| 1287 |
+
if not (local_addr or remote_addr):
|
| 1288 |
+
if family == 0:
|
| 1289 |
+
raise ValueError('unexpected address family')
|
| 1290 |
+
addr_pairs_info = (((family, proto), (None, None)),)
|
| 1291 |
+
elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
|
| 1292 |
+
for addr in (local_addr, remote_addr):
|
| 1293 |
+
if addr is not None and not isinstance(addr, str):
|
| 1294 |
+
raise TypeError('string is expected')
|
| 1295 |
+
|
| 1296 |
+
if local_addr and local_addr[0] not in (0, '\x00'):
|
| 1297 |
+
try:
|
| 1298 |
+
if stat.S_ISSOCK(os.stat(local_addr).st_mode):
|
| 1299 |
+
os.remove(local_addr)
|
| 1300 |
+
except FileNotFoundError:
|
| 1301 |
+
pass
|
| 1302 |
+
except OSError as err:
|
| 1303 |
+
# Directory may have permissions only to create socket.
|
| 1304 |
+
logger.error('Unable to check or remove stale UNIX '
|
| 1305 |
+
'socket %r: %r',
|
| 1306 |
+
local_addr, err)
|
| 1307 |
+
|
| 1308 |
+
addr_pairs_info = (((family, proto),
|
| 1309 |
+
(local_addr, remote_addr)), )
|
| 1310 |
+
else:
|
| 1311 |
+
# join address by (family, protocol)
|
| 1312 |
+
addr_infos = {} # Using order preserving dict
|
| 1313 |
+
for idx, addr in ((0, local_addr), (1, remote_addr)):
|
| 1314 |
+
if addr is not None:
|
| 1315 |
+
assert isinstance(addr, tuple) and len(addr) == 2, (
|
| 1316 |
+
'2-tuple is expected')
|
| 1317 |
+
|
| 1318 |
+
infos = await self._ensure_resolved(
|
| 1319 |
+
addr, family=family, type=socket.SOCK_DGRAM,
|
| 1320 |
+
proto=proto, flags=flags, loop=self)
|
| 1321 |
+
if not infos:
|
| 1322 |
+
raise OSError('getaddrinfo() returned empty list')
|
| 1323 |
+
|
| 1324 |
+
for fam, _, pro, _, address in infos:
|
| 1325 |
+
key = (fam, pro)
|
| 1326 |
+
if key not in addr_infos:
|
| 1327 |
+
addr_infos[key] = [None, None]
|
| 1328 |
+
addr_infos[key][idx] = address
|
| 1329 |
+
|
| 1330 |
+
# each addr has to have info for each (family, proto) pair
|
| 1331 |
+
addr_pairs_info = [
|
| 1332 |
+
(key, addr_pair) for key, addr_pair in addr_infos.items()
|
| 1333 |
+
if not ((local_addr and addr_pair[0] is None) or
|
| 1334 |
+
(remote_addr and addr_pair[1] is None))]
|
| 1335 |
+
|
| 1336 |
+
if not addr_pairs_info:
|
| 1337 |
+
raise ValueError('can not get address information')
|
| 1338 |
+
|
| 1339 |
+
exceptions = []
|
| 1340 |
+
|
| 1341 |
+
# bpo-37228
|
| 1342 |
+
if reuse_address is not _unset:
|
| 1343 |
+
if reuse_address:
|
| 1344 |
+
raise ValueError("Passing `reuse_address=True` is no "
|
| 1345 |
+
"longer supported, as the usage of "
|
| 1346 |
+
"SO_REUSEPORT in UDP poses a significant "
|
| 1347 |
+
"security concern.")
|
| 1348 |
+
else:
|
| 1349 |
+
warnings.warn("The *reuse_address* parameter has been "
|
| 1350 |
+
"deprecated as of 3.5.10 and is scheduled "
|
| 1351 |
+
"for removal in 3.11.", DeprecationWarning,
|
| 1352 |
+
stacklevel=2)
|
| 1353 |
+
|
| 1354 |
+
for ((family, proto),
|
| 1355 |
+
(local_address, remote_address)) in addr_pairs_info:
|
| 1356 |
+
sock = None
|
| 1357 |
+
r_addr = None
|
| 1358 |
+
try:
|
| 1359 |
+
sock = socket.socket(
|
| 1360 |
+
family=family, type=socket.SOCK_DGRAM, proto=proto)
|
| 1361 |
+
if reuse_port:
|
| 1362 |
+
_set_reuseport(sock)
|
| 1363 |
+
if allow_broadcast:
|
| 1364 |
+
sock.setsockopt(
|
| 1365 |
+
socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
|
| 1366 |
+
sock.setblocking(False)
|
| 1367 |
+
|
| 1368 |
+
if local_addr:
|
| 1369 |
+
sock.bind(local_address)
|
| 1370 |
+
if remote_addr:
|
| 1371 |
+
if not allow_broadcast:
|
| 1372 |
+
await self.sock_connect(sock, remote_address)
|
| 1373 |
+
r_addr = remote_address
|
| 1374 |
+
except OSError as exc:
|
| 1375 |
+
if sock is not None:
|
| 1376 |
+
sock.close()
|
| 1377 |
+
exceptions.append(exc)
|
| 1378 |
+
except:
|
| 1379 |
+
if sock is not None:
|
| 1380 |
+
sock.close()
|
| 1381 |
+
raise
|
| 1382 |
+
else:
|
| 1383 |
+
break
|
| 1384 |
+
else:
|
| 1385 |
+
raise exceptions[0]
|
| 1386 |
+
|
| 1387 |
+
protocol = protocol_factory()
|
| 1388 |
+
waiter = self.create_future()
|
| 1389 |
+
transport = self._make_datagram_transport(
|
| 1390 |
+
sock, protocol, r_addr, waiter)
|
| 1391 |
+
if self._debug:
|
| 1392 |
+
if local_addr:
|
| 1393 |
+
logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
|
| 1394 |
+
"created: (%r, %r)",
|
| 1395 |
+
local_addr, remote_addr, transport, protocol)
|
| 1396 |
+
else:
|
| 1397 |
+
logger.debug("Datagram endpoint remote_addr=%r created: "
|
| 1398 |
+
"(%r, %r)",
|
| 1399 |
+
remote_addr, transport, protocol)
|
| 1400 |
+
|
| 1401 |
+
try:
|
| 1402 |
+
await waiter
|
| 1403 |
+
except:
|
| 1404 |
+
transport.close()
|
| 1405 |
+
raise
|
| 1406 |
+
|
| 1407 |
+
return transport, protocol
|
| 1408 |
+
|
| 1409 |
+
async def _ensure_resolved(self, address, *,
|
| 1410 |
+
family=0, type=socket.SOCK_STREAM,
|
| 1411 |
+
proto=0, flags=0, loop):
|
| 1412 |
+
host, port = address[:2]
|
| 1413 |
+
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
|
| 1414 |
+
if info is not None:
|
| 1415 |
+
# "host" is already a resolved IP.
|
| 1416 |
+
return [info]
|
| 1417 |
+
else:
|
| 1418 |
+
return await loop.getaddrinfo(host, port, family=family, type=type,
|
| 1419 |
+
proto=proto, flags=flags)
|
| 1420 |
+
|
| 1421 |
+
async def _create_server_getaddrinfo(self, host, port, family, flags):
|
| 1422 |
+
infos = await self._ensure_resolved((host, port), family=family,
|
| 1423 |
+
type=socket.SOCK_STREAM,
|
| 1424 |
+
flags=flags, loop=self)
|
| 1425 |
+
if not infos:
|
| 1426 |
+
raise OSError(f'getaddrinfo({host!r}) returned empty list')
|
| 1427 |
+
return infos
|
| 1428 |
+
|
| 1429 |
+
async def create_server(
|
| 1430 |
+
self, protocol_factory, host=None, port=None,
|
| 1431 |
+
*,
|
| 1432 |
+
family=socket.AF_UNSPEC,
|
| 1433 |
+
flags=socket.AI_PASSIVE,
|
| 1434 |
+
sock=None,
|
| 1435 |
+
backlog=100,
|
| 1436 |
+
ssl=None,
|
| 1437 |
+
reuse_address=None,
|
| 1438 |
+
reuse_port=None,
|
| 1439 |
+
ssl_handshake_timeout=None,
|
| 1440 |
+
start_serving=True):
|
| 1441 |
+
"""Create a TCP server.
|
| 1442 |
+
|
| 1443 |
+
The host parameter can be a string, in that case the TCP server is
|
| 1444 |
+
bound to host and port.
|
| 1445 |
+
|
| 1446 |
+
The host parameter can also be a sequence of strings and in that case
|
| 1447 |
+
the TCP server is bound to all hosts of the sequence. If a host
|
| 1448 |
+
appears multiple times (possibly indirectly e.g. when hostnames
|
| 1449 |
+
resolve to the same IP address), the server is only bound once to that
|
| 1450 |
+
host.
|
| 1451 |
+
|
| 1452 |
+
Return a Server object which can be used to stop the service.
|
| 1453 |
+
|
| 1454 |
+
This method is a coroutine.
|
| 1455 |
+
"""
|
| 1456 |
+
if isinstance(ssl, bool):
|
| 1457 |
+
raise TypeError('ssl argument must be an SSLContext or None')
|
| 1458 |
+
|
| 1459 |
+
if ssl_handshake_timeout is not None and ssl is None:
|
| 1460 |
+
raise ValueError(
|
| 1461 |
+
'ssl_handshake_timeout is only meaningful with ssl')
|
| 1462 |
+
|
| 1463 |
+
if sock is not None:
|
| 1464 |
+
_check_ssl_socket(sock)
|
| 1465 |
+
|
| 1466 |
+
if host is not None or port is not None:
|
| 1467 |
+
if sock is not None:
|
| 1468 |
+
raise ValueError(
|
| 1469 |
+
'host/port and sock can not be specified at the same time')
|
| 1470 |
+
|
| 1471 |
+
if reuse_address is None:
|
| 1472 |
+
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
|
| 1473 |
+
sockets = []
|
| 1474 |
+
if host == '':
|
| 1475 |
+
hosts = [None]
|
| 1476 |
+
elif (isinstance(host, str) or
|
| 1477 |
+
not isinstance(host, collections.abc.Iterable)):
|
| 1478 |
+
hosts = [host]
|
| 1479 |
+
else:
|
| 1480 |
+
hosts = host
|
| 1481 |
+
|
| 1482 |
+
fs = [self._create_server_getaddrinfo(host, port, family=family,
|
| 1483 |
+
flags=flags)
|
| 1484 |
+
for host in hosts]
|
| 1485 |
+
infos = await tasks.gather(*fs)
|
| 1486 |
+
infos = set(itertools.chain.from_iterable(infos))
|
| 1487 |
+
|
| 1488 |
+
completed = False
|
| 1489 |
+
try:
|
| 1490 |
+
for res in infos:
|
| 1491 |
+
af, socktype, proto, canonname, sa = res
|
| 1492 |
+
try:
|
| 1493 |
+
sock = socket.socket(af, socktype, proto)
|
| 1494 |
+
except socket.error:
|
| 1495 |
+
# Assume it's a bad family/type/protocol combination.
|
| 1496 |
+
if self._debug:
|
| 1497 |
+
logger.warning('create_server() failed to create '
|
| 1498 |
+
'socket.socket(%r, %r, %r)',
|
| 1499 |
+
af, socktype, proto, exc_info=True)
|
| 1500 |
+
continue
|
| 1501 |
+
sockets.append(sock)
|
| 1502 |
+
if reuse_address:
|
| 1503 |
+
sock.setsockopt(
|
| 1504 |
+
socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
|
| 1505 |
+
if reuse_port:
|
| 1506 |
+
_set_reuseport(sock)
|
| 1507 |
+
# Disable IPv4/IPv6 dual stack support (enabled by
|
| 1508 |
+
# default on Linux) which makes a single socket
|
| 1509 |
+
# listen on both address families.
|
| 1510 |
+
if (_HAS_IPv6 and
|
| 1511 |
+
af == socket.AF_INET6 and
|
| 1512 |
+
hasattr(socket, 'IPPROTO_IPV6')):
|
| 1513 |
+
sock.setsockopt(socket.IPPROTO_IPV6,
|
| 1514 |
+
socket.IPV6_V6ONLY,
|
| 1515 |
+
True)
|
| 1516 |
+
try:
|
| 1517 |
+
sock.bind(sa)
|
| 1518 |
+
except OSError as err:
|
| 1519 |
+
raise OSError(err.errno, 'error while attempting '
|
| 1520 |
+
'to bind on address %r: %s'
|
| 1521 |
+
% (sa, err.strerror.lower())) from None
|
| 1522 |
+
completed = True
|
| 1523 |
+
finally:
|
| 1524 |
+
if not completed:
|
| 1525 |
+
for sock in sockets:
|
| 1526 |
+
sock.close()
|
| 1527 |
+
else:
|
| 1528 |
+
if sock is None:
|
| 1529 |
+
raise ValueError('Neither host/port nor sock were specified')
|
| 1530 |
+
if sock.type != socket.SOCK_STREAM:
|
| 1531 |
+
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
|
| 1532 |
+
sockets = [sock]
|
| 1533 |
+
|
| 1534 |
+
for sock in sockets:
|
| 1535 |
+
sock.setblocking(False)
|
| 1536 |
+
|
| 1537 |
+
server = Server(self, sockets, protocol_factory,
|
| 1538 |
+
ssl, backlog, ssl_handshake_timeout)
|
| 1539 |
+
if start_serving:
|
| 1540 |
+
server._start_serving()
|
| 1541 |
+
# Skip one loop iteration so that all 'loop.add_reader'
|
| 1542 |
+
# go through.
|
| 1543 |
+
await tasks.sleep(0)
|
| 1544 |
+
|
| 1545 |
+
if self._debug:
|
| 1546 |
+
logger.info("%r is serving", server)
|
| 1547 |
+
return server
|
| 1548 |
+
|
| 1549 |
+
async def connect_accepted_socket(
|
| 1550 |
+
self, protocol_factory, sock,
|
| 1551 |
+
*, ssl=None,
|
| 1552 |
+
ssl_handshake_timeout=None):
|
| 1553 |
+
if sock.type != socket.SOCK_STREAM:
|
| 1554 |
+
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
|
| 1555 |
+
|
| 1556 |
+
if ssl_handshake_timeout is not None and not ssl:
|
| 1557 |
+
raise ValueError(
|
| 1558 |
+
'ssl_handshake_timeout is only meaningful with ssl')
|
| 1559 |
+
|
| 1560 |
+
if sock is not None:
|
| 1561 |
+
_check_ssl_socket(sock)
|
| 1562 |
+
|
| 1563 |
+
transport, protocol = await self._create_connection_transport(
|
| 1564 |
+
sock, protocol_factory, ssl, '', server_side=True,
|
| 1565 |
+
ssl_handshake_timeout=ssl_handshake_timeout)
|
| 1566 |
+
if self._debug:
|
| 1567 |
+
# Get the socket from the transport because SSL transport closes
|
| 1568 |
+
# the old socket and creates a new SSL socket
|
| 1569 |
+
sock = transport.get_extra_info('socket')
|
| 1570 |
+
logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
|
| 1571 |
+
return transport, protocol
|
| 1572 |
+
|
| 1573 |
+
async def connect_read_pipe(self, protocol_factory, pipe):
|
| 1574 |
+
protocol = protocol_factory()
|
| 1575 |
+
waiter = self.create_future()
|
| 1576 |
+
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
|
| 1577 |
+
|
| 1578 |
+
try:
|
| 1579 |
+
await waiter
|
| 1580 |
+
except:
|
| 1581 |
+
transport.close()
|
| 1582 |
+
raise
|
| 1583 |
+
|
| 1584 |
+
if self._debug:
|
| 1585 |
+
logger.debug('Read pipe %r connected: (%r, %r)',
|
| 1586 |
+
pipe.fileno(), transport, protocol)
|
| 1587 |
+
return transport, protocol
|
| 1588 |
+
|
| 1589 |
+
async def connect_write_pipe(self, protocol_factory, pipe):
|
| 1590 |
+
protocol = protocol_factory()
|
| 1591 |
+
waiter = self.create_future()
|
| 1592 |
+
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
|
| 1593 |
+
|
| 1594 |
+
try:
|
| 1595 |
+
await waiter
|
| 1596 |
+
except:
|
| 1597 |
+
transport.close()
|
| 1598 |
+
raise
|
| 1599 |
+
|
| 1600 |
+
if self._debug:
|
| 1601 |
+
logger.debug('Write pipe %r connected: (%r, %r)',
|
| 1602 |
+
pipe.fileno(), transport, protocol)
|
| 1603 |
+
return transport, protocol
|
| 1604 |
+
|
| 1605 |
+
def _log_subprocess(self, msg, stdin, stdout, stderr):
|
| 1606 |
+
info = [msg]
|
| 1607 |
+
if stdin is not None:
|
| 1608 |
+
info.append(f'stdin={_format_pipe(stdin)}')
|
| 1609 |
+
if stdout is not None and stderr == subprocess.STDOUT:
|
| 1610 |
+
info.append(f'stdout=stderr={_format_pipe(stdout)}')
|
| 1611 |
+
else:
|
| 1612 |
+
if stdout is not None:
|
| 1613 |
+
info.append(f'stdout={_format_pipe(stdout)}')
|
| 1614 |
+
if stderr is not None:
|
| 1615 |
+
info.append(f'stderr={_format_pipe(stderr)}')
|
| 1616 |
+
logger.debug(' '.join(info))
|
| 1617 |
+
|
| 1618 |
+
async def subprocess_shell(self, protocol_factory, cmd, *,
|
| 1619 |
+
stdin=subprocess.PIPE,
|
| 1620 |
+
stdout=subprocess.PIPE,
|
| 1621 |
+
stderr=subprocess.PIPE,
|
| 1622 |
+
universal_newlines=False,
|
| 1623 |
+
shell=True, bufsize=0,
|
| 1624 |
+
encoding=None, errors=None, text=None,
|
| 1625 |
+
**kwargs):
|
| 1626 |
+
if not isinstance(cmd, (bytes, str)):
|
| 1627 |
+
raise ValueError("cmd must be a string")
|
| 1628 |
+
if universal_newlines:
|
| 1629 |
+
raise ValueError("universal_newlines must be False")
|
| 1630 |
+
if not shell:
|
| 1631 |
+
raise ValueError("shell must be True")
|
| 1632 |
+
if bufsize != 0:
|
| 1633 |
+
raise ValueError("bufsize must be 0")
|
| 1634 |
+
if text:
|
| 1635 |
+
raise ValueError("text must be False")
|
| 1636 |
+
if encoding is not None:
|
| 1637 |
+
raise ValueError("encoding must be None")
|
| 1638 |
+
if errors is not None:
|
| 1639 |
+
raise ValueError("errors must be None")
|
| 1640 |
+
|
| 1641 |
+
protocol = protocol_factory()
|
| 1642 |
+
debug_log = None
|
| 1643 |
+
if self._debug:
|
| 1644 |
+
# don't log parameters: they may contain sensitive information
|
| 1645 |
+
# (password) and may be too long
|
| 1646 |
+
debug_log = 'run shell command %r' % cmd
|
| 1647 |
+
self._log_subprocess(debug_log, stdin, stdout, stderr)
|
| 1648 |
+
transport = await self._make_subprocess_transport(
|
| 1649 |
+
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
|
| 1650 |
+
if self._debug and debug_log is not None:
|
| 1651 |
+
logger.info('%s: %r', debug_log, transport)
|
| 1652 |
+
return transport, protocol
|
| 1653 |
+
|
| 1654 |
+
async def subprocess_exec(self, protocol_factory, program, *args,
|
| 1655 |
+
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
|
| 1656 |
+
stderr=subprocess.PIPE, universal_newlines=False,
|
| 1657 |
+
shell=False, bufsize=0,
|
| 1658 |
+
encoding=None, errors=None, text=None,
|
| 1659 |
+
**kwargs):
|
| 1660 |
+
if universal_newlines:
|
| 1661 |
+
raise ValueError("universal_newlines must be False")
|
| 1662 |
+
if shell:
|
| 1663 |
+
raise ValueError("shell must be False")
|
| 1664 |
+
if bufsize != 0:
|
| 1665 |
+
raise ValueError("bufsize must be 0")
|
| 1666 |
+
if text:
|
| 1667 |
+
raise ValueError("text must be False")
|
| 1668 |
+
if encoding is not None:
|
| 1669 |
+
raise ValueError("encoding must be None")
|
| 1670 |
+
if errors is not None:
|
| 1671 |
+
raise ValueError("errors must be None")
|
| 1672 |
+
|
| 1673 |
+
popen_args = (program,) + args
|
| 1674 |
+
protocol = protocol_factory()
|
| 1675 |
+
debug_log = None
|
| 1676 |
+
if self._debug:
|
| 1677 |
+
# don't log parameters: they may contain sensitive information
|
| 1678 |
+
# (password) and may be too long
|
| 1679 |
+
debug_log = f'execute program {program!r}'
|
| 1680 |
+
self._log_subprocess(debug_log, stdin, stdout, stderr)
|
| 1681 |
+
transport = await self._make_subprocess_transport(
|
| 1682 |
+
protocol, popen_args, False, stdin, stdout, stderr,
|
| 1683 |
+
bufsize, **kwargs)
|
| 1684 |
+
if self._debug and debug_log is not None:
|
| 1685 |
+
logger.info('%s: %r', debug_log, transport)
|
| 1686 |
+
return transport, protocol
|
| 1687 |
+
|
| 1688 |
+
def get_exception_handler(self):
|
| 1689 |
+
"""Return an exception handler, or None if the default one is in use.
|
| 1690 |
+
"""
|
| 1691 |
+
return self._exception_handler
|
| 1692 |
+
|
| 1693 |
+
def set_exception_handler(self, handler):
|
| 1694 |
+
"""Set handler as the new event loop exception handler.
|
| 1695 |
+
|
| 1696 |
+
If handler is None, the default exception handler will
|
| 1697 |
+
be set.
|
| 1698 |
+
|
| 1699 |
+
If handler is a callable object, it should have a
|
| 1700 |
+
signature matching '(loop, context)', where 'loop'
|
| 1701 |
+
will be a reference to the active event loop, 'context'
|
| 1702 |
+
will be a dict object (see `call_exception_handler()`
|
| 1703 |
+
documentation for details about context).
|
| 1704 |
+
"""
|
| 1705 |
+
if handler is not None and not callable(handler):
|
| 1706 |
+
raise TypeError(f'A callable object or None is expected, '
|
| 1707 |
+
f'got {handler!r}')
|
| 1708 |
+
self._exception_handler = handler
|
| 1709 |
+
|
| 1710 |
+
def default_exception_handler(self, context):
|
| 1711 |
+
"""Default exception handler.
|
| 1712 |
+
|
| 1713 |
+
This is called when an exception occurs and no exception
|
| 1714 |
+
handler is set, and can be called by a custom exception
|
| 1715 |
+
handler that wants to defer to the default behavior.
|
| 1716 |
+
|
| 1717 |
+
This default handler logs the error message and other
|
| 1718 |
+
context-dependent information. In debug mode, a truncated
|
| 1719 |
+
stack trace is also appended showing where the given object
|
| 1720 |
+
(e.g. a handle or future or task) was created, if any.
|
| 1721 |
+
|
| 1722 |
+
The context parameter has the same meaning as in
|
| 1723 |
+
`call_exception_handler()`.
|
| 1724 |
+
"""
|
| 1725 |
+
message = context.get('message')
|
| 1726 |
+
if not message:
|
| 1727 |
+
message = 'Unhandled exception in event loop'
|
| 1728 |
+
|
| 1729 |
+
exception = context.get('exception')
|
| 1730 |
+
if exception is not None:
|
| 1731 |
+
exc_info = (type(exception), exception, exception.__traceback__)
|
| 1732 |
+
else:
|
| 1733 |
+
exc_info = False
|
| 1734 |
+
|
| 1735 |
+
if ('source_traceback' not in context and
|
| 1736 |
+
self._current_handle is not None and
|
| 1737 |
+
self._current_handle._source_traceback):
|
| 1738 |
+
context['handle_traceback'] = \
|
| 1739 |
+
self._current_handle._source_traceback
|
| 1740 |
+
|
| 1741 |
+
log_lines = [message]
|
| 1742 |
+
for key in sorted(context):
|
| 1743 |
+
if key in {'message', 'exception'}:
|
| 1744 |
+
continue
|
| 1745 |
+
value = context[key]
|
| 1746 |
+
if key == 'source_traceback':
|
| 1747 |
+
tb = ''.join(traceback.format_list(value))
|
| 1748 |
+
value = 'Object created at (most recent call last):\n'
|
| 1749 |
+
value += tb.rstrip()
|
| 1750 |
+
elif key == 'handle_traceback':
|
| 1751 |
+
tb = ''.join(traceback.format_list(value))
|
| 1752 |
+
value = 'Handle created at (most recent call last):\n'
|
| 1753 |
+
value += tb.rstrip()
|
| 1754 |
+
else:
|
| 1755 |
+
value = repr(value)
|
| 1756 |
+
log_lines.append(f'{key}: {value}')
|
| 1757 |
+
|
| 1758 |
+
logger.error('\n'.join(log_lines), exc_info=exc_info)
|
| 1759 |
+
|
| 1760 |
+
def call_exception_handler(self, context):
|
| 1761 |
+
"""Call the current event loop's exception handler.
|
| 1762 |
+
|
| 1763 |
+
The context argument is a dict containing the following keys:
|
| 1764 |
+
|
| 1765 |
+
- 'message': Error message;
|
| 1766 |
+
- 'exception' (optional): Exception object;
|
| 1767 |
+
- 'future' (optional): Future instance;
|
| 1768 |
+
- 'task' (optional): Task instance;
|
| 1769 |
+
- 'handle' (optional): Handle instance;
|
| 1770 |
+
- 'protocol' (optional): Protocol instance;
|
| 1771 |
+
- 'transport' (optional): Transport instance;
|
| 1772 |
+
- 'socket' (optional): Socket instance;
|
| 1773 |
+
- 'asyncgen' (optional): Asynchronous generator that caused
|
| 1774 |
+
the exception.
|
| 1775 |
+
|
| 1776 |
+
New keys maybe introduced in the future.
|
| 1777 |
+
|
| 1778 |
+
Note: do not overload this method in an event loop subclass.
|
| 1779 |
+
For custom exception handling, use the
|
| 1780 |
+
`set_exception_handler()` method.
|
| 1781 |
+
"""
|
| 1782 |
+
if self._exception_handler is None:
|
| 1783 |
+
try:
|
| 1784 |
+
self.default_exception_handler(context)
|
| 1785 |
+
except (SystemExit, KeyboardInterrupt):
|
| 1786 |
+
raise
|
| 1787 |
+
except BaseException:
|
| 1788 |
+
# Second protection layer for unexpected errors
|
| 1789 |
+
# in the default implementation, as well as for subclassed
|
| 1790 |
+
# event loops with overloaded "default_exception_handler".
|
| 1791 |
+
logger.error('Exception in default exception handler',
|
| 1792 |
+
exc_info=True)
|
| 1793 |
+
else:
|
| 1794 |
+
try:
|
| 1795 |
+
self._exception_handler(self, context)
|
| 1796 |
+
except (SystemExit, KeyboardInterrupt):
|
| 1797 |
+
raise
|
| 1798 |
+
except BaseException as exc:
|
| 1799 |
+
# Exception in the user set custom exception handler.
|
| 1800 |
+
try:
|
| 1801 |
+
# Let's try default handler.
|
| 1802 |
+
self.default_exception_handler({
|
| 1803 |
+
'message': 'Unhandled error in exception handler',
|
| 1804 |
+
'exception': exc,
|
| 1805 |
+
'context': context,
|
| 1806 |
+
})
|
| 1807 |
+
except (SystemExit, KeyboardInterrupt):
|
| 1808 |
+
raise
|
| 1809 |
+
except BaseException:
|
| 1810 |
+
# Guard 'default_exception_handler' in case it is
|
| 1811 |
+
# overloaded.
|
| 1812 |
+
logger.error('Exception in default exception handler '
|
| 1813 |
+
'while handling an unexpected error '
|
| 1814 |
+
'in custom exception handler',
|
| 1815 |
+
exc_info=True)
|
| 1816 |
+
|
| 1817 |
+
def _add_callback(self, handle):
|
| 1818 |
+
"""Add a Handle to _ready."""
|
| 1819 |
+
if not handle._cancelled:
|
| 1820 |
+
self._ready.append(handle)
|
| 1821 |
+
|
| 1822 |
+
def _add_callback_signalsafe(self, handle):
|
| 1823 |
+
"""Like _add_callback() but called from a signal handler."""
|
| 1824 |
+
self._add_callback(handle)
|
| 1825 |
+
self._write_to_self()
|
| 1826 |
+
|
| 1827 |
+
def _timer_handle_cancelled(self, handle):
|
| 1828 |
+
"""Notification that a TimerHandle has been cancelled."""
|
| 1829 |
+
if handle._scheduled:
|
| 1830 |
+
self._timer_cancelled_count += 1
|
| 1831 |
+
|
| 1832 |
+
def _run_once(self):
|
| 1833 |
+
"""Run one full iteration of the event loop.
|
| 1834 |
+
|
| 1835 |
+
This calls all currently ready callbacks, polls for I/O,
|
| 1836 |
+
schedules the resulting callbacks, and finally schedules
|
| 1837 |
+
'call_later' callbacks.
|
| 1838 |
+
"""
|
| 1839 |
+
|
| 1840 |
+
sched_count = len(self._scheduled)
|
| 1841 |
+
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
|
| 1842 |
+
self._timer_cancelled_count / sched_count >
|
| 1843 |
+
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
|
| 1844 |
+
# Remove delayed calls that were cancelled if their number
|
| 1845 |
+
# is too high
|
| 1846 |
+
new_scheduled = []
|
| 1847 |
+
for handle in self._scheduled:
|
| 1848 |
+
if handle._cancelled:
|
| 1849 |
+
handle._scheduled = False
|
| 1850 |
+
else:
|
| 1851 |
+
new_scheduled.append(handle)
|
| 1852 |
+
|
| 1853 |
+
heapq.heapify(new_scheduled)
|
| 1854 |
+
self._scheduled = new_scheduled
|
| 1855 |
+
self._timer_cancelled_count = 0
|
| 1856 |
+
else:
|
| 1857 |
+
# Remove delayed calls that were cancelled from head of queue.
|
| 1858 |
+
while self._scheduled and self._scheduled[0]._cancelled:
|
| 1859 |
+
self._timer_cancelled_count -= 1
|
| 1860 |
+
handle = heapq.heappop(self._scheduled)
|
| 1861 |
+
handle._scheduled = False
|
| 1862 |
+
|
| 1863 |
+
timeout = None
|
| 1864 |
+
if self._ready or self._stopping:
|
| 1865 |
+
timeout = 0
|
| 1866 |
+
elif self._scheduled:
|
| 1867 |
+
# Compute the desired timeout.
|
| 1868 |
+
when = self._scheduled[0]._when
|
| 1869 |
+
timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
|
| 1870 |
+
|
| 1871 |
+
event_list = self._selector.select(timeout)
|
| 1872 |
+
self._process_events(event_list)
|
| 1873 |
+
# Needed to break cycles when an exception occurs.
|
| 1874 |
+
event_list = None
|
| 1875 |
+
|
| 1876 |
+
# Handle 'later' callbacks that are ready.
|
| 1877 |
+
end_time = self.time() + self._clock_resolution
|
| 1878 |
+
while self._scheduled:
|
| 1879 |
+
handle = self._scheduled[0]
|
| 1880 |
+
if handle._when >= end_time:
|
| 1881 |
+
break
|
| 1882 |
+
handle = heapq.heappop(self._scheduled)
|
| 1883 |
+
handle._scheduled = False
|
| 1884 |
+
self._ready.append(handle)
|
| 1885 |
+
|
| 1886 |
+
# This is the only place where callbacks are actually *called*.
|
| 1887 |
+
# All other places just add them to ready.
|
| 1888 |
+
# Note: We run all currently scheduled callbacks, but not any
|
| 1889 |
+
# callbacks scheduled by callbacks run this time around --
|
| 1890 |
+
# they will be run the next time (after another I/O poll).
|
| 1891 |
+
# Use an idiom that is thread-safe without using locks.
|
| 1892 |
+
ntodo = len(self._ready)
|
| 1893 |
+
for i in range(ntodo):
|
| 1894 |
+
handle = self._ready.popleft()
|
| 1895 |
+
if handle._cancelled:
|
| 1896 |
+
continue
|
| 1897 |
+
if self._debug:
|
| 1898 |
+
try:
|
| 1899 |
+
self._current_handle = handle
|
| 1900 |
+
t0 = self.time()
|
| 1901 |
+
handle._run()
|
| 1902 |
+
dt = self.time() - t0
|
| 1903 |
+
if dt >= self.slow_callback_duration:
|
| 1904 |
+
logger.warning('Executing %s took %.3f seconds',
|
| 1905 |
+
_format_handle(handle), dt)
|
| 1906 |
+
finally:
|
| 1907 |
+
self._current_handle = None
|
| 1908 |
+
else:
|
| 1909 |
+
handle._run()
|
| 1910 |
+
handle = None # Needed to break cycles when an exception occurs.
|
| 1911 |
+
|
| 1912 |
+
def _set_coroutine_origin_tracking(self, enabled):
|
| 1913 |
+
if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
|
| 1914 |
+
return
|
| 1915 |
+
|
| 1916 |
+
if enabled:
|
| 1917 |
+
self._coroutine_origin_tracking_saved_depth = (
|
| 1918 |
+
sys.get_coroutine_origin_tracking_depth())
|
| 1919 |
+
sys.set_coroutine_origin_tracking_depth(
|
| 1920 |
+
constants.DEBUG_STACK_DEPTH)
|
| 1921 |
+
else:
|
| 1922 |
+
sys.set_coroutine_origin_tracking_depth(
|
| 1923 |
+
self._coroutine_origin_tracking_saved_depth)
|
| 1924 |
+
|
| 1925 |
+
self._coroutine_origin_tracking_enabled = enabled
|
| 1926 |
+
|
| 1927 |
+
def get_debug(self):
    """Return True if the event loop is running in debug mode."""
    return self._debug
|
| 1929 |
+
|
| 1930 |
+
def set_debug(self, enabled):
    """Enable or disable debug mode on this loop.

    When the loop is already running, coroutine origin tracking is
    toggled via call_soon_threadsafe so the change is applied safely
    from any thread.
    """
    self._debug = enabled

    if self.is_running():
        # Apply the tracking change on the loop's own thread.
        self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
|
parrot/lib/python3.10/asyncio/base_subprocess.py
ADDED
|
@@ -0,0 +1,285 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import subprocess
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
from . import protocols
|
| 6 |
+
from . import transports
|
| 7 |
+
from .log import logger
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class BaseSubprocessTransport(transports.SubprocessTransport):
    """Base transport for a child process.

    Subclasses implement the platform-specific _start() to actually spawn
    the child (it must set ``self._proc``).  This class manages the
    stdin/stdout/stderr pipe transports, queues protocol callbacks until
    the pipes are connected, tracks the exit status, and handles cleanup.
    """

    def __init__(self, loop, protocol, args, shell,
                 stdin, stdout, stderr, bufsize,
                 waiter=None, extra=None, **kwargs):
        super().__init__(extra)
        self._closed = False              # True once close() has been called
        self._protocol = protocol         # SubprocessProtocol-style object
        self._loop = loop
        self._proc = None                 # set by _start() in subclasses
        self._pid = None
        self._returncode = None           # child's exit status, once known
        self._exit_waiters = []           # futures resolved in _process_exited
        # Protocol callbacks queued until the pipes are connected; set to
        # None by _connect_pipes() once direct call_soon() is safe.
        self._pending_calls = collections.deque()
        self._pipes = {}                  # fd (0/1/2) -> pipe protocol
        self._finished = False            # connection_lost already scheduled

        # Reserve slots only for the streams that will actually be piped;
        # the pipe protocols are filled in later by _connect_pipes().
        if stdin == subprocess.PIPE:
            self._pipes[0] = None
        if stdout == subprocess.PIPE:
            self._pipes[1] = None
        if stderr == subprocess.PIPE:
            self._pipes[2] = None

        # Create the child process: set the _proc attribute
        try:
            self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
                        stderr=stderr, bufsize=bufsize, **kwargs)
        except:
            # Spawning failed: release anything _start() may have opened.
            self.close()
            raise

        self._pid = self._proc.pid
        self._extra['subprocess'] = self._proc

        if self._loop.get_debug():
            if isinstance(args, (bytes, str)):
                program = args
            else:
                program = args[0]
            logger.debug('process %r created: pid %s',
                         program, self._pid)

        # Connect the pipe transports asynchronously; `waiter` (if given)
        # is resolved by _connect_pipes() when setup finishes or fails.
        self._loop.create_task(self._connect_pipes(waiter))

    def __repr__(self):
        info = [self.__class__.__name__]
        if self._closed:
            info.append('closed')
        if self._pid is not None:
            info.append(f'pid={self._pid}')
        if self._returncode is not None:
            info.append(f'returncode={self._returncode}')
        elif self._pid is not None:
            info.append('running')
        else:
            info.append('not started')

        stdin = self._pipes.get(0)
        if stdin is not None:
            info.append(f'stdin={stdin.pipe}')

        stdout = self._pipes.get(1)
        stderr = self._pipes.get(2)
        if stdout is not None and stderr is stdout:
            # stdout and stderr share the same pipe: show them together.
            info.append(f'stdout=stderr={stdout.pipe}')
        else:
            if stdout is not None:
                info.append(f'stdout={stdout.pipe}')
            if stderr is not None:
                info.append(f'stderr={stderr.pipe}')

        return '<{}>'.format(' '.join(info))

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        # Platform-specific child spawn; subclasses must set self._proc.
        raise NotImplementedError

    def set_protocol(self, protocol):
        self._protocol = protocol

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._closed

    def close(self):
        """Close the pipe transports and kill the child if still running."""
        if self._closed:
            return
        self._closed = True

        for proto in self._pipes.values():
            if proto is None:
                continue
            proto.pipe.close()

        if (self._proc is not None and
                # has the child process finished?
                self._returncode is None and
                # the child process has finished, but the
                # transport hasn't been notified yet?
                self._proc.poll() is None):

            if self._loop.get_debug():
                logger.warning('Close running child process: kill %r', self)

            try:
                self._proc.kill()
            except ProcessLookupError:
                # The child is already gone; nothing to kill.
                pass

        # Don't clear the _proc reference yet: _post_init() may still run

    def __del__(self, _warn=warnings.warn):
        # Emit a ResourceWarning if the transport was never closed.
        if not self._closed:
            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
            self.close()

    def get_pid(self):
        return self._pid

    def get_returncode(self):
        return self._returncode

    def get_pipe_transport(self, fd):
        """Return the pipe transport for fd (0/1/2), or None if not piped."""
        if fd in self._pipes:
            return self._pipes[fd].pipe
        else:
            return None

    def _check_proc(self):
        # Raise if the child reference was already dropped (transport done).
        if self._proc is None:
            raise ProcessLookupError()

    def send_signal(self, signal):
        self._check_proc()
        self._proc.send_signal(signal)

    def terminate(self):
        self._check_proc()
        self._proc.terminate()

    def kill(self):
        self._check_proc()
        self._proc.kill()

    async def _connect_pipes(self, waiter):
        """Connect pipe transports for the child's streams, then flush the
        queued protocol callbacks and resolve *waiter*."""
        try:
            proc = self._proc
            loop = self._loop

            if proc.stdin is not None:
                _, pipe = await loop.connect_write_pipe(
                    lambda: WriteSubprocessPipeProto(self, 0),
                    proc.stdin)
                self._pipes[0] = pipe

            if proc.stdout is not None:
                _, pipe = await loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 1),
                    proc.stdout)
                self._pipes[1] = pipe

            if proc.stderr is not None:
                _, pipe = await loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 2),
                    proc.stderr)
                self._pipes[2] = pipe

            assert self._pending_calls is not None

            # connection_made must run before any queued pipe callback.
            loop.call_soon(self._protocol.connection_made, self)
            for callback, data in self._pending_calls:
                loop.call_soon(callback, *data)
            # From now on _call() schedules directly via call_soon().
            self._pending_calls = None
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            if waiter is not None and not waiter.cancelled():
                waiter.set_exception(exc)
        else:
            if waiter is not None and not waiter.cancelled():
                waiter.set_result(None)

    def _call(self, cb, *data):
        # Queue the callback until the pipes are connected; afterwards
        # (_pending_calls is None) schedule it on the loop immediately.
        if self._pending_calls is not None:
            self._pending_calls.append((cb, data))
        else:
            self._loop.call_soon(cb, *data)

    def _pipe_connection_lost(self, fd, exc):
        self._call(self._protocol.pipe_connection_lost, fd, exc)
        self._try_finish()

    def _pipe_data_received(self, fd, data):
        self._call(self._protocol.pipe_data_received, fd, data)

    def _process_exited(self, returncode):
        """Record the child's exit status and notify protocol and waiters."""
        assert returncode is not None, returncode
        assert self._returncode is None, self._returncode
        if self._loop.get_debug():
            logger.info('%r exited with return code %r', self, returncode)
        self._returncode = returncode
        if self._proc.returncode is None:
            # asyncio uses a child watcher: copy the status into the Popen
            # object. On Python 3.6, it is required to avoid a ResourceWarning.
            self._proc.returncode = returncode
        self._call(self._protocol.process_exited)
        self._try_finish()

        # wake up futures waiting for wait()
        for waiter in self._exit_waiters:
            if not waiter.cancelled():
                waiter.set_result(returncode)
        self._exit_waiters = None

    async def _wait(self):
        """Wait until the process exit and return the process return code.

        This method is a coroutine."""
        if self._returncode is not None:
            return self._returncode

        waiter = self._loop.create_future()
        self._exit_waiters.append(waiter)
        return await waiter

    def _try_finish(self):
        # connection_lost may only fire once the child has exited AND every
        # pipe protocol has reported disconnection.
        assert not self._finished
        if self._returncode is None:
            return
        if all(p is not None and p.disconnected
               for p in self._pipes.values()):
            self._finished = True
            self._call(self._call_connection_lost, None)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            # Break reference cycles between loop, process and protocol.
            self._loop = None
            self._proc = None
            self._protocol = None
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
class WriteSubprocessPipeProto(protocols.BaseProtocol):
    """Pipe protocol for one writable stream (stdin) of a child process.

    Forwards flow-control and disconnection events to the owning
    subprocess transport.
    """

    def __init__(self, proc, fd):
        # Owning subprocess transport and the child's fd this pipe serves.
        self.proc = proc
        self.fd = fd
        self.pipe = None            # set once the pipe transport connects
        self.disconnected = False   # set when connection_lost fires

    def connection_made(self, transport):
        """Remember the connected pipe transport."""
        self.pipe = transport

    def __repr__(self):
        return f'<{self.__class__.__name__} fd={self.fd} pipe={self.pipe!r}>'

    def connection_lost(self, exc):
        """Report disconnection to the subprocess transport and drop the
        back-reference to break the cycle."""
        self.disconnected = True
        self.proc._pipe_connection_lost(self.fd, exc)
        self.proc = None

    def pause_writing(self):
        # Relay flow control to the subprocess protocol.
        self.proc._protocol.pause_writing()

    def resume_writing(self):
        # Relay flow control to the subprocess protocol.
        self.proc._protocol.resume_writing()
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
                              protocols.Protocol):
    """Pipe protocol for a readable stream (stdout/stderr) of a child."""

    def data_received(self, data):
        # Forward the bytes to the subprocess transport, tagged with our fd.
        self.proc._pipe_data_received(self.fd, data)
|
parrot/lib/python3.10/asyncio/constants.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Internal constants shared by the asyncio implementation modules."""

import enum

# After the connection is lost, log warnings after this many write()s.
LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5

# Seconds to wait before retrying accept().
ACCEPT_RETRY_DELAY = 1

# Number of stack entries to capture in debug mode.
# The larger the number, the slower the operation in debug mode
# (see extract_stack() in format_helpers.py).
DEBUG_STACK_DEPTH = 10

# Number of seconds to wait for SSL handshake to complete.
# The default timeout matches that of Nginx.
SSL_HANDSHAKE_TIMEOUT = 60.0

# Used in sendfile fallback code.  We use fallback for platforms
# that don't support sendfile, or for TLS connections.
SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 256

# The enum should be here to break circular dependencies between
# base_events and sslproto
class _SendfileMode(enum.Enum):
    """How loop.sendfile() transfers data for a given transport."""
    UNSUPPORTED = enum.auto()
    TRY_NATIVE = enum.auto()
    FALLBACK = enum.auto()
|
parrot/lib/python3.10/asyncio/events.py
ADDED
|
@@ -0,0 +1,819 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Event loop and event loop policy."""
|
| 2 |
+
|
| 3 |
+
__all__ = (
|
| 4 |
+
'AbstractEventLoopPolicy',
|
| 5 |
+
'AbstractEventLoop', 'AbstractServer',
|
| 6 |
+
'Handle', 'TimerHandle',
|
| 7 |
+
'get_event_loop_policy', 'set_event_loop_policy',
|
| 8 |
+
'get_event_loop', 'set_event_loop', 'new_event_loop',
|
| 9 |
+
'get_child_watcher', 'set_child_watcher',
|
| 10 |
+
'_set_running_loop', 'get_running_loop',
|
| 11 |
+
'_get_running_loop',
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
import contextvars
|
| 15 |
+
import os
|
| 16 |
+
import socket
|
| 17 |
+
import subprocess
|
| 18 |
+
import sys
|
| 19 |
+
import threading
|
| 20 |
+
|
| 21 |
+
from . import format_helpers
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class Handle:
    """Object returned by callback registration methods."""

    __slots__ = ('_callback', '_args', '_cancelled', '_loop',
                 '_source_traceback', '_repr', '__weakref__',
                 '_context')

    def __init__(self, callback, args, loop, context=None):
        # Run the callback in the caller's context unless one was supplied.
        self._context = (contextvars.copy_context()
                         if context is None else context)
        self._loop = loop
        self._callback = callback
        self._args = args
        self._cancelled = False
        self._repr = None
        # Capturing the creation stack is expensive: debug mode only.
        if self._loop.get_debug():
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))
        else:
            self._source_traceback = None

    def _repr_info(self):
        parts = [self.__class__.__name__]
        if self._cancelled:
            parts.append('cancelled')
        if self._callback is not None:
            parts.append(format_helpers._format_callback_source(
                self._callback, self._args))
        if self._source_traceback:
            frame = self._source_traceback[-1]
            parts.append(f'created at {frame[0]}:{frame[1]}')
        return parts

    def __repr__(self):
        if self._repr is not None:
            # Cached representation saved by cancel() in debug mode.
            return self._repr
        return '<{}>'.format(' '.join(self._repr_info()))

    def cancel(self):
        """Cancel the callback; a cancelled handle is never run."""
        if self._cancelled:
            return
        self._cancelled = True
        if self._loop.get_debug():
            # Keep a representation in debug mode to keep callback and
            # parameters. For example, to log the warning
            # "Executing <Handle...> took 2.5 second"
            self._repr = repr(self)
        # Drop callback and args to break potential reference cycles.
        self._callback = None
        self._args = None

    def cancelled(self):
        """Return True if cancel() was called."""
        return self._cancelled

    def _run(self):
        """Invoke the callback inside the captured context; report any
        failure through the loop's exception handler."""
        try:
            self._context.run(self._callback, *self._args)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            cb = format_helpers._format_callback_source(
                self._callback, self._args)
            context = {
                'message': f'Exception in callback {cb}',
                'exception': exc,
                'handle': self,
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        self = None  # Needed to break cycles when an exception occurs.
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class TimerHandle(Handle):
    """Object returned by timed callback registration methods."""

    __slots__ = ['_scheduled', '_when']

    def __init__(self, when, callback, args, loop, context=None):
        assert when is not None
        super().__init__(callback, args, loop, context)
        if self._source_traceback:
            # Drop the Handle.__init__ frame from the captured stack.
            del self._source_traceback[-1]
        self._when = when
        self._scheduled = False   # True while this handle sits in the heap

    def _repr_info(self):
        info = super()._repr_info()
        # Place the deadline right after 'cancelled' (if present).
        pos = 2 if self._cancelled else 1
        info.insert(pos, f'when={self._when}')
        return info

    def __hash__(self):
        return hash(self._when)

    # Ordering compares deadlines only, so timer handles sort in a heap.
    def __lt__(self, other):
        if not isinstance(other, TimerHandle):
            return NotImplemented
        return self._when < other._when

    def __le__(self, other):
        if not isinstance(other, TimerHandle):
            return NotImplemented
        return self._when < other._when or self.__eq__(other)

    def __gt__(self, other):
        if not isinstance(other, TimerHandle):
            return NotImplemented
        return self._when > other._when

    def __ge__(self, other):
        if not isinstance(other, TimerHandle):
            return NotImplemented
        return self._when > other._when or self.__eq__(other)

    def __eq__(self, other):
        if not isinstance(other, TimerHandle):
            return NotImplemented
        # Equality also considers the callback and cancellation state.
        return (self._when == other._when
                and self._callback == other._callback
                and self._args == other._args
                and self._cancelled == other._cancelled)

    def cancel(self):
        if not self._cancelled:
            # Let the loop know so it can prune its scheduled heap.
            self._loop._timer_handle_cancelled(self)
        super().cancel()

    def when(self):
        """Return a scheduled callback time.

        The time is an absolute timestamp, using the same time
        reference as loop.time().
        """
        return self._when
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class AbstractServer:
    """Abstract server returned by create_server().

    All methods raise NotImplementedError; concrete servers override them.
    """

    def close(self):
        """Stop serving.  Existing connections are left open."""
        raise NotImplementedError

    def get_loop(self):
        """Return the event loop this server is attached to."""
        raise NotImplementedError

    def is_serving(self):
        """Return True while the server is accepting connections."""
        raise NotImplementedError

    async def start_serving(self):
        """Begin accepting connections.

        Idempotent: calling it on an already-serving server is allowed.
        """
        raise NotImplementedError

    async def serve_forever(self):
        """Accept connections until this coroutine is cancelled.

        Cancelling the coroutine closes the server.
        """
        raise NotImplementedError

    async def wait_closed(self):
        """Wait until the server has fully shut down."""
        raise NotImplementedError

    # Async context-manager support: close the server and wait for
    # shutdown when leaving the ``async with`` block.
    async def __aenter__(self):
        return self

    async def __aexit__(self, *exc):
        self.close()
        await self.wait_closed()
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
class AbstractEventLoop:
|
| 205 |
+
"""Abstract event loop."""
|
| 206 |
+
|
| 207 |
+
# Running and stopping the event loop.
|
| 208 |
+
|
| 209 |
+
def run_forever(self):
|
| 210 |
+
"""Run the event loop until stop() is called."""
|
| 211 |
+
raise NotImplementedError
|
| 212 |
+
|
| 213 |
+
def run_until_complete(self, future):
|
| 214 |
+
"""Run the event loop until a Future is done.
|
| 215 |
+
|
| 216 |
+
Return the Future's result, or raise its exception.
|
| 217 |
+
"""
|
| 218 |
+
raise NotImplementedError
|
| 219 |
+
|
| 220 |
+
def stop(self):
|
| 221 |
+
"""Stop the event loop as soon as reasonable.
|
| 222 |
+
|
| 223 |
+
Exactly how soon that is may depend on the implementation, but
|
| 224 |
+
no more I/O callbacks should be scheduled.
|
| 225 |
+
"""
|
| 226 |
+
raise NotImplementedError
|
| 227 |
+
|
| 228 |
+
def is_running(self):
|
| 229 |
+
"""Return whether the event loop is currently running."""
|
| 230 |
+
raise NotImplementedError
|
| 231 |
+
|
| 232 |
+
def is_closed(self):
|
| 233 |
+
"""Returns True if the event loop was closed."""
|
| 234 |
+
raise NotImplementedError
|
| 235 |
+
|
| 236 |
+
def close(self):
|
| 237 |
+
"""Close the loop.
|
| 238 |
+
|
| 239 |
+
The loop should not be running.
|
| 240 |
+
|
| 241 |
+
This is idempotent and irreversible.
|
| 242 |
+
|
| 243 |
+
No other methods should be called after this one.
|
| 244 |
+
"""
|
| 245 |
+
raise NotImplementedError
|
| 246 |
+
|
| 247 |
+
async def shutdown_asyncgens(self):
|
| 248 |
+
"""Shutdown all active asynchronous generators."""
|
| 249 |
+
raise NotImplementedError
|
| 250 |
+
|
| 251 |
+
async def shutdown_default_executor(self):
|
| 252 |
+
"""Schedule the shutdown of the default executor."""
|
| 253 |
+
raise NotImplementedError
|
| 254 |
+
|
| 255 |
+
# Methods scheduling callbacks. All these return Handles.
|
| 256 |
+
|
| 257 |
+
def _timer_handle_cancelled(self, handle):
|
| 258 |
+
"""Notification that a TimerHandle has been cancelled."""
|
| 259 |
+
raise NotImplementedError
|
| 260 |
+
|
| 261 |
+
def call_soon(self, callback, *args, context=None):
|
| 262 |
+
return self.call_later(0, callback, *args, context=context)
|
| 263 |
+
|
| 264 |
+
def call_later(self, delay, callback, *args, context=None):
|
| 265 |
+
raise NotImplementedError
|
| 266 |
+
|
| 267 |
+
def call_at(self, when, callback, *args, context=None):
|
| 268 |
+
raise NotImplementedError
|
| 269 |
+
|
| 270 |
+
def time(self):
|
| 271 |
+
raise NotImplementedError
|
| 272 |
+
|
| 273 |
+
def create_future(self):
|
| 274 |
+
raise NotImplementedError
|
| 275 |
+
|
| 276 |
+
# Method scheduling a coroutine object: create a task.
|
| 277 |
+
|
| 278 |
+
def create_task(self, coro, *, name=None):
|
| 279 |
+
raise NotImplementedError
|
| 280 |
+
|
| 281 |
+
# Methods for interacting with threads.
|
| 282 |
+
|
| 283 |
+
def call_soon_threadsafe(self, callback, *args, context=None):
|
| 284 |
+
raise NotImplementedError
|
| 285 |
+
|
| 286 |
+
def run_in_executor(self, executor, func, *args):
|
| 287 |
+
raise NotImplementedError
|
| 288 |
+
|
| 289 |
+
def set_default_executor(self, executor):
|
| 290 |
+
raise NotImplementedError
|
| 291 |
+
|
| 292 |
+
# Network I/O methods returning Futures.
|
| 293 |
+
|
| 294 |
+
async def getaddrinfo(self, host, port, *,
|
| 295 |
+
family=0, type=0, proto=0, flags=0):
|
| 296 |
+
raise NotImplementedError
|
| 297 |
+
|
| 298 |
+
async def getnameinfo(self, sockaddr, flags=0):
|
| 299 |
+
raise NotImplementedError
|
| 300 |
+
|
| 301 |
+
async def create_connection(
|
| 302 |
+
self, protocol_factory, host=None, port=None,
|
| 303 |
+
*, ssl=None, family=0, proto=0,
|
| 304 |
+
flags=0, sock=None, local_addr=None,
|
| 305 |
+
server_hostname=None,
|
| 306 |
+
ssl_handshake_timeout=None,
|
| 307 |
+
happy_eyeballs_delay=None, interleave=None):
|
| 308 |
+
raise NotImplementedError
|
| 309 |
+
|
| 310 |
+
async def create_server(
|
| 311 |
+
self, protocol_factory, host=None, port=None,
|
| 312 |
+
*, family=socket.AF_UNSPEC,
|
| 313 |
+
flags=socket.AI_PASSIVE, sock=None, backlog=100,
|
| 314 |
+
ssl=None, reuse_address=None, reuse_port=None,
|
| 315 |
+
ssl_handshake_timeout=None,
|
| 316 |
+
start_serving=True):
|
| 317 |
+
"""A coroutine which creates a TCP server bound to host and port.
|
| 318 |
+
|
| 319 |
+
The return value is a Server object which can be used to stop
|
| 320 |
+
the service.
|
| 321 |
+
|
| 322 |
+
If host is an empty string or None all interfaces are assumed
|
| 323 |
+
and a list of multiple sockets will be returned (most likely
|
| 324 |
+
one for IPv4 and another one for IPv6). The host parameter can also be
|
| 325 |
+
a sequence (e.g. list) of hosts to bind to.
|
| 326 |
+
|
| 327 |
+
family can be set to either AF_INET or AF_INET6 to force the
|
| 328 |
+
socket to use IPv4 or IPv6. If not set it will be determined
|
| 329 |
+
from host (defaults to AF_UNSPEC).
|
| 330 |
+
|
| 331 |
+
flags is a bitmask for getaddrinfo().
|
| 332 |
+
|
| 333 |
+
sock can optionally be specified in order to use a preexisting
|
| 334 |
+
socket object.
|
| 335 |
+
|
| 336 |
+
backlog is the maximum number of queued connections passed to
|
| 337 |
+
listen() (defaults to 100).
|
| 338 |
+
|
| 339 |
+
ssl can be set to an SSLContext to enable SSL over the
|
| 340 |
+
accepted connections.
|
| 341 |
+
|
| 342 |
+
reuse_address tells the kernel to reuse a local socket in
|
| 343 |
+
TIME_WAIT state, without waiting for its natural timeout to
|
| 344 |
+
expire. If not specified will automatically be set to True on
|
| 345 |
+
UNIX.
|
| 346 |
+
|
| 347 |
+
reuse_port tells the kernel to allow this endpoint to be bound to
|
| 348 |
+
the same port as other existing endpoints are bound to, so long as
|
| 349 |
+
they all set this flag when being created. This option is not
|
| 350 |
+
supported on Windows.
|
| 351 |
+
|
| 352 |
+
ssl_handshake_timeout is the time in seconds that an SSL server
|
| 353 |
+
will wait for completion of the SSL handshake before aborting the
|
| 354 |
+
connection. Default is 60s.
|
| 355 |
+
|
| 356 |
+
start_serving set to True (default) causes the created server
|
| 357 |
+
to start accepting connections immediately. When set to False,
|
| 358 |
+
the user should await Server.start_serving() or Server.serve_forever()
|
| 359 |
+
to make the server to start accepting connections.
|
| 360 |
+
"""
|
| 361 |
+
raise NotImplementedError
|
| 362 |
+
|
| 363 |
+
async def sendfile(self, transport, file, offset=0, count=None,
|
| 364 |
+
*, fallback=True):
|
| 365 |
+
"""Send a file through a transport.
|
| 366 |
+
|
| 367 |
+
Return an amount of sent bytes.
|
| 368 |
+
"""
|
| 369 |
+
raise NotImplementedError
|
| 370 |
+
|
| 371 |
+
async def start_tls(self, transport, protocol, sslcontext, *,
|
| 372 |
+
server_side=False,
|
| 373 |
+
server_hostname=None,
|
| 374 |
+
ssl_handshake_timeout=None):
|
| 375 |
+
"""Upgrade a transport to TLS.
|
| 376 |
+
|
| 377 |
+
Return a new transport that *protocol* should start using
|
| 378 |
+
immediately.
|
| 379 |
+
"""
|
| 380 |
+
raise NotImplementedError
|
| 381 |
+
|
| 382 |
+
async def create_unix_connection(
|
| 383 |
+
self, protocol_factory, path=None, *,
|
| 384 |
+
ssl=None, sock=None,
|
| 385 |
+
server_hostname=None,
|
| 386 |
+
ssl_handshake_timeout=None):
|
| 387 |
+
raise NotImplementedError
|
| 388 |
+
|
| 389 |
+
async def create_unix_server(
|
| 390 |
+
self, protocol_factory, path=None, *,
|
| 391 |
+
sock=None, backlog=100, ssl=None,
|
| 392 |
+
ssl_handshake_timeout=None,
|
| 393 |
+
start_serving=True):
|
| 394 |
+
"""A coroutine which creates a UNIX Domain Socket server.
|
| 395 |
+
|
| 396 |
+
The return value is a Server object, which can be used to stop
|
| 397 |
+
the service.
|
| 398 |
+
|
| 399 |
+
path is a str, representing a file system path to bind the
|
| 400 |
+
server socket to.
|
| 401 |
+
|
| 402 |
+
sock can optionally be specified in order to use a preexisting
|
| 403 |
+
socket object.
|
| 404 |
+
|
| 405 |
+
backlog is the maximum number of queued connections passed to
|
| 406 |
+
listen() (defaults to 100).
|
| 407 |
+
|
| 408 |
+
ssl can be set to an SSLContext to enable SSL over the
|
| 409 |
+
accepted connections.
|
| 410 |
+
|
| 411 |
+
ssl_handshake_timeout is the time in seconds that an SSL server
|
| 412 |
+
will wait for the SSL handshake to complete (defaults to 60s).
|
| 413 |
+
|
| 414 |
+
start_serving set to True (default) causes the created server
|
| 415 |
+
to start accepting connections immediately. When set to False,
|
| 416 |
+
the user should await Server.start_serving() or Server.serve_forever()
|
| 417 |
+
to make the server to start accepting connections.
|
| 418 |
+
"""
|
| 419 |
+
raise NotImplementedError
|
| 420 |
+
|
| 421 |
+
async def connect_accepted_socket(
|
| 422 |
+
self, protocol_factory, sock,
|
| 423 |
+
*, ssl=None,
|
| 424 |
+
ssl_handshake_timeout=None):
|
| 425 |
+
"""Handle an accepted connection.
|
| 426 |
+
|
| 427 |
+
This is used by servers that accept connections outside of
|
| 428 |
+
asyncio, but use asyncio to handle connections.
|
| 429 |
+
|
| 430 |
+
This method is a coroutine. When completed, the coroutine
|
| 431 |
+
returns a (transport, protocol) pair.
|
| 432 |
+
"""
|
| 433 |
+
raise NotImplementedError
|
| 434 |
+
|
| 435 |
+
async def create_datagram_endpoint(self, protocol_factory,
|
| 436 |
+
local_addr=None, remote_addr=None, *,
|
| 437 |
+
family=0, proto=0, flags=0,
|
| 438 |
+
reuse_address=None, reuse_port=None,
|
| 439 |
+
allow_broadcast=None, sock=None):
|
| 440 |
+
"""A coroutine which creates a datagram endpoint.
|
| 441 |
+
|
| 442 |
+
This method will try to establish the endpoint in the background.
|
| 443 |
+
When successful, the coroutine returns a (transport, protocol) pair.
|
| 444 |
+
|
| 445 |
+
protocol_factory must be a callable returning a protocol instance.
|
| 446 |
+
|
| 447 |
+
socket family AF_INET, socket.AF_INET6 or socket.AF_UNIX depending on
|
| 448 |
+
host (or family if specified), socket type SOCK_DGRAM.
|
| 449 |
+
|
| 450 |
+
reuse_address tells the kernel to reuse a local socket in
|
| 451 |
+
TIME_WAIT state, without waiting for its natural timeout to
|
| 452 |
+
expire. If not specified it will automatically be set to True on
|
| 453 |
+
UNIX.
|
| 454 |
+
|
| 455 |
+
reuse_port tells the kernel to allow this endpoint to be bound to
|
| 456 |
+
the same port as other existing endpoints are bound to, so long as
|
| 457 |
+
they all set this flag when being created. This option is not
|
| 458 |
+
supported on Windows and some UNIX's. If the
|
| 459 |
+
:py:data:`~socket.SO_REUSEPORT` constant is not defined then this
|
| 460 |
+
capability is unsupported.
|
| 461 |
+
|
| 462 |
+
allow_broadcast tells the kernel to allow this endpoint to send
|
| 463 |
+
messages to the broadcast address.
|
| 464 |
+
|
| 465 |
+
sock can optionally be specified in order to use a preexisting
|
| 466 |
+
socket object.
|
| 467 |
+
"""
|
| 468 |
+
raise NotImplementedError
|
| 469 |
+
|
| 470 |
+
# Pipes and subprocesses.
|
| 471 |
+
|
| 472 |
+
async def connect_read_pipe(self, protocol_factory, pipe):
|
| 473 |
+
"""Register read pipe in event loop. Set the pipe to non-blocking mode.
|
| 474 |
+
|
| 475 |
+
protocol_factory should instantiate object with Protocol interface.
|
| 476 |
+
pipe is a file-like object.
|
| 477 |
+
Return pair (transport, protocol), where transport supports the
|
| 478 |
+
ReadTransport interface."""
|
| 479 |
+
# The reason to accept file-like object instead of just file descriptor
|
| 480 |
+
# is: we need to own pipe and close it at transport finishing
|
| 481 |
+
# Can got complicated errors if pass f.fileno(),
|
| 482 |
+
# close fd in pipe transport then close f and vice versa.
|
| 483 |
+
raise NotImplementedError
|
| 484 |
+
|
| 485 |
+
async def connect_write_pipe(self, protocol_factory, pipe):
|
| 486 |
+
"""Register write pipe in event loop.
|
| 487 |
+
|
| 488 |
+
protocol_factory should instantiate object with BaseProtocol interface.
|
| 489 |
+
Pipe is file-like object already switched to nonblocking.
|
| 490 |
+
Return pair (transport, protocol), where transport support
|
| 491 |
+
WriteTransport interface."""
|
| 492 |
+
# The reason to accept file-like object instead of just file descriptor
|
| 493 |
+
# is: we need to own pipe and close it at transport finishing
|
| 494 |
+
# Can got complicated errors if pass f.fileno(),
|
| 495 |
+
# close fd in pipe transport then close f and vice versa.
|
| 496 |
+
raise NotImplementedError
|
| 497 |
+
|
| 498 |
+
async def subprocess_shell(self, protocol_factory, cmd, *,
|
| 499 |
+
stdin=subprocess.PIPE,
|
| 500 |
+
stdout=subprocess.PIPE,
|
| 501 |
+
stderr=subprocess.PIPE,
|
| 502 |
+
**kwargs):
|
| 503 |
+
raise NotImplementedError
|
| 504 |
+
|
| 505 |
+
async def subprocess_exec(self, protocol_factory, *args,
|
| 506 |
+
stdin=subprocess.PIPE,
|
| 507 |
+
stdout=subprocess.PIPE,
|
| 508 |
+
stderr=subprocess.PIPE,
|
| 509 |
+
**kwargs):
|
| 510 |
+
raise NotImplementedError
|
| 511 |
+
|
| 512 |
+
# Ready-based callback registration methods.
|
| 513 |
+
# The add_*() methods return None.
|
| 514 |
+
# The remove_*() methods return True if something was removed,
|
| 515 |
+
# False if there was nothing to delete.
|
| 516 |
+
|
| 517 |
+
def add_reader(self, fd, callback, *args):
|
| 518 |
+
raise NotImplementedError
|
| 519 |
+
|
| 520 |
+
def remove_reader(self, fd):
|
| 521 |
+
raise NotImplementedError
|
| 522 |
+
|
| 523 |
+
def add_writer(self, fd, callback, *args):
|
| 524 |
+
raise NotImplementedError
|
| 525 |
+
|
| 526 |
+
def remove_writer(self, fd):
|
| 527 |
+
raise NotImplementedError
|
| 528 |
+
|
| 529 |
+
# Completion based I/O methods returning Futures.
|
| 530 |
+
|
| 531 |
+
async def sock_recv(self, sock, nbytes):
|
| 532 |
+
raise NotImplementedError
|
| 533 |
+
|
| 534 |
+
async def sock_recv_into(self, sock, buf):
|
| 535 |
+
raise NotImplementedError
|
| 536 |
+
|
| 537 |
+
async def sock_sendall(self, sock, data):
|
| 538 |
+
raise NotImplementedError
|
| 539 |
+
|
| 540 |
+
async def sock_connect(self, sock, address):
|
| 541 |
+
raise NotImplementedError
|
| 542 |
+
|
| 543 |
+
async def sock_accept(self, sock):
|
| 544 |
+
raise NotImplementedError
|
| 545 |
+
|
| 546 |
+
async def sock_sendfile(self, sock, file, offset=0, count=None,
|
| 547 |
+
*, fallback=None):
|
| 548 |
+
raise NotImplementedError
|
| 549 |
+
|
| 550 |
+
# Signal handling.
|
| 551 |
+
|
| 552 |
+
def add_signal_handler(self, sig, callback, *args):
|
| 553 |
+
raise NotImplementedError
|
| 554 |
+
|
| 555 |
+
def remove_signal_handler(self, sig):
|
| 556 |
+
raise NotImplementedError
|
| 557 |
+
|
| 558 |
+
# Task factory.
|
| 559 |
+
|
| 560 |
+
def set_task_factory(self, factory):
|
| 561 |
+
raise NotImplementedError
|
| 562 |
+
|
| 563 |
+
def get_task_factory(self):
|
| 564 |
+
raise NotImplementedError
|
| 565 |
+
|
| 566 |
+
# Error handlers.
|
| 567 |
+
|
| 568 |
+
def get_exception_handler(self):
|
| 569 |
+
raise NotImplementedError
|
| 570 |
+
|
| 571 |
+
def set_exception_handler(self, handler):
|
| 572 |
+
raise NotImplementedError
|
| 573 |
+
|
| 574 |
+
def default_exception_handler(self, context):
|
| 575 |
+
raise NotImplementedError
|
| 576 |
+
|
| 577 |
+
def call_exception_handler(self, context):
|
| 578 |
+
raise NotImplementedError
|
| 579 |
+
|
| 580 |
+
# Debug flag management.
|
| 581 |
+
|
| 582 |
+
def get_debug(self):
|
| 583 |
+
raise NotImplementedError
|
| 584 |
+
|
| 585 |
+
def set_debug(self, enabled):
|
| 586 |
+
raise NotImplementedError
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
class AbstractEventLoopPolicy:
|
| 590 |
+
"""Abstract policy for accessing the event loop."""
|
| 591 |
+
|
| 592 |
+
def get_event_loop(self):
|
| 593 |
+
"""Get the event loop for the current context.
|
| 594 |
+
|
| 595 |
+
Returns an event loop object implementing the BaseEventLoop interface,
|
| 596 |
+
or raises an exception in case no event loop has been set for the
|
| 597 |
+
current context and the current policy does not specify to create one.
|
| 598 |
+
|
| 599 |
+
It should never return None."""
|
| 600 |
+
raise NotImplementedError
|
| 601 |
+
|
| 602 |
+
def set_event_loop(self, loop):
|
| 603 |
+
"""Set the event loop for the current context to loop."""
|
| 604 |
+
raise NotImplementedError
|
| 605 |
+
|
| 606 |
+
def new_event_loop(self):
|
| 607 |
+
"""Create and return a new event loop object according to this
|
| 608 |
+
policy's rules. If there's need to set this loop as the event loop for
|
| 609 |
+
the current context, set_event_loop must be called explicitly."""
|
| 610 |
+
raise NotImplementedError
|
| 611 |
+
|
| 612 |
+
# Child processes handling (Unix only).
|
| 613 |
+
|
| 614 |
+
def get_child_watcher(self):
|
| 615 |
+
"Get the watcher for child processes."
|
| 616 |
+
raise NotImplementedError
|
| 617 |
+
|
| 618 |
+
def set_child_watcher(self, watcher):
|
| 619 |
+
"""Set the watcher for child processes."""
|
| 620 |
+
raise NotImplementedError
|
| 621 |
+
|
| 622 |
+
|
| 623 |
+
class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
|
| 624 |
+
"""Default policy implementation for accessing the event loop.
|
| 625 |
+
|
| 626 |
+
In this policy, each thread has its own event loop. However, we
|
| 627 |
+
only automatically create an event loop by default for the main
|
| 628 |
+
thread; other threads by default have no event loop.
|
| 629 |
+
|
| 630 |
+
Other policies may have different rules (e.g. a single global
|
| 631 |
+
event loop, or automatically creating an event loop per thread, or
|
| 632 |
+
using some other notion of context to which an event loop is
|
| 633 |
+
associated).
|
| 634 |
+
"""
|
| 635 |
+
|
| 636 |
+
_loop_factory = None
|
| 637 |
+
|
| 638 |
+
class _Local(threading.local):
|
| 639 |
+
_loop = None
|
| 640 |
+
_set_called = False
|
| 641 |
+
|
| 642 |
+
def __init__(self):
|
| 643 |
+
self._local = self._Local()
|
| 644 |
+
|
| 645 |
+
def get_event_loop(self):
|
| 646 |
+
"""Get the event loop for the current context.
|
| 647 |
+
|
| 648 |
+
Returns an instance of EventLoop or raises an exception.
|
| 649 |
+
"""
|
| 650 |
+
if (self._local._loop is None and
|
| 651 |
+
not self._local._set_called and
|
| 652 |
+
threading.current_thread() is threading.main_thread()):
|
| 653 |
+
self.set_event_loop(self.new_event_loop())
|
| 654 |
+
|
| 655 |
+
if self._local._loop is None:
|
| 656 |
+
raise RuntimeError('There is no current event loop in thread %r.'
|
| 657 |
+
% threading.current_thread().name)
|
| 658 |
+
|
| 659 |
+
return self._local._loop
|
| 660 |
+
|
| 661 |
+
def set_event_loop(self, loop):
|
| 662 |
+
"""Set the event loop."""
|
| 663 |
+
self._local._set_called = True
|
| 664 |
+
assert loop is None or isinstance(loop, AbstractEventLoop)
|
| 665 |
+
self._local._loop = loop
|
| 666 |
+
|
| 667 |
+
def new_event_loop(self):
|
| 668 |
+
"""Create a new event loop.
|
| 669 |
+
|
| 670 |
+
You must call set_event_loop() to make this the current event
|
| 671 |
+
loop.
|
| 672 |
+
"""
|
| 673 |
+
return self._loop_factory()
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
# Event loop policy. The policy itself is always global, even if the
|
| 677 |
+
# policy's rules say that there is an event loop per thread (or other
|
| 678 |
+
# notion of context). The default policy is installed by the first
|
| 679 |
+
# call to get_event_loop_policy().
|
| 680 |
+
_event_loop_policy = None
|
| 681 |
+
|
| 682 |
+
# Lock for protecting the on-the-fly creation of the event loop policy.
|
| 683 |
+
_lock = threading.Lock()
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
# A TLS for the running event loop, used by _get_running_loop.
|
| 687 |
+
class _RunningLoop(threading.local):
|
| 688 |
+
loop_pid = (None, None)
|
| 689 |
+
|
| 690 |
+
|
| 691 |
+
_running_loop = _RunningLoop()
|
| 692 |
+
|
| 693 |
+
|
| 694 |
+
def get_running_loop():
|
| 695 |
+
"""Return the running event loop. Raise a RuntimeError if there is none.
|
| 696 |
+
|
| 697 |
+
This function is thread-specific.
|
| 698 |
+
"""
|
| 699 |
+
# NOTE: this function is implemented in C (see _asynciomodule.c)
|
| 700 |
+
loop = _get_running_loop()
|
| 701 |
+
if loop is None:
|
| 702 |
+
raise RuntimeError('no running event loop')
|
| 703 |
+
return loop
|
| 704 |
+
|
| 705 |
+
|
| 706 |
+
def _get_running_loop():
|
| 707 |
+
"""Return the running event loop or None.
|
| 708 |
+
|
| 709 |
+
This is a low-level function intended to be used by event loops.
|
| 710 |
+
This function is thread-specific.
|
| 711 |
+
"""
|
| 712 |
+
# NOTE: this function is implemented in C (see _asynciomodule.c)
|
| 713 |
+
running_loop, pid = _running_loop.loop_pid
|
| 714 |
+
if running_loop is not None and pid == os.getpid():
|
| 715 |
+
return running_loop
|
| 716 |
+
|
| 717 |
+
|
| 718 |
+
def _set_running_loop(loop):
|
| 719 |
+
"""Set the running event loop.
|
| 720 |
+
|
| 721 |
+
This is a low-level function intended to be used by event loops.
|
| 722 |
+
This function is thread-specific.
|
| 723 |
+
"""
|
| 724 |
+
# NOTE: this function is implemented in C (see _asynciomodule.c)
|
| 725 |
+
_running_loop.loop_pid = (loop, os.getpid())
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
def _init_event_loop_policy():
|
| 729 |
+
global _event_loop_policy
|
| 730 |
+
with _lock:
|
| 731 |
+
if _event_loop_policy is None: # pragma: no branch
|
| 732 |
+
from . import DefaultEventLoopPolicy
|
| 733 |
+
_event_loop_policy = DefaultEventLoopPolicy()
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
def get_event_loop_policy():
|
| 737 |
+
"""Get the current event loop policy."""
|
| 738 |
+
if _event_loop_policy is None:
|
| 739 |
+
_init_event_loop_policy()
|
| 740 |
+
return _event_loop_policy
|
| 741 |
+
|
| 742 |
+
|
| 743 |
+
def set_event_loop_policy(policy):
|
| 744 |
+
"""Set the current event loop policy.
|
| 745 |
+
|
| 746 |
+
If policy is None, the default policy is restored."""
|
| 747 |
+
global _event_loop_policy
|
| 748 |
+
assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
|
| 749 |
+
_event_loop_policy = policy
|
| 750 |
+
|
| 751 |
+
|
| 752 |
+
def get_event_loop():
|
| 753 |
+
"""Return an asyncio event loop.
|
| 754 |
+
|
| 755 |
+
When called from a coroutine or a callback (e.g. scheduled with call_soon
|
| 756 |
+
or similar API), this function will always return the running event loop.
|
| 757 |
+
|
| 758 |
+
If there is no running event loop set, the function will return
|
| 759 |
+
the result of `get_event_loop_policy().get_event_loop()` call.
|
| 760 |
+
"""
|
| 761 |
+
# NOTE: this function is implemented in C (see _asynciomodule.c)
|
| 762 |
+
return _py__get_event_loop()
|
| 763 |
+
|
| 764 |
+
|
| 765 |
+
def _get_event_loop(stacklevel=3):
|
| 766 |
+
# This internal method is going away in Python 3.12, left here only for
|
| 767 |
+
# backwards compatibility with 3.10.0 - 3.10.8 and 3.11.0.
|
| 768 |
+
# Similarly, this method's C equivalent in _asyncio is going away as well.
|
| 769 |
+
# See GH-99949 for more details.
|
| 770 |
+
current_loop = _get_running_loop()
|
| 771 |
+
if current_loop is not None:
|
| 772 |
+
return current_loop
|
| 773 |
+
return get_event_loop_policy().get_event_loop()
|
| 774 |
+
|
| 775 |
+
|
| 776 |
+
def set_event_loop(loop):
|
| 777 |
+
"""Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
|
| 778 |
+
get_event_loop_policy().set_event_loop(loop)
|
| 779 |
+
|
| 780 |
+
|
| 781 |
+
def new_event_loop():
|
| 782 |
+
"""Equivalent to calling get_event_loop_policy().new_event_loop()."""
|
| 783 |
+
return get_event_loop_policy().new_event_loop()
|
| 784 |
+
|
| 785 |
+
|
| 786 |
+
def get_child_watcher():
|
| 787 |
+
"""Equivalent to calling get_event_loop_policy().get_child_watcher()."""
|
| 788 |
+
return get_event_loop_policy().get_child_watcher()
|
| 789 |
+
|
| 790 |
+
|
| 791 |
+
def set_child_watcher(watcher):
|
| 792 |
+
"""Equivalent to calling
|
| 793 |
+
get_event_loop_policy().set_child_watcher(watcher)."""
|
| 794 |
+
return get_event_loop_policy().set_child_watcher(watcher)
|
| 795 |
+
|
| 796 |
+
|
| 797 |
+
# Alias pure-Python implementations for testing purposes.
|
| 798 |
+
_py__get_running_loop = _get_running_loop
|
| 799 |
+
_py__set_running_loop = _set_running_loop
|
| 800 |
+
_py_get_running_loop = get_running_loop
|
| 801 |
+
_py_get_event_loop = get_event_loop
|
| 802 |
+
_py__get_event_loop = _get_event_loop
|
| 803 |
+
|
| 804 |
+
|
| 805 |
+
try:
|
| 806 |
+
# get_event_loop() is one of the most frequently called
|
| 807 |
+
# functions in asyncio. Pure Python implementation is
|
| 808 |
+
# about 4 times slower than C-accelerated.
|
| 809 |
+
from _asyncio import (_get_running_loop, _set_running_loop,
|
| 810 |
+
get_running_loop, get_event_loop, _get_event_loop)
|
| 811 |
+
except ImportError:
|
| 812 |
+
pass
|
| 813 |
+
else:
|
| 814 |
+
# Alias C implementations for testing purposes.
|
| 815 |
+
_c__get_running_loop = _get_running_loop
|
| 816 |
+
_c__set_running_loop = _set_running_loop
|
| 817 |
+
_c_get_running_loop = get_running_loop
|
| 818 |
+
_c_get_event_loop = get_event_loop
|
| 819 |
+
_c__get_event_loop = _get_event_loop
|
parrot/lib/python3.10/asyncio/log.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Logging configuration."""
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# Name the logger after the package.
|
| 7 |
+
logger = logging.getLogger(__package__)
|
parrot/lib/python3.10/asyncio/proactor_events.py
ADDED
|
@@ -0,0 +1,875 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Event loop using a proactor and related classes.
|
| 2 |
+
|
| 3 |
+
A proactor is a "notify-on-completion" multiplexer. Currently a
|
| 4 |
+
proactor is only implemented on Windows with IOCP.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
__all__ = 'BaseProactorEventLoop',
|
| 8 |
+
|
| 9 |
+
import io
|
| 10 |
+
import os
|
| 11 |
+
import socket
|
| 12 |
+
import warnings
|
| 13 |
+
import signal
|
| 14 |
+
import threading
|
| 15 |
+
import collections
|
| 16 |
+
|
| 17 |
+
from . import base_events
|
| 18 |
+
from . import constants
|
| 19 |
+
from . import futures
|
| 20 |
+
from . import exceptions
|
| 21 |
+
from . import protocols
|
| 22 |
+
from . import sslproto
|
| 23 |
+
from . import transports
|
| 24 |
+
from . import trsock
|
| 25 |
+
from .log import logger
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _set_socket_extra(transport, sock):
|
| 29 |
+
transport._extra['socket'] = trsock.TransportSocket(sock)
|
| 30 |
+
|
| 31 |
+
try:
|
| 32 |
+
transport._extra['sockname'] = sock.getsockname()
|
| 33 |
+
except socket.error:
|
| 34 |
+
if transport._loop.get_debug():
|
| 35 |
+
logger.warning(
|
| 36 |
+
"getsockname() failed on %r", sock, exc_info=True)
|
| 37 |
+
|
| 38 |
+
if 'peername' not in transport._extra:
|
| 39 |
+
try:
|
| 40 |
+
transport._extra['peername'] = sock.getpeername()
|
| 41 |
+
except socket.error:
|
| 42 |
+
# UDP sockets may not have a peer name
|
| 43 |
+
transport._extra['peername'] = None
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class _ProactorBasePipeTransport(transports._FlowControlMixin,
|
| 47 |
+
transports.BaseTransport):
|
| 48 |
+
"""Base class for pipe and socket transports."""
|
| 49 |
+
|
| 50 |
+
def __init__(self, loop, sock, protocol, waiter=None,
|
| 51 |
+
extra=None, server=None):
|
| 52 |
+
super().__init__(extra, loop)
|
| 53 |
+
self._set_extra(sock)
|
| 54 |
+
self._sock = sock
|
| 55 |
+
self.set_protocol(protocol)
|
| 56 |
+
self._server = server
|
| 57 |
+
self._buffer = None # None or bytearray.
|
| 58 |
+
self._read_fut = None
|
| 59 |
+
self._write_fut = None
|
| 60 |
+
self._pending_write = 0
|
| 61 |
+
self._conn_lost = 0
|
| 62 |
+
self._closing = False # Set when close() called.
|
| 63 |
+
self._called_connection_lost = False
|
| 64 |
+
self._eof_written = False
|
| 65 |
+
if self._server is not None:
|
| 66 |
+
self._server._attach()
|
| 67 |
+
self._loop.call_soon(self._protocol.connection_made, self)
|
| 68 |
+
if waiter is not None:
|
| 69 |
+
# only wake up the waiter when connection_made() has been called
|
| 70 |
+
self._loop.call_soon(futures._set_result_unless_cancelled,
|
| 71 |
+
waiter, None)
|
| 72 |
+
|
| 73 |
+
def __repr__(self):
|
| 74 |
+
info = [self.__class__.__name__]
|
| 75 |
+
if self._sock is None:
|
| 76 |
+
info.append('closed')
|
| 77 |
+
elif self._closing:
|
| 78 |
+
info.append('closing')
|
| 79 |
+
if self._sock is not None:
|
| 80 |
+
info.append(f'fd={self._sock.fileno()}')
|
| 81 |
+
if self._read_fut is not None:
|
| 82 |
+
info.append(f'read={self._read_fut!r}')
|
| 83 |
+
if self._write_fut is not None:
|
| 84 |
+
info.append(f'write={self._write_fut!r}')
|
| 85 |
+
if self._buffer:
|
| 86 |
+
info.append(f'write_bufsize={len(self._buffer)}')
|
| 87 |
+
if self._eof_written:
|
| 88 |
+
info.append('EOF written')
|
| 89 |
+
return '<{}>'.format(' '.join(info))
|
| 90 |
+
|
| 91 |
+
def _set_extra(self, sock):
|
| 92 |
+
self._extra['pipe'] = sock
|
| 93 |
+
|
| 94 |
+
def set_protocol(self, protocol):
|
| 95 |
+
self._protocol = protocol
|
| 96 |
+
|
| 97 |
+
def get_protocol(self):
|
| 98 |
+
return self._protocol
|
| 99 |
+
|
| 100 |
+
def is_closing(self):
|
| 101 |
+
return self._closing
|
| 102 |
+
|
| 103 |
+
def close(self):
|
| 104 |
+
if self._closing:
|
| 105 |
+
return
|
| 106 |
+
self._closing = True
|
| 107 |
+
self._conn_lost += 1
|
| 108 |
+
if not self._buffer and self._write_fut is None:
|
| 109 |
+
self._loop.call_soon(self._call_connection_lost, None)
|
| 110 |
+
if self._read_fut is not None:
|
| 111 |
+
self._read_fut.cancel()
|
| 112 |
+
self._read_fut = None
|
| 113 |
+
|
| 114 |
+
def __del__(self, _warn=warnings.warn):
|
| 115 |
+
if self._sock is not None:
|
| 116 |
+
_warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
|
| 117 |
+
self._sock.close()
|
| 118 |
+
|
| 119 |
+
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
|
| 120 |
+
try:
|
| 121 |
+
if isinstance(exc, OSError):
|
| 122 |
+
if self._loop.get_debug():
|
| 123 |
+
logger.debug("%r: %s", self, message, exc_info=True)
|
| 124 |
+
else:
|
| 125 |
+
self._loop.call_exception_handler({
|
| 126 |
+
'message': message,
|
| 127 |
+
'exception': exc,
|
| 128 |
+
'transport': self,
|
| 129 |
+
'protocol': self._protocol,
|
| 130 |
+
})
|
| 131 |
+
finally:
|
| 132 |
+
self._force_close(exc)
|
| 133 |
+
|
| 134 |
+
def _force_close(self, exc):
|
| 135 |
+
if self._empty_waiter is not None and not self._empty_waiter.done():
|
| 136 |
+
if exc is None:
|
| 137 |
+
self._empty_waiter.set_result(None)
|
| 138 |
+
else:
|
| 139 |
+
self._empty_waiter.set_exception(exc)
|
| 140 |
+
if self._closing and self._called_connection_lost:
|
| 141 |
+
return
|
| 142 |
+
self._closing = True
|
| 143 |
+
self._conn_lost += 1
|
| 144 |
+
if self._write_fut:
|
| 145 |
+
self._write_fut.cancel()
|
| 146 |
+
self._write_fut = None
|
| 147 |
+
if self._read_fut:
|
| 148 |
+
self._read_fut.cancel()
|
| 149 |
+
self._read_fut = None
|
| 150 |
+
self._pending_write = 0
|
| 151 |
+
self._buffer = None
|
| 152 |
+
self._loop.call_soon(self._call_connection_lost, exc)
|
| 153 |
+
|
| 154 |
+
def _call_connection_lost(self, exc):
|
| 155 |
+
if self._called_connection_lost:
|
| 156 |
+
return
|
| 157 |
+
try:
|
| 158 |
+
self._protocol.connection_lost(exc)
|
| 159 |
+
finally:
|
| 160 |
+
# XXX If there is a pending overlapped read on the other
|
| 161 |
+
# end then it may fail with ERROR_NETNAME_DELETED if we
|
| 162 |
+
# just close our end. First calling shutdown() seems to
|
| 163 |
+
# cure it, but maybe using DisconnectEx() would be better.
|
| 164 |
+
if hasattr(self._sock, 'shutdown') and self._sock.fileno() != -1:
|
| 165 |
+
self._sock.shutdown(socket.SHUT_RDWR)
|
| 166 |
+
self._sock.close()
|
| 167 |
+
self._sock = None
|
| 168 |
+
server = self._server
|
| 169 |
+
if server is not None:
|
| 170 |
+
server._detach()
|
| 171 |
+
self._server = None
|
| 172 |
+
self._called_connection_lost = True
|
| 173 |
+
|
| 174 |
+
def get_write_buffer_size(self):
|
| 175 |
+
size = self._pending_write
|
| 176 |
+
if self._buffer is not None:
|
| 177 |
+
size += len(self._buffer)
|
| 178 |
+
return size
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
|
| 182 |
+
transports.ReadTransport):
|
| 183 |
+
"""Transport for read pipes."""
|
| 184 |
+
|
| 185 |
+
def __init__(self, loop, sock, protocol, waiter=None,
|
| 186 |
+
extra=None, server=None, buffer_size=65536):
|
| 187 |
+
self._pending_data_length = -1
|
| 188 |
+
self._paused = True
|
| 189 |
+
super().__init__(loop, sock, protocol, waiter, extra, server)
|
| 190 |
+
|
| 191 |
+
self._data = bytearray(buffer_size)
|
| 192 |
+
self._loop.call_soon(self._loop_reading)
|
| 193 |
+
self._paused = False
|
| 194 |
+
|
| 195 |
+
def is_reading(self):
|
| 196 |
+
return not self._paused and not self._closing
|
| 197 |
+
|
| 198 |
+
def pause_reading(self):
|
| 199 |
+
if self._closing or self._paused:
|
| 200 |
+
return
|
| 201 |
+
self._paused = True
|
| 202 |
+
|
| 203 |
+
# bpo-33694: Don't cancel self._read_fut because cancelling an
|
| 204 |
+
# overlapped WSASend() loss silently data with the current proactor
|
| 205 |
+
# implementation.
|
| 206 |
+
#
|
| 207 |
+
# If CancelIoEx() fails with ERROR_NOT_FOUND, it means that WSASend()
|
| 208 |
+
# completed (even if HasOverlappedIoCompleted() returns 0), but
|
| 209 |
+
# Overlapped.cancel() currently silently ignores the ERROR_NOT_FOUND
|
| 210 |
+
# error. Once the overlapped is ignored, the IOCP loop will ignores the
|
| 211 |
+
# completion I/O event and so not read the result of the overlapped
|
| 212 |
+
# WSARecv().
|
| 213 |
+
|
| 214 |
+
if self._loop.get_debug():
|
| 215 |
+
logger.debug("%r pauses reading", self)
|
| 216 |
+
|
| 217 |
+
def resume_reading(self):
|
| 218 |
+
if self._closing or not self._paused:
|
| 219 |
+
return
|
| 220 |
+
|
| 221 |
+
self._paused = False
|
| 222 |
+
if self._read_fut is None:
|
| 223 |
+
self._loop.call_soon(self._loop_reading, None)
|
| 224 |
+
|
| 225 |
+
length = self._pending_data_length
|
| 226 |
+
self._pending_data_length = -1
|
| 227 |
+
if length > -1:
|
| 228 |
+
# Call the protocol methode after calling _loop_reading(),
|
| 229 |
+
# since the protocol can decide to pause reading again.
|
| 230 |
+
self._loop.call_soon(self._data_received, self._data[:length], length)
|
| 231 |
+
|
| 232 |
+
if self._loop.get_debug():
|
| 233 |
+
logger.debug("%r resumes reading", self)
|
| 234 |
+
|
| 235 |
+
def _eof_received(self):
|
| 236 |
+
if self._loop.get_debug():
|
| 237 |
+
logger.debug("%r received EOF", self)
|
| 238 |
+
|
| 239 |
+
try:
|
| 240 |
+
keep_open = self._protocol.eof_received()
|
| 241 |
+
except (SystemExit, KeyboardInterrupt):
|
| 242 |
+
raise
|
| 243 |
+
except BaseException as exc:
|
| 244 |
+
self._fatal_error(
|
| 245 |
+
exc, 'Fatal error: protocol.eof_received() call failed.')
|
| 246 |
+
return
|
| 247 |
+
|
| 248 |
+
if not keep_open:
|
| 249 |
+
self.close()
|
| 250 |
+
|
| 251 |
+
def _data_received(self, data, length):
|
| 252 |
+
if self._paused:
|
| 253 |
+
# Don't call any protocol method while reading is paused.
|
| 254 |
+
# The protocol will be called on resume_reading().
|
| 255 |
+
assert self._pending_data_length == -1
|
| 256 |
+
self._pending_data_length = length
|
| 257 |
+
return
|
| 258 |
+
|
| 259 |
+
if length == 0:
|
| 260 |
+
self._eof_received()
|
| 261 |
+
return
|
| 262 |
+
|
| 263 |
+
if isinstance(self._protocol, protocols.BufferedProtocol):
|
| 264 |
+
try:
|
| 265 |
+
protocols._feed_data_to_buffered_proto(self._protocol, data)
|
| 266 |
+
except (SystemExit, KeyboardInterrupt):
|
| 267 |
+
raise
|
| 268 |
+
except BaseException as exc:
|
| 269 |
+
self._fatal_error(exc,
|
| 270 |
+
'Fatal error: protocol.buffer_updated() '
|
| 271 |
+
'call failed.')
|
| 272 |
+
return
|
| 273 |
+
else:
|
| 274 |
+
self._protocol.data_received(data)
|
| 275 |
+
|
| 276 |
+
def _loop_reading(self, fut=None):
|
| 277 |
+
length = -1
|
| 278 |
+
data = None
|
| 279 |
+
try:
|
| 280 |
+
if fut is not None:
|
| 281 |
+
assert self._read_fut is fut or (self._read_fut is None and
|
| 282 |
+
self._closing)
|
| 283 |
+
self._read_fut = None
|
| 284 |
+
if fut.done():
|
| 285 |
+
# deliver data later in "finally" clause
|
| 286 |
+
length = fut.result()
|
| 287 |
+
if length == 0:
|
| 288 |
+
# we got end-of-file so no need to reschedule a new read
|
| 289 |
+
return
|
| 290 |
+
|
| 291 |
+
data = self._data[:length]
|
| 292 |
+
else:
|
| 293 |
+
# the future will be replaced by next proactor.recv call
|
| 294 |
+
fut.cancel()
|
| 295 |
+
|
| 296 |
+
if self._closing:
|
| 297 |
+
# since close() has been called we ignore any read data
|
| 298 |
+
return
|
| 299 |
+
|
| 300 |
+
# bpo-33694: buffer_updated() has currently no fast path because of
|
| 301 |
+
# a data loss issue caused by overlapped WSASend() cancellation.
|
| 302 |
+
|
| 303 |
+
if not self._paused:
|
| 304 |
+
# reschedule a new read
|
| 305 |
+
self._read_fut = self._loop._proactor.recv_into(self._sock, self._data)
|
| 306 |
+
except ConnectionAbortedError as exc:
|
| 307 |
+
if not self._closing:
|
| 308 |
+
self._fatal_error(exc, 'Fatal read error on pipe transport')
|
| 309 |
+
elif self._loop.get_debug():
|
| 310 |
+
logger.debug("Read error on pipe transport while closing",
|
| 311 |
+
exc_info=True)
|
| 312 |
+
except ConnectionResetError as exc:
|
| 313 |
+
self._force_close(exc)
|
| 314 |
+
except OSError as exc:
|
| 315 |
+
self._fatal_error(exc, 'Fatal read error on pipe transport')
|
| 316 |
+
except exceptions.CancelledError:
|
| 317 |
+
if not self._closing:
|
| 318 |
+
raise
|
| 319 |
+
else:
|
| 320 |
+
if not self._paused:
|
| 321 |
+
self._read_fut.add_done_callback(self._loop_reading)
|
| 322 |
+
finally:
|
| 323 |
+
if length > -1:
|
| 324 |
+
self._data_received(data, length)
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
|
| 328 |
+
transports.WriteTransport):
|
| 329 |
+
"""Transport for write pipes."""
|
| 330 |
+
|
| 331 |
+
_start_tls_compatible = True
|
| 332 |
+
|
| 333 |
+
def __init__(self, *args, **kw):
|
| 334 |
+
super().__init__(*args, **kw)
|
| 335 |
+
self._empty_waiter = None
|
| 336 |
+
|
| 337 |
+
def write(self, data):
|
| 338 |
+
if not isinstance(data, (bytes, bytearray, memoryview)):
|
| 339 |
+
raise TypeError(
|
| 340 |
+
f"data argument must be a bytes-like object, "
|
| 341 |
+
f"not {type(data).__name__}")
|
| 342 |
+
if self._eof_written:
|
| 343 |
+
raise RuntimeError('write_eof() already called')
|
| 344 |
+
if self._empty_waiter is not None:
|
| 345 |
+
raise RuntimeError('unable to write; sendfile is in progress')
|
| 346 |
+
|
| 347 |
+
if not data:
|
| 348 |
+
return
|
| 349 |
+
|
| 350 |
+
if self._conn_lost:
|
| 351 |
+
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
|
| 352 |
+
logger.warning('socket.send() raised exception.')
|
| 353 |
+
self._conn_lost += 1
|
| 354 |
+
return
|
| 355 |
+
|
| 356 |
+
# Observable states:
|
| 357 |
+
# 1. IDLE: _write_fut and _buffer both None
|
| 358 |
+
# 2. WRITING: _write_fut set; _buffer None
|
| 359 |
+
# 3. BACKED UP: _write_fut set; _buffer a bytearray
|
| 360 |
+
# We always copy the data, so the caller can't modify it
|
| 361 |
+
# while we're still waiting for the I/O to happen.
|
| 362 |
+
if self._write_fut is None: # IDLE -> WRITING
|
| 363 |
+
assert self._buffer is None
|
| 364 |
+
# Pass a copy, except if it's already immutable.
|
| 365 |
+
self._loop_writing(data=bytes(data))
|
| 366 |
+
elif not self._buffer: # WRITING -> BACKED UP
|
| 367 |
+
# Make a mutable copy which we can extend.
|
| 368 |
+
self._buffer = bytearray(data)
|
| 369 |
+
self._maybe_pause_protocol()
|
| 370 |
+
else: # BACKED UP
|
| 371 |
+
# Append to buffer (also copies).
|
| 372 |
+
self._buffer.extend(data)
|
| 373 |
+
self._maybe_pause_protocol()
|
| 374 |
+
|
| 375 |
+
def _loop_writing(self, f=None, data=None):
|
| 376 |
+
try:
|
| 377 |
+
if f is not None and self._write_fut is None and self._closing:
|
| 378 |
+
# XXX most likely self._force_close() has been called, and
|
| 379 |
+
# it has set self._write_fut to None.
|
| 380 |
+
return
|
| 381 |
+
assert f is self._write_fut
|
| 382 |
+
self._write_fut = None
|
| 383 |
+
self._pending_write = 0
|
| 384 |
+
if f:
|
| 385 |
+
f.result()
|
| 386 |
+
if data is None:
|
| 387 |
+
data = self._buffer
|
| 388 |
+
self._buffer = None
|
| 389 |
+
if not data:
|
| 390 |
+
if self._closing:
|
| 391 |
+
self._loop.call_soon(self._call_connection_lost, None)
|
| 392 |
+
if self._eof_written:
|
| 393 |
+
self._sock.shutdown(socket.SHUT_WR)
|
| 394 |
+
# Now that we've reduced the buffer size, tell the
|
| 395 |
+
# protocol to resume writing if it was paused. Note that
|
| 396 |
+
# we do this last since the callback is called immediately
|
| 397 |
+
# and it may add more data to the buffer (even causing the
|
| 398 |
+
# protocol to be paused again).
|
| 399 |
+
self._maybe_resume_protocol()
|
| 400 |
+
else:
|
| 401 |
+
self._write_fut = self._loop._proactor.send(self._sock, data)
|
| 402 |
+
if not self._write_fut.done():
|
| 403 |
+
assert self._pending_write == 0
|
| 404 |
+
self._pending_write = len(data)
|
| 405 |
+
self._write_fut.add_done_callback(self._loop_writing)
|
| 406 |
+
self._maybe_pause_protocol()
|
| 407 |
+
else:
|
| 408 |
+
self._write_fut.add_done_callback(self._loop_writing)
|
| 409 |
+
if self._empty_waiter is not None and self._write_fut is None:
|
| 410 |
+
self._empty_waiter.set_result(None)
|
| 411 |
+
except ConnectionResetError as exc:
|
| 412 |
+
self._force_close(exc)
|
| 413 |
+
except OSError as exc:
|
| 414 |
+
self._fatal_error(exc, 'Fatal write error on pipe transport')
|
| 415 |
+
|
| 416 |
+
def can_write_eof(self):
|
| 417 |
+
return True
|
| 418 |
+
|
| 419 |
+
def write_eof(self):
|
| 420 |
+
self.close()
|
| 421 |
+
|
| 422 |
+
def abort(self):
|
| 423 |
+
self._force_close(None)
|
| 424 |
+
|
| 425 |
+
def _make_empty_waiter(self):
|
| 426 |
+
if self._empty_waiter is not None:
|
| 427 |
+
raise RuntimeError("Empty waiter is already set")
|
| 428 |
+
self._empty_waiter = self._loop.create_future()
|
| 429 |
+
if self._write_fut is None:
|
| 430 |
+
self._empty_waiter.set_result(None)
|
| 431 |
+
return self._empty_waiter
|
| 432 |
+
|
| 433 |
+
def _reset_empty_waiter(self):
|
| 434 |
+
self._empty_waiter = None
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
|
| 438 |
+
def __init__(self, *args, **kw):
|
| 439 |
+
super().__init__(*args, **kw)
|
| 440 |
+
self._read_fut = self._loop._proactor.recv(self._sock, 16)
|
| 441 |
+
self._read_fut.add_done_callback(self._pipe_closed)
|
| 442 |
+
|
| 443 |
+
def _pipe_closed(self, fut):
|
| 444 |
+
if fut.cancelled():
|
| 445 |
+
# the transport has been closed
|
| 446 |
+
return
|
| 447 |
+
assert fut.result() == b''
|
| 448 |
+
if self._closing:
|
| 449 |
+
assert self._read_fut is None
|
| 450 |
+
return
|
| 451 |
+
assert fut is self._read_fut, (fut, self._read_fut)
|
| 452 |
+
self._read_fut = None
|
| 453 |
+
if self._write_fut is not None:
|
| 454 |
+
self._force_close(BrokenPipeError())
|
| 455 |
+
else:
|
| 456 |
+
self.close()
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
class _ProactorDatagramTransport(_ProactorBasePipeTransport,
|
| 460 |
+
transports.DatagramTransport):
|
| 461 |
+
max_size = 256 * 1024
|
| 462 |
+
def __init__(self, loop, sock, protocol, address=None,
|
| 463 |
+
waiter=None, extra=None):
|
| 464 |
+
self._address = address
|
| 465 |
+
self._empty_waiter = None
|
| 466 |
+
# We don't need to call _protocol.connection_made() since our base
|
| 467 |
+
# constructor does it for us.
|
| 468 |
+
super().__init__(loop, sock, protocol, waiter=waiter, extra=extra)
|
| 469 |
+
|
| 470 |
+
# The base constructor sets _buffer = None, so we set it here
|
| 471 |
+
self._buffer = collections.deque()
|
| 472 |
+
self._loop.call_soon(self._loop_reading)
|
| 473 |
+
|
| 474 |
+
def _set_extra(self, sock):
|
| 475 |
+
_set_socket_extra(self, sock)
|
| 476 |
+
|
| 477 |
+
def get_write_buffer_size(self):
|
| 478 |
+
return sum(len(data) for data, _ in self._buffer)
|
| 479 |
+
|
| 480 |
+
def abort(self):
|
| 481 |
+
self._force_close(None)
|
| 482 |
+
|
| 483 |
+
def sendto(self, data, addr=None):
|
| 484 |
+
if not isinstance(data, (bytes, bytearray, memoryview)):
|
| 485 |
+
raise TypeError('data argument must be bytes-like object (%r)',
|
| 486 |
+
type(data))
|
| 487 |
+
|
| 488 |
+
if not data:
|
| 489 |
+
return
|
| 490 |
+
|
| 491 |
+
if self._address is not None and addr not in (None, self._address):
|
| 492 |
+
raise ValueError(
|
| 493 |
+
f'Invalid address: must be None or {self._address}')
|
| 494 |
+
|
| 495 |
+
if self._conn_lost and self._address:
|
| 496 |
+
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
|
| 497 |
+
logger.warning('socket.sendto() raised exception.')
|
| 498 |
+
self._conn_lost += 1
|
| 499 |
+
return
|
| 500 |
+
|
| 501 |
+
# Ensure that what we buffer is immutable.
|
| 502 |
+
self._buffer.append((bytes(data), addr))
|
| 503 |
+
|
| 504 |
+
if self._write_fut is None:
|
| 505 |
+
# No current write operations are active, kick one off
|
| 506 |
+
self._loop_writing()
|
| 507 |
+
# else: A write operation is already kicked off
|
| 508 |
+
|
| 509 |
+
self._maybe_pause_protocol()
|
| 510 |
+
|
| 511 |
+
def _loop_writing(self, fut=None):
|
| 512 |
+
try:
|
| 513 |
+
if self._conn_lost:
|
| 514 |
+
return
|
| 515 |
+
|
| 516 |
+
assert fut is self._write_fut
|
| 517 |
+
self._write_fut = None
|
| 518 |
+
if fut:
|
| 519 |
+
# We are in a _loop_writing() done callback, get the result
|
| 520 |
+
fut.result()
|
| 521 |
+
|
| 522 |
+
if not self._buffer or (self._conn_lost and self._address):
|
| 523 |
+
# The connection has been closed
|
| 524 |
+
if self._closing:
|
| 525 |
+
self._loop.call_soon(self._call_connection_lost, None)
|
| 526 |
+
return
|
| 527 |
+
|
| 528 |
+
data, addr = self._buffer.popleft()
|
| 529 |
+
if self._address is not None:
|
| 530 |
+
self._write_fut = self._loop._proactor.send(self._sock,
|
| 531 |
+
data)
|
| 532 |
+
else:
|
| 533 |
+
self._write_fut = self._loop._proactor.sendto(self._sock,
|
| 534 |
+
data,
|
| 535 |
+
addr=addr)
|
| 536 |
+
except OSError as exc:
|
| 537 |
+
self._protocol.error_received(exc)
|
| 538 |
+
except Exception as exc:
|
| 539 |
+
self._fatal_error(exc, 'Fatal write error on datagram transport')
|
| 540 |
+
else:
|
| 541 |
+
self._write_fut.add_done_callback(self._loop_writing)
|
| 542 |
+
self._maybe_resume_protocol()
|
| 543 |
+
|
| 544 |
+
def _loop_reading(self, fut=None):
|
| 545 |
+
data = None
|
| 546 |
+
try:
|
| 547 |
+
if self._conn_lost:
|
| 548 |
+
return
|
| 549 |
+
|
| 550 |
+
assert self._read_fut is fut or (self._read_fut is None and
|
| 551 |
+
self._closing)
|
| 552 |
+
|
| 553 |
+
self._read_fut = None
|
| 554 |
+
if fut is not None:
|
| 555 |
+
res = fut.result()
|
| 556 |
+
|
| 557 |
+
if self._closing:
|
| 558 |
+
# since close() has been called we ignore any read data
|
| 559 |
+
data = None
|
| 560 |
+
return
|
| 561 |
+
|
| 562 |
+
if self._address is not None:
|
| 563 |
+
data, addr = res, self._address
|
| 564 |
+
else:
|
| 565 |
+
data, addr = res
|
| 566 |
+
|
| 567 |
+
if self._conn_lost:
|
| 568 |
+
return
|
| 569 |
+
if self._address is not None:
|
| 570 |
+
self._read_fut = self._loop._proactor.recv(self._sock,
|
| 571 |
+
self.max_size)
|
| 572 |
+
else:
|
| 573 |
+
self._read_fut = self._loop._proactor.recvfrom(self._sock,
|
| 574 |
+
self.max_size)
|
| 575 |
+
except OSError as exc:
|
| 576 |
+
self._protocol.error_received(exc)
|
| 577 |
+
except exceptions.CancelledError:
|
| 578 |
+
if not self._closing:
|
| 579 |
+
raise
|
| 580 |
+
else:
|
| 581 |
+
if self._read_fut is not None:
|
| 582 |
+
self._read_fut.add_done_callback(self._loop_reading)
|
| 583 |
+
finally:
|
| 584 |
+
if data:
|
| 585 |
+
self._protocol.datagram_received(data, addr)
|
| 586 |
+
|
| 587 |
+
|
| 588 |
+
class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
|
| 589 |
+
_ProactorBaseWritePipeTransport,
|
| 590 |
+
transports.Transport):
|
| 591 |
+
"""Transport for duplex pipes."""
|
| 592 |
+
|
| 593 |
+
def can_write_eof(self):
|
| 594 |
+
return False
|
| 595 |
+
|
| 596 |
+
def write_eof(self):
|
| 597 |
+
raise NotImplementedError
|
| 598 |
+
|
| 599 |
+
|
| 600 |
+
class _ProactorSocketTransport(_ProactorReadPipeTransport,
|
| 601 |
+
_ProactorBaseWritePipeTransport,
|
| 602 |
+
transports.Transport):
|
| 603 |
+
"""Transport for connected sockets."""
|
| 604 |
+
|
| 605 |
+
_sendfile_compatible = constants._SendfileMode.TRY_NATIVE
|
| 606 |
+
|
| 607 |
+
def __init__(self, loop, sock, protocol, waiter=None,
|
| 608 |
+
extra=None, server=None):
|
| 609 |
+
super().__init__(loop, sock, protocol, waiter, extra, server)
|
| 610 |
+
base_events._set_nodelay(sock)
|
| 611 |
+
|
| 612 |
+
def _set_extra(self, sock):
|
| 613 |
+
_set_socket_extra(self, sock)
|
| 614 |
+
|
| 615 |
+
def can_write_eof(self):
|
| 616 |
+
return True
|
| 617 |
+
|
| 618 |
+
def write_eof(self):
|
| 619 |
+
if self._closing or self._eof_written:
|
| 620 |
+
return
|
| 621 |
+
self._eof_written = True
|
| 622 |
+
if self._write_fut is None:
|
| 623 |
+
self._sock.shutdown(socket.SHUT_WR)
|
| 624 |
+
|
| 625 |
+
|
| 626 |
+
class BaseProactorEventLoop(base_events.BaseEventLoop):
|
| 627 |
+
|
| 628 |
+
def __init__(self, proactor):
|
| 629 |
+
super().__init__()
|
| 630 |
+
logger.debug('Using proactor: %s', proactor.__class__.__name__)
|
| 631 |
+
self._proactor = proactor
|
| 632 |
+
self._selector = proactor # convenient alias
|
| 633 |
+
self._self_reading_future = None
|
| 634 |
+
self._accept_futures = {} # socket file descriptor => Future
|
| 635 |
+
proactor.set_loop(self)
|
| 636 |
+
self._make_self_pipe()
|
| 637 |
+
if threading.current_thread() is threading.main_thread():
|
| 638 |
+
# wakeup fd can only be installed to a file descriptor from the main thread
|
| 639 |
+
signal.set_wakeup_fd(self._csock.fileno())
|
| 640 |
+
|
| 641 |
+
def _make_socket_transport(self, sock, protocol, waiter=None,
|
| 642 |
+
extra=None, server=None):
|
| 643 |
+
return _ProactorSocketTransport(self, sock, protocol, waiter,
|
| 644 |
+
extra, server)
|
| 645 |
+
|
| 646 |
+
def _make_ssl_transport(
|
| 647 |
+
self, rawsock, protocol, sslcontext, waiter=None,
|
| 648 |
+
*, server_side=False, server_hostname=None,
|
| 649 |
+
extra=None, server=None,
|
| 650 |
+
ssl_handshake_timeout=None):
|
| 651 |
+
ssl_protocol = sslproto.SSLProtocol(
|
| 652 |
+
self, protocol, sslcontext, waiter,
|
| 653 |
+
server_side, server_hostname,
|
| 654 |
+
ssl_handshake_timeout=ssl_handshake_timeout)
|
| 655 |
+
_ProactorSocketTransport(self, rawsock, ssl_protocol,
|
| 656 |
+
extra=extra, server=server)
|
| 657 |
+
return ssl_protocol._app_transport
|
| 658 |
+
|
| 659 |
+
def _make_datagram_transport(self, sock, protocol,
|
| 660 |
+
address=None, waiter=None, extra=None):
|
| 661 |
+
return _ProactorDatagramTransport(self, sock, protocol, address,
|
| 662 |
+
waiter, extra)
|
| 663 |
+
|
| 664 |
+
def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
|
| 665 |
+
extra=None):
|
| 666 |
+
return _ProactorDuplexPipeTransport(self,
|
| 667 |
+
sock, protocol, waiter, extra)
|
| 668 |
+
|
| 669 |
+
def _make_read_pipe_transport(self, sock, protocol, waiter=None,
|
| 670 |
+
extra=None):
|
| 671 |
+
return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra)
|
| 672 |
+
|
| 673 |
+
def _make_write_pipe_transport(self, sock, protocol, waiter=None,
|
| 674 |
+
extra=None):
|
| 675 |
+
# We want connection_lost() to be called when other end closes
|
| 676 |
+
return _ProactorWritePipeTransport(self,
|
| 677 |
+
sock, protocol, waiter, extra)
|
| 678 |
+
|
| 679 |
+
def close(self):
|
| 680 |
+
if self.is_running():
|
| 681 |
+
raise RuntimeError("Cannot close a running event loop")
|
| 682 |
+
if self.is_closed():
|
| 683 |
+
return
|
| 684 |
+
|
| 685 |
+
if threading.current_thread() is threading.main_thread():
|
| 686 |
+
signal.set_wakeup_fd(-1)
|
| 687 |
+
# Call these methods before closing the event loop (before calling
|
| 688 |
+
# BaseEventLoop.close), because they can schedule callbacks with
|
| 689 |
+
# call_soon(), which is forbidden when the event loop is closed.
|
| 690 |
+
self._stop_accept_futures()
|
| 691 |
+
self._close_self_pipe()
|
| 692 |
+
self._proactor.close()
|
| 693 |
+
self._proactor = None
|
| 694 |
+
self._selector = None
|
| 695 |
+
|
| 696 |
+
# Close the event loop
|
| 697 |
+
super().close()
|
| 698 |
+
|
| 699 |
+
async def sock_recv(self, sock, n):
|
| 700 |
+
return await self._proactor.recv(sock, n)
|
| 701 |
+
|
| 702 |
+
async def sock_recv_into(self, sock, buf):
|
| 703 |
+
return await self._proactor.recv_into(sock, buf)
|
| 704 |
+
|
| 705 |
+
async def sock_sendall(self, sock, data):
|
| 706 |
+
return await self._proactor.send(sock, data)
|
| 707 |
+
|
| 708 |
+
async def sock_connect(self, sock, address):
|
| 709 |
+
return await self._proactor.connect(sock, address)
|
| 710 |
+
|
| 711 |
+
async def sock_accept(self, sock):
|
| 712 |
+
return await self._proactor.accept(sock)
|
| 713 |
+
|
| 714 |
+
async def _sock_sendfile_native(self, sock, file, offset, count):
|
| 715 |
+
try:
|
| 716 |
+
fileno = file.fileno()
|
| 717 |
+
except (AttributeError, io.UnsupportedOperation) as err:
|
| 718 |
+
raise exceptions.SendfileNotAvailableError("not a regular file")
|
| 719 |
+
try:
|
| 720 |
+
fsize = os.fstat(fileno).st_size
|
| 721 |
+
except OSError:
|
| 722 |
+
raise exceptions.SendfileNotAvailableError("not a regular file")
|
| 723 |
+
blocksize = count if count else fsize
|
| 724 |
+
if not blocksize:
|
| 725 |
+
return 0 # empty file
|
| 726 |
+
|
| 727 |
+
blocksize = min(blocksize, 0xffff_ffff)
|
| 728 |
+
end_pos = min(offset + count, fsize) if count else fsize
|
| 729 |
+
offset = min(offset, fsize)
|
| 730 |
+
total_sent = 0
|
| 731 |
+
try:
|
| 732 |
+
while True:
|
| 733 |
+
blocksize = min(end_pos - offset, blocksize)
|
| 734 |
+
if blocksize <= 0:
|
| 735 |
+
return total_sent
|
| 736 |
+
await self._proactor.sendfile(sock, file, offset, blocksize)
|
| 737 |
+
offset += blocksize
|
| 738 |
+
total_sent += blocksize
|
| 739 |
+
finally:
|
| 740 |
+
if total_sent > 0:
|
| 741 |
+
file.seek(offset)
|
| 742 |
+
|
| 743 |
+
async def _sendfile_native(self, transp, file, offset, count):
|
| 744 |
+
resume_reading = transp.is_reading()
|
| 745 |
+
transp.pause_reading()
|
| 746 |
+
await transp._make_empty_waiter()
|
| 747 |
+
try:
|
| 748 |
+
return await self.sock_sendfile(transp._sock, file, offset, count,
|
| 749 |
+
fallback=False)
|
| 750 |
+
finally:
|
| 751 |
+
transp._reset_empty_waiter()
|
| 752 |
+
if resume_reading:
|
| 753 |
+
transp.resume_reading()
|
| 754 |
+
|
| 755 |
+
def _close_self_pipe(self):
|
| 756 |
+
if self._self_reading_future is not None:
|
| 757 |
+
self._self_reading_future.cancel()
|
| 758 |
+
self._self_reading_future = None
|
| 759 |
+
self._ssock.close()
|
| 760 |
+
self._ssock = None
|
| 761 |
+
self._csock.close()
|
| 762 |
+
self._csock = None
|
| 763 |
+
self._internal_fds -= 1
|
| 764 |
+
|
| 765 |
+
def _make_self_pipe(self):
|
| 766 |
+
# A self-socket, really. :-)
|
| 767 |
+
self._ssock, self._csock = socket.socketpair()
|
| 768 |
+
self._ssock.setblocking(False)
|
| 769 |
+
self._csock.setblocking(False)
|
| 770 |
+
self._internal_fds += 1
|
| 771 |
+
|
| 772 |
+
def _loop_self_reading(self, f=None):
|
| 773 |
+
try:
|
| 774 |
+
if f is not None:
|
| 775 |
+
f.result() # may raise
|
| 776 |
+
if self._self_reading_future is not f:
|
| 777 |
+
# When we scheduled this Future, we assigned it to
|
| 778 |
+
# _self_reading_future. If it's not there now, something has
|
| 779 |
+
# tried to cancel the loop while this callback was still in the
|
| 780 |
+
# queue (see windows_events.ProactorEventLoop.run_forever). In
|
| 781 |
+
# that case stop here instead of continuing to schedule a new
|
| 782 |
+
# iteration.
|
| 783 |
+
return
|
| 784 |
+
f = self._proactor.recv(self._ssock, 4096)
|
| 785 |
+
except exceptions.CancelledError:
|
| 786 |
+
# _close_self_pipe() has been called, stop waiting for data
|
| 787 |
+
return
|
| 788 |
+
except (SystemExit, KeyboardInterrupt):
|
| 789 |
+
raise
|
| 790 |
+
except BaseException as exc:
|
| 791 |
+
self.call_exception_handler({
|
| 792 |
+
'message': 'Error on reading from the event loop self pipe',
|
| 793 |
+
'exception': exc,
|
| 794 |
+
'loop': self,
|
| 795 |
+
})
|
| 796 |
+
else:
|
| 797 |
+
self._self_reading_future = f
|
| 798 |
+
f.add_done_callback(self._loop_self_reading)
|
| 799 |
+
|
| 800 |
+
def _write_to_self(self):
|
| 801 |
+
# This may be called from a different thread, possibly after
|
| 802 |
+
# _close_self_pipe() has been called or even while it is
|
| 803 |
+
# running. Guard for self._csock being None or closed. When
|
| 804 |
+
# a socket is closed, send() raises OSError (with errno set to
|
| 805 |
+
# EBADF, but let's not rely on the exact error code).
|
| 806 |
+
csock = self._csock
|
| 807 |
+
if csock is None:
|
| 808 |
+
return
|
| 809 |
+
|
| 810 |
+
try:
|
| 811 |
+
csock.send(b'\0')
|
| 812 |
+
except OSError:
|
| 813 |
+
if self._debug:
|
| 814 |
+
logger.debug("Fail to write a null byte into the "
|
| 815 |
+
"self-pipe socket",
|
| 816 |
+
exc_info=True)
|
| 817 |
+
|
| 818 |
+
def _start_serving(self, protocol_factory, sock,
|
| 819 |
+
sslcontext=None, server=None, backlog=100,
|
| 820 |
+
ssl_handshake_timeout=None):
|
| 821 |
+
|
| 822 |
+
def loop(f=None):
|
| 823 |
+
try:
|
| 824 |
+
if f is not None:
|
| 825 |
+
conn, addr = f.result()
|
| 826 |
+
if self._debug:
|
| 827 |
+
logger.debug("%r got a new connection from %r: %r",
|
| 828 |
+
server, addr, conn)
|
| 829 |
+
protocol = protocol_factory()
|
| 830 |
+
if sslcontext is not None:
|
| 831 |
+
self._make_ssl_transport(
|
| 832 |
+
conn, protocol, sslcontext, server_side=True,
|
| 833 |
+
extra={'peername': addr}, server=server,
|
| 834 |
+
ssl_handshake_timeout=ssl_handshake_timeout)
|
| 835 |
+
else:
|
| 836 |
+
self._make_socket_transport(
|
| 837 |
+
conn, protocol,
|
| 838 |
+
extra={'peername': addr}, server=server)
|
| 839 |
+
if self.is_closed():
|
| 840 |
+
return
|
| 841 |
+
f = self._proactor.accept(sock)
|
| 842 |
+
except OSError as exc:
|
| 843 |
+
if sock.fileno() != -1:
|
| 844 |
+
self.call_exception_handler({
|
| 845 |
+
'message': 'Accept failed on a socket',
|
| 846 |
+
'exception': exc,
|
| 847 |
+
'socket': trsock.TransportSocket(sock),
|
| 848 |
+
})
|
| 849 |
+
sock.close()
|
| 850 |
+
elif self._debug:
|
| 851 |
+
logger.debug("Accept failed on socket %r",
|
| 852 |
+
sock, exc_info=True)
|
| 853 |
+
except exceptions.CancelledError:
|
| 854 |
+
sock.close()
|
| 855 |
+
else:
|
| 856 |
+
self._accept_futures[sock.fileno()] = f
|
| 857 |
+
f.add_done_callback(loop)
|
| 858 |
+
|
| 859 |
+
self.call_soon(loop)
|
| 860 |
+
|
| 861 |
+
def _process_events(self, event_list):
|
| 862 |
+
# Events are processed in the IocpProactor._poll() method
|
| 863 |
+
pass
|
| 864 |
+
|
| 865 |
+
def _stop_accept_futures(self):
|
| 866 |
+
for future in self._accept_futures.values():
|
| 867 |
+
future.cancel()
|
| 868 |
+
self._accept_futures.clear()
|
| 869 |
+
|
| 870 |
+
def _stop_serving(self, sock):
|
| 871 |
+
future = self._accept_futures.pop(sock.fileno(), None)
|
| 872 |
+
if future:
|
| 873 |
+
future.cancel()
|
| 874 |
+
self._proactor._stop_serving(sock)
|
| 875 |
+
sock.close()
|
parrot/lib/python3.10/asyncio/queues.py
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty')
|
| 2 |
+
|
| 3 |
+
import collections
|
| 4 |
+
import heapq
|
| 5 |
+
from types import GenericAlias
|
| 6 |
+
|
| 7 |
+
from . import locks
|
| 8 |
+
from . import mixins
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class QueueEmpty(Exception):
|
| 12 |
+
"""Raised when Queue.get_nowait() is called on an empty Queue."""
|
| 13 |
+
pass
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class QueueFull(Exception):
|
| 17 |
+
"""Raised when the Queue.put_nowait() method is called on a full Queue."""
|
| 18 |
+
pass
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class Queue(mixins._LoopBoundMixin):
|
| 22 |
+
"""A queue, useful for coordinating producer and consumer coroutines.
|
| 23 |
+
|
| 24 |
+
If maxsize is less than or equal to zero, the queue size is infinite. If it
|
| 25 |
+
is an integer greater than 0, then "await put()" will block when the
|
| 26 |
+
queue reaches maxsize, until an item is removed by get().
|
| 27 |
+
|
| 28 |
+
Unlike the standard library Queue, you can reliably know this Queue's size
|
| 29 |
+
with qsize(), since your single-threaded asyncio application won't be
|
| 30 |
+
interrupted between calling qsize() and doing an operation on the Queue.
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
def __init__(self, maxsize=0, *, loop=mixins._marker):
|
| 34 |
+
super().__init__(loop=loop)
|
| 35 |
+
self._maxsize = maxsize
|
| 36 |
+
|
| 37 |
+
# Futures.
|
| 38 |
+
self._getters = collections.deque()
|
| 39 |
+
# Futures.
|
| 40 |
+
self._putters = collections.deque()
|
| 41 |
+
self._unfinished_tasks = 0
|
| 42 |
+
self._finished = locks.Event()
|
| 43 |
+
self._finished.set()
|
| 44 |
+
self._init(maxsize)
|
| 45 |
+
|
| 46 |
+
# These three are overridable in subclasses.
|
| 47 |
+
|
| 48 |
+
def _init(self, maxsize):
|
| 49 |
+
self._queue = collections.deque()
|
| 50 |
+
|
| 51 |
+
def _get(self):
|
| 52 |
+
return self._queue.popleft()
|
| 53 |
+
|
| 54 |
+
def _put(self, item):
|
| 55 |
+
self._queue.append(item)
|
| 56 |
+
|
| 57 |
+
# End of the overridable methods.
|
| 58 |
+
|
| 59 |
+
def _wakeup_next(self, waiters):
|
| 60 |
+
# Wake up the next waiter (if any) that isn't cancelled.
|
| 61 |
+
while waiters:
|
| 62 |
+
waiter = waiters.popleft()
|
| 63 |
+
if not waiter.done():
|
| 64 |
+
waiter.set_result(None)
|
| 65 |
+
break
|
| 66 |
+
|
| 67 |
+
def __repr__(self):
|
| 68 |
+
return f'<{type(self).__name__} at {id(self):#x} {self._format()}>'
|
| 69 |
+
|
| 70 |
+
def __str__(self):
|
| 71 |
+
return f'<{type(self).__name__} {self._format()}>'
|
| 72 |
+
|
| 73 |
+
__class_getitem__ = classmethod(GenericAlias)
|
| 74 |
+
|
| 75 |
+
def _format(self):
|
| 76 |
+
result = f'maxsize={self._maxsize!r}'
|
| 77 |
+
if getattr(self, '_queue', None):
|
| 78 |
+
result += f' _queue={list(self._queue)!r}'
|
| 79 |
+
if self._getters:
|
| 80 |
+
result += f' _getters[{len(self._getters)}]'
|
| 81 |
+
if self._putters:
|
| 82 |
+
result += f' _putters[{len(self._putters)}]'
|
| 83 |
+
if self._unfinished_tasks:
|
| 84 |
+
result += f' tasks={self._unfinished_tasks}'
|
| 85 |
+
return result
|
| 86 |
+
|
| 87 |
+
def qsize(self):
|
| 88 |
+
"""Number of items in the queue."""
|
| 89 |
+
return len(self._queue)
|
| 90 |
+
|
| 91 |
+
@property
|
| 92 |
+
def maxsize(self):
|
| 93 |
+
"""Number of items allowed in the queue."""
|
| 94 |
+
return self._maxsize
|
| 95 |
+
|
| 96 |
+
def empty(self):
|
| 97 |
+
"""Return True if the queue is empty, False otherwise."""
|
| 98 |
+
return not self._queue
|
| 99 |
+
|
| 100 |
+
def full(self):
|
| 101 |
+
"""Return True if there are maxsize items in the queue.
|
| 102 |
+
|
| 103 |
+
Note: if the Queue was initialized with maxsize=0 (the default),
|
| 104 |
+
then full() is never True.
|
| 105 |
+
"""
|
| 106 |
+
if self._maxsize <= 0:
|
| 107 |
+
return False
|
| 108 |
+
else:
|
| 109 |
+
return self.qsize() >= self._maxsize
|
| 110 |
+
|
| 111 |
+
    async def put(self, item):
        """Put an item into the queue.

        Put an item into the queue. If the queue is full, wait until a free
        slot is available before adding item.
        """
        while self.full():
            putter = self._get_loop().create_future()
            self._putters.append(putter)
            try:
                await putter
            # NOTE: the bare except is deliberate — it must also catch
            # CancelledError (a BaseException since Python 3.8) so the
            # waiter is cleaned up before the cancellation propagates.
            except:
                putter.cancel()  # Just in case putter is not done yet.
                try:
                    # Clean self._putters from canceled putters.
                    self._putters.remove(putter)
                except ValueError:
                    # The putter could be removed from self._putters by a
                    # previous get_nowait call.
                    pass
                if not self.full() and not putter.cancelled():
                    # We were woken up by get_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._putters)
                raise
        return self.put_nowait(item)
|
| 137 |
+
|
| 138 |
+
    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        If no free slot is immediately available, raise QueueFull.
        """
        if self.full():
            raise QueueFull
        self._put(item)
        # Bookkeeping for join()/task_done(): one more item is outstanding.
        self._unfinished_tasks += 1
        self._finished.clear()
        # A consumer may be blocked in get(); hand the item over.
        self._wakeup_next(self._getters)
|
| 149 |
+
|
| 150 |
+
    async def get(self):
        """Remove and return an item from the queue.

        If queue is empty, wait until an item is available.
        """
        while self.empty():
            getter = self._get_loop().create_future()
            self._getters.append(getter)
            try:
                await getter
            # NOTE: the bare except is deliberate — it must also catch
            # CancelledError (a BaseException since Python 3.8) so the
            # waiter is cleaned up before the cancellation propagates.
            except:
                getter.cancel()  # Just in case getter is not done yet.
                try:
                    # Clean self._getters from canceled getters.
                    self._getters.remove(getter)
                except ValueError:
                    # The getter could be removed from self._getters by a
                    # previous put_nowait call.
                    pass
                if not self.empty() and not getter.cancelled():
                    # We were woken up by put_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._getters)
                raise
        return self.get_nowait()
|
| 175 |
+
|
| 176 |
+
    def get_nowait(self):
        """Remove and return an item from the queue.

        Return an item if one is immediately available, else raise QueueEmpty.
        """
        if self.empty():
            raise QueueEmpty
        item = self._get()
        # A slot was freed; wake a producer blocked in put(), if any.
        self._wakeup_next(self._putters)
        return item
|
| 186 |
+
|
| 187 |
+
def task_done(self):
|
| 188 |
+
"""Indicate that a formerly enqueued task is complete.
|
| 189 |
+
|
| 190 |
+
Used by queue consumers. For each get() used to fetch a task,
|
| 191 |
+
a subsequent call to task_done() tells the queue that the processing
|
| 192 |
+
on the task is complete.
|
| 193 |
+
|
| 194 |
+
If a join() is currently blocking, it will resume when all items have
|
| 195 |
+
been processed (meaning that a task_done() call was received for every
|
| 196 |
+
item that had been put() into the queue).
|
| 197 |
+
|
| 198 |
+
Raises ValueError if called more times than there were items placed in
|
| 199 |
+
the queue.
|
| 200 |
+
"""
|
| 201 |
+
if self._unfinished_tasks <= 0:
|
| 202 |
+
raise ValueError('task_done() called too many times')
|
| 203 |
+
self._unfinished_tasks -= 1
|
| 204 |
+
if self._unfinished_tasks == 0:
|
| 205 |
+
self._finished.set()
|
| 206 |
+
|
| 207 |
+
    async def join(self):
        """Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer calls task_done() to
        indicate that the item was retrieved and all work on it is complete.
        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        if self._unfinished_tasks > 0:
            # _finished is an Event that task_done() sets once the
            # outstanding-task counter reaches zero.
            await self._finished.wait()
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
class PriorityQueue(Queue):
    """A subclass of Queue; retrieves entries in priority order (lowest first).

    Entries are typically tuples of the form: (priority number, data).
    """

    def _init(self, maxsize):
        # Backing store is a plain list kept heap-ordered by heapq.
        self._queue = []

    # heappush/heappop are bound as default arguments as a CPython
    # micro-optimization: a fast local lookup instead of a module-global
    # lookup on every call.
    def _put(self, item, heappush=heapq.heappush):
        heappush(self._queue, item)

    def _get(self, heappop=heapq.heappop):
        return heappop(self._queue)
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
class LifoQueue(Queue):
    """A subclass of Queue that retrieves most recently added entries first."""

    def _init(self, maxsize):
        # A plain list used as a stack; maxsize is enforced by Queue itself.
        self._queue = []

    def _put(self, item):
        self._queue.append(item)

    def _get(self):
        # Pop from the end => last in, first out.
        return self._queue.pop()
|
parrot/lib/python3.10/asyncio/sslproto.py
ADDED
|
@@ -0,0 +1,739 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import warnings
|
| 3 |
+
try:
|
| 4 |
+
import ssl
|
| 5 |
+
except ImportError: # pragma: no cover
|
| 6 |
+
ssl = None
|
| 7 |
+
|
| 8 |
+
from . import constants
|
| 9 |
+
from . import protocols
|
| 10 |
+
from . import transports
|
| 11 |
+
from .log import logger
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _create_transport_context(server_side, server_hostname):
|
| 15 |
+
if server_side:
|
| 16 |
+
raise ValueError('Server side SSL needs a valid SSLContext')
|
| 17 |
+
|
| 18 |
+
# Client side may pass ssl=True to use a default
|
| 19 |
+
# context; in that case the sslcontext passed is None.
|
| 20 |
+
# The default is secure for client connections.
|
| 21 |
+
# Python 3.4+: use up-to-date strong settings.
|
| 22 |
+
sslcontext = ssl.create_default_context()
|
| 23 |
+
if not server_hostname:
|
| 24 |
+
sslcontext.check_hostname = False
|
| 25 |
+
return sslcontext
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# States of an _SSLPipe.
_UNWRAPPED = "UNWRAPPED"        # no TLS layer: data passes through as-is
_DO_HANDSHAKE = "DO_HANDSHAKE"  # TLS handshake in progress
_WRAPPED = "WRAPPED"            # handshake done: app data is encrypted/decrypted
_SHUTDOWN = "SHUTDOWN"          # close_notify exchange (unwrap) in progress
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class _SSLPipe(object):
    """An SSL "Pipe".

    An SSL pipe allows you to communicate with an SSL/TLS protocol instance
    through memory buffers. It can be used to implement a security layer for an
    existing connection where you don't have access to the connection's file
    descriptor, or for some reason you don't want to use it.

    An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
    data is passed through untransformed. In wrapped mode, application level
    data is encrypted to SSL record level data and vice versa. The SSL record
    level is the lowest level in the SSL protocol suite and is what travels
    as-is over the wire.

    An SslPipe initially is in "unwrapped" mode. To start SSL, call
    do_handshake(). To shutdown SSL again, call unwrap().
    """

    max_size = 256 * 1024   # Buffer size passed to read()

    def __init__(self, context, server_side, server_hostname=None):
        """
        The *context* argument specifies the ssl.SSLContext to use.

        The *server_side* argument indicates whether this is a server side or
        client side transport.

        The optional *server_hostname* argument can be used to specify the
        hostname you are connecting to. You may only specify this parameter if
        the _ssl module supports Server Name Indication (SNI).
        """
        self._context = context
        self._server_side = server_side
        self._server_hostname = server_hostname
        # One of _UNWRAPPED / _DO_HANDSHAKE / _WRAPPED / _SHUTDOWN.
        self._state = _UNWRAPPED
        # Memory BIOs: _incoming holds record data received from the peer,
        # _outgoing collects record data to be sent to the peer.
        self._incoming = ssl.MemoryBIO()
        self._outgoing = ssl.MemoryBIO()
        self._sslobj = None
        self._need_ssldata = False
        self._handshake_cb = None
        self._shutdown_cb = None

    @property
    def context(self):
        """The SSL context passed to the constructor."""
        return self._context

    @property
    def ssl_object(self):
        """The internal ssl.SSLObject instance.

        Return None if the pipe is not wrapped.
        """
        return self._sslobj

    @property
    def need_ssldata(self):
        """Whether more record level data is needed to complete a handshake
        that is currently in progress."""
        return self._need_ssldata

    @property
    def wrapped(self):
        """
        Whether a security layer is currently in effect.

        Return False during handshake.
        """
        return self._state == _WRAPPED

    def do_handshake(self, callback=None):
        """Start the SSL handshake.

        Return a list of ssldata. A ssldata element is a list of buffers

        The optional *callback* argument can be used to install a callback that
        will be called when the handshake is complete. The callback will be
        called with None if successful, else an exception instance.
        """
        if self._state != _UNWRAPPED:
            raise RuntimeError('handshake in progress or completed')
        self._sslobj = self._context.wrap_bio(
            self._incoming, self._outgoing,
            server_side=self._server_side,
            server_hostname=self._server_hostname)
        self._state = _DO_HANDSHAKE
        self._handshake_cb = callback
        # Kick off the handshake; feeding b'' just drives the state machine.
        ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
        assert len(appdata) == 0
        return ssldata

    def shutdown(self, callback=None):
        """Start the SSL shutdown sequence.

        Return a list of ssldata. A ssldata element is a list of buffers

        The optional *callback* argument can be used to install a callback that
        will be called when the shutdown is complete. The callback will be
        called without arguments.
        """
        if self._state == _UNWRAPPED:
            raise RuntimeError('no security layer present')
        if self._state == _SHUTDOWN:
            raise RuntimeError('shutdown in progress')
        assert self._state in (_WRAPPED, _DO_HANDSHAKE)
        self._state = _SHUTDOWN
        self._shutdown_cb = callback
        ssldata, appdata = self.feed_ssldata(b'')
        assert appdata == [] or appdata == [b'']
        return ssldata

    def feed_eof(self):
        """Send a potentially "ragged" EOF.

        This method will raise an SSL_ERROR_EOF exception if the EOF is
        unexpected.
        """
        self._incoming.write_eof()
        ssldata, appdata = self.feed_ssldata(b'')
        assert appdata == [] or appdata == [b'']

    def feed_ssldata(self, data, only_handshake=False):
        """Feed SSL record level data into the pipe.

        The data must be a bytes instance. It is OK to send an empty bytes
        instance. This can be used to get ssldata for a handshake initiated by
        this endpoint.

        Return a (ssldata, appdata) tuple. The ssldata element is a list of
        buffers containing SSL data that needs to be sent to the remote SSL.

        The appdata element is a list of buffers containing plaintext data that
        needs to be forwarded to the application. The appdata list may contain
        an empty buffer indicating an SSL "close_notify" alert. This alert must
        be acknowledged by calling shutdown().
        """
        if self._state == _UNWRAPPED:
            # If unwrapped, pass plaintext data straight through.
            if data:
                appdata = [data]
            else:
                appdata = []
            return ([], appdata)

        self._need_ssldata = False
        if data:
            self._incoming.write(data)

        ssldata = []
        appdata = []
        try:
            if self._state == _DO_HANDSHAKE:
                # Call do_handshake() until it doesn't raise anymore.
                self._sslobj.do_handshake()
                self._state = _WRAPPED
                if self._handshake_cb:
                    self._handshake_cb(None)
                if only_handshake:
                    return (ssldata, appdata)
                # Handshake done: execute the wrapped block

            if self._state == _WRAPPED:
                # Main state: read data from SSL until close_notify
                while True:
                    chunk = self._sslobj.read(self.max_size)
                    appdata.append(chunk)
                    if not chunk:  # close_notify
                        break

            elif self._state == _SHUTDOWN:
                # Call shutdown() until it doesn't raise anymore.
                self._sslobj.unwrap()
                self._sslobj = None
                self._state = _UNWRAPPED
                if self._shutdown_cb:
                    self._shutdown_cb()

            elif self._state == _UNWRAPPED:
                # Drain possible plaintext data after close_notify.
                appdata.append(self._incoming.read())
        except (ssl.SSLError, ssl.CertificateError) as exc:
            exc_errno = getattr(exc, 'errno', None)
            if exc_errno not in (
                    ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
                    ssl.SSL_ERROR_SYSCALL):
                # A real error (not just "need more data"): report it to
                # the handshake callback before propagating.
                if self._state == _DO_HANDSHAKE and self._handshake_cb:
                    self._handshake_cb(exc)
                raise
            self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ)

        # Check for record level data that needs to be sent back.
        # Happens for the initial handshake and renegotiations.
        if self._outgoing.pending:
            ssldata.append(self._outgoing.read())
        return (ssldata, appdata)

    def feed_appdata(self, data, offset=0):
        """Feed plaintext data into the pipe.

        Return an (ssldata, offset) tuple. The ssldata element is a list of
        buffers containing record level data that needs to be sent to the
        remote SSL instance. The offset is the number of plaintext bytes that
        were processed, which may be less than the length of data.

        NOTE: In case of short writes, this call MUST be retried with the SAME
        buffer passed into the *data* argument (i.e. the id() must be the
        same). This is an OpenSSL requirement. A further particularity is that
        a short write will always have offset == 0, because the _ssl module
        does not enable partial writes. And even though the offset is zero,
        there will still be encrypted data in ssldata.
        """
        assert 0 <= offset <= len(data)
        if self._state == _UNWRAPPED:
            # pass through data in unwrapped mode
            if offset < len(data):
                ssldata = [data[offset:]]
            else:
                ssldata = []
            return (ssldata, len(data))

        ssldata = []
        view = memoryview(data)
        while True:
            self._need_ssldata = False
            try:
                if offset < len(view):
                    offset += self._sslobj.write(view[offset:])
            except ssl.SSLError as exc:
                # It is not allowed to call write() after unwrap() until the
                # close_notify is acknowledged. We return the condition to the
                # caller as a short write.
                exc_errno = getattr(exc, 'errno', None)
                if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
                    exc_errno = exc.errno = ssl.SSL_ERROR_WANT_READ
                if exc_errno not in (ssl.SSL_ERROR_WANT_READ,
                                     ssl.SSL_ERROR_WANT_WRITE,
                                     ssl.SSL_ERROR_SYSCALL):
                    raise
                self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ)

            # See if there's any record level data back for us.
            if self._outgoing.pending:
                ssldata.append(self._outgoing.read())
            if offset == len(view) or self._need_ssldata:
                break
        return (ssldata, offset)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class _SSLProtocolTransport(transports._FlowControlMixin,
                            transports.Transport):
    # The transport handed to the application protocol: it forwards
    # writes through the SSLProtocol (for encryption) and delegates
    # flow control to the underlying low-level transport.

    _sendfile_compatible = constants._SendfileMode.FALLBACK

    def __init__(self, loop, ssl_protocol):
        self._loop = loop
        # SSLProtocol instance
        self._ssl_protocol = ssl_protocol
        self._closed = False

    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._ssl_protocol._get_extra_info(name, default)

    def set_protocol(self, protocol):
        self._ssl_protocol._set_app_protocol(protocol)

    def get_protocol(self):
        return self._ssl_protocol._app_protocol

    def is_closing(self):
        return self._closed

    def close(self):
        """Close the transport.

        Buffered data will be flushed asynchronously. No more data
        will be received. After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) called
        with None as its argument.
        """
        self._closed = True
        # Graceful close: run the SSL shutdown (close_notify) first.
        self._ssl_protocol._start_shutdown()

    def __del__(self, _warn=warnings.warn):
        # _warn is bound as a default arg so it is still reachable
        # during interpreter shutdown.
        if not self._closed:
            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
            self.close()

    def is_reading(self):
        tr = self._ssl_protocol._transport
        if tr is None:
            raise RuntimeError('SSL transport has not been initialized yet')
        return tr.is_reading()

    def pause_reading(self):
        """Pause the receiving end.

        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        self._ssl_protocol._transport.pause_reading()

    def resume_reading(self):
        """Resume the receiving end.

        Data received will once again be passed to the protocol's
        data_received() method.
        """
        self._ssl_protocol._transport.resume_reading()

    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.

        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods. If specified,
        the low-water limit must be less than or equal to the
        high-water limit. Neither value can be negative.

        The defaults are implementation-specific. If only the
        high-water limit is given, the low-water limit defaults to an
        implementation-specific value less than or equal to the
        high-water limit. Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty. Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        self._ssl_protocol._transport.set_write_buffer_limits(high, low)

    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        return self._ssl_protocol._transport.get_write_buffer_size()

    def get_write_buffer_limits(self):
        """Get the high and low watermarks for write flow control.
        Return a tuple (low, high) where low and high are
        positive number of bytes."""
        return self._ssl_protocol._transport.get_write_buffer_limits()

    @property
    def _protocol_paused(self):
        # Required for sendfile fallback pause_writing/resume_writing logic
        return self._ssl_protocol._transport._protocol_paused

    def write(self, data):
        """Write some data bytes to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError(f"data: expecting a bytes-like instance, "
                            f"got {type(data).__name__}")
        if not data:
            return
        # Encryption happens in the SSLProtocol's write pipeline.
        self._ssl_protocol._write_appdata(data)

    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        # TLS has no half-close at the application level.
        return False

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost. No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        self._ssl_protocol._abort()
        self._closed = True
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
class SSLProtocol(protocols.Protocol):
|
| 410 |
+
"""SSL protocol.
|
| 411 |
+
|
| 412 |
+
Implementation of SSL on top of a socket using incoming and outgoing
|
| 413 |
+
buffers which are ssl.MemoryBIO objects.
|
| 414 |
+
"""
|
| 415 |
+
|
| 416 |
+
    def __init__(self, loop, app_protocol, sslcontext, waiter,
                 server_side=False, server_hostname=None,
                 call_connection_made=True,
                 ssl_handshake_timeout=None):
        if ssl is None:
            raise RuntimeError('stdlib ssl module not available')

        if ssl_handshake_timeout is None:
            ssl_handshake_timeout = constants.SSL_HANDSHAKE_TIMEOUT
        elif ssl_handshake_timeout <= 0:
            raise ValueError(
                f"ssl_handshake_timeout should be a positive number, "
                f"got {ssl_handshake_timeout}")

        if not sslcontext:
            # No context given: build a secure client-side default
            # (_create_transport_context raises for server side).
            sslcontext = _create_transport_context(
                server_side, server_hostname)

        self._server_side = server_side
        # A hostname only makes sense for client connections (SNI /
        # certificate matching).
        if server_hostname and not server_side:
            self._server_hostname = server_hostname
        else:
            self._server_hostname = None
        self._sslcontext = sslcontext
        # SSL-specific extra info. More info are set when the handshake
        # completes.
        self._extra = dict(sslcontext=sslcontext)

        # App data write buffering
        self._write_backlog = collections.deque()
        self._write_buffer_size = 0

        self._waiter = waiter
        self._loop = loop
        self._set_app_protocol(app_protocol)
        self._app_transport = _SSLProtocolTransport(self._loop, self)
        # _SSLPipe instance (None until the connection is made)
        self._sslpipe = None
        self._session_established = False
        self._in_handshake = False
        self._in_shutdown = False
        # transport, ex: SelectorSocketTransport
        self._transport = None
        self._call_connection_made = call_connection_made
        self._ssl_handshake_timeout = ssl_handshake_timeout
|
| 461 |
+
|
| 462 |
+
    def _set_app_protocol(self, app_protocol):
        self._app_protocol = app_protocol
        # BufferedProtocol receives decrypted data via
        # _feed_data_to_buffered_proto() rather than data_received().
        self._app_protocol_is_buffer = \
            isinstance(app_protocol, protocols.BufferedProtocol)
|
| 466 |
+
|
| 467 |
+
    def _wakeup_waiter(self, exc=None):
        # Resolve the connection waiter: with *exc* on failure, with
        # None on success.  Safe to call multiple times.
        if self._waiter is None:
            return
        if not self._waiter.cancelled():
            if exc is not None:
                self._waiter.set_exception(exc)
            else:
                self._waiter.set_result(None)
        # Drop the reference so the waiter is only ever woken once.
        self._waiter = None
|
| 476 |
+
|
| 477 |
+
    def connection_made(self, transport):
        """Called when the low-level connection is made.

        Start the SSL handshake.
        """
        self._transport = transport
        # The pipe does the actual TLS work over in-memory BIOs.
        self._sslpipe = _SSLPipe(self._sslcontext,
                                 self._server_side,
                                 self._server_hostname)
        self._start_handshake()
|
| 487 |
+
|
| 488 |
+
    def connection_lost(self, exc):
        """Called when the low-level connection is lost or closed.

        The argument is an exception object or None (the latter
        meaning a regular EOF is received or the connection was
        aborted or closed).
        """
        if self._session_established:
            self._session_established = False
            # Defer so the transport teardown below happens before the
            # application protocol sees connection_lost().
            self._loop.call_soon(self._app_protocol.connection_lost, exc)
        else:
            # Most likely an exception occurred while in SSL handshake.
            # Just mark the app transport as closed so that its __del__
            # doesn't complain.
            if self._app_transport is not None:
                self._app_transport._closed = True
        self._transport = None
        self._app_transport = None
        if getattr(self, '_handshake_timeout_handle', None):
            self._handshake_timeout_handle.cancel()
        self._wakeup_waiter(exc)
        # Drop remaining references so the GC can reclaim the pipeline.
        self._app_protocol = None
        self._sslpipe = None
|
| 511 |
+
|
| 512 |
+
    def pause_writing(self):
        """Called when the low-level transport's buffer goes over
        the high-water mark.
        """
        # Forward flow-control notification to the application protocol.
        self._app_protocol.pause_writing()
|
| 517 |
+
|
| 518 |
+
    def resume_writing(self):
        """Called when the low-level transport's buffer drains below
        the low-water mark.
        """
        # Forward flow-control notification to the application protocol.
        self._app_protocol.resume_writing()
|
| 523 |
+
|
| 524 |
+
    def data_received(self, data):
        """Called when some SSL data is received.

        The argument is a bytes object.
        """
        if self._sslpipe is None:
            # transport closing, sslpipe is destroyed
            return

        try:
            # Feed encrypted bytes in; get back (bytes to send on the wire,
            # decrypted application data) from the in-memory SSL object.
            ssldata, appdata = self._sslpipe.feed_ssldata(data)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as e:
            self._fatal_error(e, 'SSL error in data received')
            return

        # Any protocol-level responses (e.g. handshake records) go straight
        # back out on the raw transport.
        for chunk in ssldata:
            self._transport.write(chunk)

        for chunk in appdata:
            if chunk:
                try:
                    if self._app_protocol_is_buffer:
                        protocols._feed_data_to_buffered_proto(
                            self._app_protocol, chunk)
                    else:
                        self._app_protocol.data_received(chunk)
                except (SystemExit, KeyboardInterrupt):
                    raise
                except BaseException as ex:
                    self._fatal_error(
                        ex, 'application protocol failed to receive SSL data')
                    return
            else:
                # An empty chunk signals the peer sent close_notify:
                # begin our side of the SSL shutdown.
                self._start_shutdown()
                break
|
| 561 |
+
|
| 562 |
+
    def eof_received(self):
        """Called when the other end of the low-level stream
        is half-closed.

        If this returns a false value (including None), the transport
        will close itself. If it returns a true value, closing the
        transport is up to the protocol.
        """
        try:
            if self._loop.get_debug():
                logger.debug("%r received EOF", self)

            # An EOF before the handshake/shutdown completed is an abrupt
            # close; unblock any waiter with ConnectionResetError.
            self._wakeup_waiter(ConnectionResetError)

            if not self._in_handshake:
                keep_open = self._app_protocol.eof_received()
                if keep_open:
                    # Keeping the connection open is not supported over SSL;
                    # warn and close anyway.
                    logger.warning('returning true from eof_received() '
                                   'has no effect when using ssl')
        finally:
            # Always close the raw transport (implicitly returns None, so
            # the caller would close it too).
            self._transport.close()
|
| 583 |
+
|
| 584 |
+
def _get_extra_info(self, name, default=None):
|
| 585 |
+
if name in self._extra:
|
| 586 |
+
return self._extra[name]
|
| 587 |
+
elif self._transport is not None:
|
| 588 |
+
return self._transport.get_extra_info(name, default)
|
| 589 |
+
else:
|
| 590 |
+
return default
|
| 591 |
+
|
| 592 |
+
    def _start_shutdown(self):
        """Begin the SSL shutdown sequence (idempotent).

        During the handshake a shutdown is not possible, so the
        connection is aborted instead.
        """
        if self._in_shutdown:
            return
        if self._in_handshake:
            self._abort()
        else:
            self._in_shutdown = True
            # An empty appdata write is the sentinel that triggers
            # _SSLPipe.shutdown() in _process_write_backlog().
            self._write_appdata(b'')
|
| 600 |
+
|
| 601 |
+
    def _write_appdata(self, data):
        """Queue application data for encryption and try to flush it.

        Each backlog entry is a (data, offset) pair; offset tracks how
        much of the chunk has already been fed to the SSL pipe.
        """
        self._write_backlog.append((data, 0))
        self._write_buffer_size += len(data)
        self._process_write_backlog()
|
| 605 |
+
|
| 606 |
+
    def _start_handshake(self):
        """Kick off the SSL handshake and arm its timeout timer."""
        if self._loop.get_debug():
            logger.debug("%r starts SSL handshake", self)
            # Remember the start time only in debug mode, so completion
            # can log the handshake duration.
            self._handshake_start_time = self._loop.time()
        else:
            self._handshake_start_time = None
        self._in_handshake = True
        # (b'', 1) is a special value in _process_write_backlog() to do
        # the SSL handshake
        self._write_backlog.append((b'', 1))
        self._handshake_timeout_handle = \
            self._loop.call_later(self._ssl_handshake_timeout,
                                  self._check_handshake_timeout)
        self._process_write_backlog()
|
| 620 |
+
|
| 621 |
+
    def _check_handshake_timeout(self):
        """Timer callback: abort the connection if the handshake is
        still in progress when the timeout fires.

        If the handshake already finished, the timer expiring is a
        harmless no-op.
        """
        if self._in_handshake is True:
            msg = (
                f"SSL handshake is taking longer than "
                f"{self._ssl_handshake_timeout} seconds: "
                f"aborting the connection"
            )
            self._fatal_error(ConnectionAbortedError(msg))
|
| 629 |
+
|
| 630 |
+
    def _on_handshake_complete(self, handshake_exc):
        """Finish the handshake: verify the result, publish extra info,
        notify the app protocol and start flushing buffered writes.

        *handshake_exc* is the exception raised during the handshake,
        or None on success.
        """
        self._in_handshake = False
        self._handshake_timeout_handle.cancel()

        sslobj = self._sslpipe.ssl_object
        try:
            if handshake_exc is not None:
                raise handshake_exc

            peercert = sslobj.getpeercert()
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            # Distinguish certificate-verification failures for a more
            # helpful error message.
            if isinstance(exc, ssl.CertificateError):
                msg = 'SSL handshake failed on verifying the certificate'
            else:
                msg = 'SSL handshake failed'
            self._fatal_error(exc, msg)
            return

        if self._loop.get_debug():
            dt = self._loop.time() - self._handshake_start_time
            logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)

        # Add extra info that becomes available after handshake.
        self._extra.update(peercert=peercert,
                           cipher=sslobj.cipher(),
                           compression=sslobj.compression(),
                           ssl_object=sslobj,
                           )
        if self._call_connection_made:
            self._app_protocol.connection_made(self._app_transport)
        self._wakeup_waiter()
        self._session_established = True
        # In case transport.write() was already called. Don't call
        # immediately _process_write_backlog(), but schedule it:
        # _on_handshake_complete() can be called indirectly from
        # _process_write_backlog(), and _process_write_backlog() is not
        # reentrant.
        self._loop.call_soon(self._process_write_backlog)
|
| 670 |
+
|
| 671 |
+
    def _process_write_backlog(self):
        # Try to make progress on the write backlog.
        # NOTE: this method is NOT reentrant (see _on_handshake_complete).
        if self._transport is None or self._sslpipe is None:
            return

        try:
            # Bounded by the current backlog length so newly appended
            # entries are picked up on a later call, not in this loop.
            for i in range(len(self._write_backlog)):
                data, offset = self._write_backlog[0]
                if data:
                    # Regular application data: encrypt from `offset` on.
                    ssldata, offset = self._sslpipe.feed_appdata(data, offset)
                elif offset:
                    # (b'', 1) sentinel queued by _start_handshake().
                    ssldata = self._sslpipe.do_handshake(
                        self._on_handshake_complete)
                    offset = 1
                else:
                    # (b'', 0) sentinel queued by _start_shutdown().
                    ssldata = self._sslpipe.shutdown(self._finalize)
                    offset = 1

                for chunk in ssldata:
                    self._transport.write(chunk)

                if offset < len(data):
                    self._write_backlog[0] = (data, offset)
                    # A short write means that a write is blocked on a read
                    # We need to enable reading if it is paused!
                    assert self._sslpipe.need_ssldata
                    if self._transport._paused:
                        self._transport.resume_reading()
                    break

                # An entire chunk from the backlog was processed. We can
                # delete it and reduce the outstanding buffer size.
                del self._write_backlog[0]
                self._write_buffer_size -= len(data)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            if self._in_handshake:
                # Exceptions will be re-raised in _on_handshake_complete.
                self._on_handshake_complete(exc)
            else:
                self._fatal_error(exc, 'Fatal error on SSL transport')
|
| 713 |
+
|
| 714 |
+
    def _fatal_error(self, exc, message='Fatal error on transport'):
        """Report an unrecoverable error and force-close the transport.

        OSError is considered a "normal" network failure and is only
        logged in debug mode; anything else goes to the loop's
        exception handler.
        """
        if isinstance(exc, OSError):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self._transport,
                'protocol': self,
            })
        if self._transport:
            # Close immediately without flushing; connection_lost() will
            # receive `exc`.
            self._transport._force_close(exc)
|
| 727 |
+
|
| 728 |
+
def _finalize(self):
|
| 729 |
+
self._sslpipe = None
|
| 730 |
+
|
| 731 |
+
if self._transport is not None:
|
| 732 |
+
self._transport.close()
|
| 733 |
+
|
| 734 |
+
    def _abort(self):
        """Abort the underlying transport immediately, then finalize.

        _finalize() runs even if abort() raises, so the SSL pipe is
        always released.
        """
        try:
            if self._transport is not None:
                self._transport.abort()
        finally:
            self._finalize()
|
parrot/lib/python3.10/asyncio/staggered.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Support for running coroutines in parallel with staggered start times."""
|
| 2 |
+
|
| 3 |
+
__all__ = 'staggered_race',
|
| 4 |
+
|
| 5 |
+
import contextlib
|
| 6 |
+
import typing
|
| 7 |
+
|
| 8 |
+
from . import events
|
| 9 |
+
from . import exceptions as exceptions_mod
|
| 10 |
+
from . import locks
|
| 11 |
+
from . import tasks
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
async def staggered_race(
        coro_fns: typing.Iterable[typing.Callable[[], typing.Awaitable]],
        delay: typing.Optional[float],
        *,
        loop: events.AbstractEventLoop = None,
) -> typing.Tuple[
    typing.Any,
    typing.Optional[int],
    typing.List[typing.Optional[Exception]]
]:
    """Run coroutines with staggered start times and take the first to finish.

    This method takes an iterable of coroutine functions. The first one is
    started immediately. From then on, whenever the immediately preceding one
    fails (raises an exception), or when *delay* seconds has passed, the next
    coroutine is started. This continues until one of the coroutines complete
    successfully, in which case all others are cancelled, or until all
    coroutines fail.

    The coroutines provided should be well-behaved in the following way:

    * They should only ``return`` if completed successfully.

    * They should always raise an exception if they did not complete
      successfully. In particular, if they handle cancellation, they should
      probably reraise, like this::

        try:
            # do work
        except asyncio.CancelledError:
            # undo partially completed work
            raise

    Args:
        coro_fns: an iterable of coroutine functions, i.e. callables that
            return a coroutine object when called. Use ``functools.partial`` or
            lambdas to pass arguments.

        delay: amount of time, in seconds, between starting coroutines. If
            ``None``, the coroutines will run sequentially.

        loop: the event loop to use.

    Returns:
        tuple *(winner_result, winner_index, exceptions)* where

        - *winner_result*: the result of the winning coroutine, or ``None``
          if no coroutines won.

        - *winner_index*: the index of the winning coroutine in
          ``coro_fns``, or ``None`` if no coroutines won. If the winning
          coroutine may return None on success, *winner_index* can be used
          to definitively determine whether any coroutine won.

        - *exceptions*: list of exceptions returned by the coroutines.
          ``len(exceptions)`` is equal to the number of coroutines actually
          started, and the order is the same as in ``coro_fns``. The winning
          coroutine's entry is ``None``.

    """
    # TODO: when we have aiter() and anext(), allow async iterables in coro_fns.
    loop = loop or events.get_running_loop()
    # Shared iterator: each run_one_coro() call pulls the next coroutine fn.
    enum_coro_fns = enumerate(coro_fns)
    winner_result = None
    winner_index = None
    exceptions = []
    running_tasks = []

    async def run_one_coro(
            previous_failed: typing.Optional[locks.Event]) -> None:
        # Wait for the previous task to finish, or for delay seconds
        if previous_failed is not None:
            with contextlib.suppress(exceptions_mod.TimeoutError):
                # Use asyncio.wait_for() instead of asyncio.wait() here, so
                # that if we get cancelled at this point, Event.wait() is also
                # cancelled, otherwise there will be a "Task destroyed but it is
                # pending" later.
                await tasks.wait_for(previous_failed.wait(), delay)
        # Get the next coroutine to run
        try:
            this_index, coro_fn = next(enum_coro_fns)
        except StopIteration:
            return
        # Start task that will run the next coroutine
        this_failed = locks.Event()
        next_task = loop.create_task(run_one_coro(this_failed))
        running_tasks.append(next_task)
        assert len(running_tasks) == this_index + 2
        # Prepare place to put this coroutine's exceptions if not won
        exceptions.append(None)
        assert len(exceptions) == this_index + 1

        try:
            result = await coro_fn()
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as e:
            exceptions[this_index] = e
            this_failed.set()  # Kickstart the next coroutine
        else:
            # Store winner's results
            nonlocal winner_index, winner_result
            assert winner_index is None
            winner_index = this_index
            winner_result = result
            # Cancel all other tasks. We take care to not cancel the current
            # task as well. If we do so, then since there is no `await` after
            # here and CancelledError are usually thrown at one, we will
            # encounter a curious corner case where the current task will end
            # up as done() == True, cancelled() == False, exception() ==
            # asyncio.CancelledError. This behavior is specified in
            # https://bugs.python.org/issue30048
            for i, t in enumerate(running_tasks):
                if i != this_index:
                    t.cancel()

    first_task = loop.create_task(run_one_coro(None))
    running_tasks.append(first_task)
    try:
        # Wait for a growing list of tasks to all finish: poor man's version of
        # curio's TaskGroup or trio's nursery
        done_count = 0
        while done_count != len(running_tasks):
            done, _ = await tasks.wait(running_tasks)
            done_count = len(done)
            # If run_one_coro raises an unhandled exception, it's probably a
            # programming error, and I want to see it.
            if __debug__:
                for d in done:
                    if d.done() and not d.cancelled() and d.exception():
                        raise d.exception()
        return winner_result, winner_index, exceptions
    finally:
        # Make sure no tasks are left running if we leave this function
        for t in running_tasks:
            t.cancel()
|
parrot/lib/python3.10/asyncio/streams.py
ADDED
|
@@ -0,0 +1,726 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = (
|
| 2 |
+
'StreamReader', 'StreamWriter', 'StreamReaderProtocol',
|
| 3 |
+
'open_connection', 'start_server')
|
| 4 |
+
|
| 5 |
+
import collections
|
| 6 |
+
import socket
|
| 7 |
+
import sys
|
| 8 |
+
import warnings
|
| 9 |
+
import weakref
|
| 10 |
+
|
| 11 |
+
if hasattr(socket, 'AF_UNIX'):
|
| 12 |
+
__all__ += ('open_unix_connection', 'start_unix_server')
|
| 13 |
+
|
| 14 |
+
from . import coroutines
|
| 15 |
+
from . import events
|
| 16 |
+
from . import exceptions
|
| 17 |
+
from . import format_helpers
|
| 18 |
+
from . import protocols
|
| 19 |
+
from .log import logger
|
| 20 |
+
from .tasks import sleep
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
_DEFAULT_LIMIT = 2 ** 16 # 64 KiB
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
async def open_connection(host=None, port=None, *,
                          limit=_DEFAULT_LIMIT, **kwds):
    """A wrapper for create_connection() returning a (reader, writer) pair.

    The reader returned is a StreamReader instance; the writer is a
    StreamWriter instance.

    The arguments are all the usual arguments to create_connection()
    except protocol_factory; most common are positional host and port,
    with various optional keyword arguments following.

    The only additional optional keyword argument is limit (to set the
    buffer limit passed to the StreamReader).

    (If you want to customize the StreamReader and/or
    StreamReaderProtocol classes, just copy the code -- there's
    really nothing special here except some convenience.)
    """
    # Must be called from a coroutine: the running loop is used.
    loop = events.get_running_loop()
    reader = StreamReader(limit=limit, loop=loop)
    protocol = StreamReaderProtocol(reader, loop=loop)
    transport, _ = await loop.create_connection(
        lambda: protocol, host, port, **kwds)
    writer = StreamWriter(transport, protocol, reader, loop)
    return reader, writer
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
async def start_server(client_connected_cb, host=None, port=None, *,
                       limit=_DEFAULT_LIMIT, **kwds):
    """Start a socket server, call back for each client connected.

    The first parameter, `client_connected_cb`, takes two parameters:
    client_reader, client_writer. client_reader is a StreamReader
    object, while client_writer is a StreamWriter object. This
    parameter can either be a plain callback function or a coroutine;
    if it is a coroutine, it will be automatically converted into a
    Task.

    The rest of the arguments are all the usual arguments to
    loop.create_server() except protocol_factory; most common are
    positional host and port, with various optional keyword arguments
    following. The return value is the same as loop.create_server().

    The only additional optional keyword argument is limit (to set the
    buffer limit passed to the StreamReader).

    The return value is the same as loop.create_server(), i.e. a
    Server object which can be used to stop the service.
    """
    loop = events.get_running_loop()

    def factory():
        # A fresh reader/protocol pair per accepted connection.
        reader = StreamReader(limit=limit, loop=loop)
        protocol = StreamReaderProtocol(reader, client_connected_cb,
                                        loop=loop)
        return protocol

    return await loop.create_server(factory, host, port, **kwds)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
if hasattr(socket, 'AF_UNIX'):
|
| 89 |
+
# UNIX Domain Sockets are supported on this platform
|
| 90 |
+
|
| 91 |
+
    async def open_unix_connection(path=None, *,
                                   limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `open_connection` but works with UNIX Domain Sockets."""
        loop = events.get_running_loop()

        reader = StreamReader(limit=limit, loop=loop)
        protocol = StreamReaderProtocol(reader, loop=loop)
        transport, _ = await loop.create_unix_connection(
            lambda: protocol, path, **kwds)
        writer = StreamWriter(transport, protocol, reader, loop)
        return reader, writer
|
| 102 |
+
|
| 103 |
+
    async def start_unix_server(client_connected_cb, path=None, *,
                                limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `start_server` but works with UNIX Domain Sockets."""
        loop = events.get_running_loop()

        def factory():
            # A fresh reader/protocol pair per accepted connection.
            reader = StreamReader(limit=limit, loop=loop)
            protocol = StreamReaderProtocol(reader, client_connected_cb,
                                            loop=loop)
            return protocol

        return await loop.create_unix_server(factory, path, **kwds)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class FlowControlMixin(protocols.Protocol):
    """Reusable flow control logic for StreamWriter.drain().

    This implements the protocol methods pause_writing(),
    resume_writing() and connection_lost(). If the subclass overrides
    these it must call the super methods.

    StreamWriter.drain() must wait for _drain_helper() coroutine.
    """

    def __init__(self, loop=None):
        if loop is None:
            self._loop = events._get_event_loop(stacklevel=4)
        else:
            self._loop = loop
        # True while the transport's buffer is over the high-water mark.
        self._paused = False
        # Futures awaited by drain() callers; resolved on resume/close.
        self._drain_waiters = collections.deque()
        self._connection_lost = False

    def pause_writing(self):
        """Transport buffer crossed the high-water mark: block drain()."""
        assert not self._paused
        self._paused = True
        if self._loop.get_debug():
            logger.debug("%r pauses writing", self)

    def resume_writing(self):
        """Transport buffer drained: wake all pending drain() callers."""
        assert self._paused
        self._paused = False
        if self._loop.get_debug():
            logger.debug("%r resumes writing", self)

        for waiter in self._drain_waiters:
            if not waiter.done():
                waiter.set_result(None)

    def connection_lost(self, exc):
        """Record the loss and wake paused drain() callers with the result."""
        self._connection_lost = True
        # Wake up the writer(s) if currently paused.
        if not self._paused:
            return

        for waiter in self._drain_waiters:
            if not waiter.done():
                if exc is None:
                    waiter.set_result(None)
                else:
                    waiter.set_exception(exc)

    async def _drain_helper(self):
        """Block until writing may resume; raise if the connection is gone."""
        if self._connection_lost:
            raise ConnectionResetError('Connection lost')
        if not self._paused:
            return
        waiter = self._loop.create_future()
        self._drain_waiters.append(waiter)
        try:
            await waiter
        finally:
            # Always deregister, even if the wait was cancelled.
            self._drain_waiters.remove(waiter)

    def _get_close_waiter(self, stream):
        # Subclasses must return a future completing on connection close.
        raise NotImplementedError
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
    """Helper class to adapt between Protocol and StreamReader.

    (This is a helper class instead of making StreamReader itself a
    Protocol subclass, because the StreamReader has other potential
    uses, and to prevent the user of the StreamReader to accidentally
    call inappropriate methods of the protocol.)
    """

    _source_traceback = None

    def __init__(self, stream_reader, client_connected_cb=None, loop=None):
        super().__init__(loop=loop)
        if stream_reader is not None:
            # Hold the reader weakly so an abandoned stream can be
            # garbage collected (and the connection rejected) early.
            self._stream_reader_wr = weakref.ref(stream_reader)
            self._source_traceback = stream_reader._source_traceback
        else:
            self._stream_reader_wr = None
        if client_connected_cb is not None:
            # This is a stream created by the `create_server()` function.
            # Keep a strong reference to the reader until a connection
            # is established.
            self._strong_reader = stream_reader
        self._reject_connection = False
        self._stream_writer = None
        self._task = None
        self._transport = None
        self._client_connected_cb = client_connected_cb
        self._over_ssl = False
        self._closed = self._loop.create_future()

    @property
    def _stream_reader(self):
        # Dereference the weakref; None once the reader was collected.
        if self._stream_reader_wr is None:
            return None
        return self._stream_reader_wr()

    def connection_made(self, transport):
        if self._reject_connection:
            # The stream object was garbage collected before the
            # connection was established: refuse it.
            context = {
                'message': ('An open stream was garbage collected prior to '
                            'establishing network connection; '
                            'call "stream.close()" explicitly.')
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
            transport.abort()
            return
        self._transport = transport
        reader = self._stream_reader
        if reader is not None:
            reader.set_transport(transport)
        self._over_ssl = transport.get_extra_info('sslcontext') is not None
        if self._client_connected_cb is not None:
            self._stream_writer = StreamWriter(transport, self,
                                               reader,
                                               self._loop)
            res = self._client_connected_cb(reader,
                                            self._stream_writer)
            if coroutines.iscoroutine(res):
                # Coroutine callbacks are scheduled as a task.
                self._task = self._loop.create_task(res)
            # The connection exists now; the weakref alone keeps the
            # reader alive from here on.
            self._strong_reader = None

    def connection_lost(self, exc):
        reader = self._stream_reader
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)
        if not self._closed.done():
            if exc is None:
                self._closed.set_result(None)
            else:
                self._closed.set_exception(exc)
        super().connection_lost(exc)
        # Break reference cycles so everything can be collected.
        self._stream_reader_wr = None
        self._stream_writer = None
        self._task = None
        self._transport = None

    def data_received(self, data):
        reader = self._stream_reader
        if reader is not None:
            reader.feed_data(data)

    def eof_received(self):
        reader = self._stream_reader
        if reader is not None:
            reader.feed_eof()
        if self._over_ssl:
            # Prevent a warning in SSLProtocol.eof_received:
            # "returning true from eof_received()
            # has no effect when using ssl"
            return False
        # Keep the transport open for half-closed plain-TCP streams.
        return True

    def _get_close_waiter(self, stream):
        return self._closed

    def __del__(self):
        # Prevent reports about unhandled exceptions.
        # Better than self._closed._log_traceback = False hack
        try:
            closed = self._closed
        except AttributeError:
            pass  # failed constructor
        else:
            if closed.done() and not closed.cancelled():
                # Retrieve the exception so the future doesn't log it.
                closed.exception()
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
class StreamWriter:
    """Transport wrapper providing the high-level write-side API.

    Exposes write(), writelines(), [can_]write_eof(), get_extra_info()
    and close().  Adds an awaitable drain() for flow control, plus a
    ``transport`` property that references the transport directly.
    """

    def __init__(self, transport, protocol, reader, loop):
        self._transport = transport
        self._protocol = protocol
        # drain() calls reader.exception(), so whatever is passed here
        # must provide that method.
        assert reader is None or isinstance(reader, StreamReader)
        self._reader = reader
        self._loop = loop
        self._complete_fut = self._loop.create_future()
        self._complete_fut.set_result(None)

    def __repr__(self):
        parts = [self.__class__.__name__, f'transport={self._transport!r}']
        if self._reader is not None:
            parts.append(f'reader={self._reader!r}')
        joined = ' '.join(parts)
        return f'<{joined}>'

    @property
    def transport(self):
        return self._transport

    def write(self, data):
        self._transport.write(data)

    def writelines(self, data):
        self._transport.writelines(data)

    def write_eof(self):
        return self._transport.write_eof()

    def can_write_eof(self):
        return self._transport.can_write_eof()

    def close(self):
        return self._transport.close()

    def is_closing(self):
        return self._transport.is_closing()

    async def wait_closed(self):
        """Block until the underlying connection has been closed."""
        await self._protocol._get_close_waiter(self)

    def get_extra_info(self, name, default=None):
        return self._transport.get_extra_info(name, default)

    async def drain(self):
        """Flush the write buffer.

        The intended use is::

            w.write(data)
            await w.drain()
        """
        reader = self._reader
        if reader is not None:
            exc = reader.exception()
            if exc is not None:
                raise exc
        if self._transport.is_closing():
            # The transport is shutting down: yield once to the event
            # loop so connection_lost() can run and record the close
            # error (ConnectionResetError otherwise).  Without this
            # yield, _drain_helper() would return immediately and a
            #   write(...); await drain()
            # loop would never observe the closed socket.
            await sleep(0)
        await self._protocol._drain_helper()
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
class StreamReader:
    """Buffered stream reader.

    A protocol (e.g. StreamReaderProtocol) pushes bytes in via
    feed_data() and signals end-of-stream via feed_eof(); coroutines
    pull bytes out via read(), readline(), readuntil() and
    readexactly().  The reader also applies flow control: once the
    internal buffer grows beyond twice the configured limit the feeding
    transport is paused, and it is resumed again when the buffer drains
    to the limit or below.
    """

    # Filled with the creation-site traceback when the loop runs in
    # debug mode; helps locate leaked readers.
    _source_traceback = None

    def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
        # The line length limit is a security feature;
        # it also doubles as half the buffer limit.

        if limit <= 0:
            raise ValueError('Limit cannot be <= 0')

        self._limit = limit
        if loop is None:
            self._loop = events._get_event_loop()
        else:
            self._loop = loop
        self._buffer = bytearray()
        self._eof = False    # Whether we're done.
        self._waiter = None  # A future used by _wait_for_data()
        self._exception = None
        self._transport = None
        self._paused = False
        if self._loop.get_debug():
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))

    def __repr__(self):
        """Show only the state flags that are actually set."""
        info = ['StreamReader']
        if self._buffer:
            info.append(f'{len(self._buffer)} bytes')
        if self._eof:
            info.append('eof')
        if self._limit != _DEFAULT_LIMIT:
            info.append(f'limit={self._limit}')
        if self._waiter:
            info.append(f'waiter={self._waiter!r}')
        if self._exception:
            info.append(f'exception={self._exception!r}')
        if self._transport:
            info.append(f'transport={self._transport!r}')
        if self._paused:
            info.append('paused')
        return '<{}>'.format(' '.join(info))

    def exception(self):
        """Return the stored exception, or None if none was set."""
        return self._exception

    def set_exception(self, exc):
        """Store *exc* and wake any coroutine waiting for data with it."""
        self._exception = exc

        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_exception(exc)

    def _wakeup_waiter(self):
        """Wakeup read*() functions waiting for data or EOF."""
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(None)

    def set_transport(self, transport):
        """Attach the transport used for pause/resume flow control."""
        assert self._transport is None, 'Transport already set'
        self._transport = transport

    def _maybe_resume_transport(self):
        # Resume the feeding transport once the buffer has drained back
        # to the limit or below.
        if self._paused and len(self._buffer) <= self._limit:
            self._paused = False
            self._transport.resume_reading()

    def feed_eof(self):
        """Mark end-of-stream and wake any pending read*() coroutine."""
        self._eof = True
        self._wakeup_waiter()

    def at_eof(self):
        """Return True if the buffer is empty and 'feed_eof' was called."""
        return self._eof and not self._buffer

    def feed_data(self, data):
        """Append *data* to the buffer and apply flow control."""
        assert not self._eof, 'feed_data after feed_eof'

        if not data:
            return

        self._buffer.extend(data)
        self._wakeup_waiter()

        # Pause the producer when the buffer exceeds twice the limit.
        if (self._transport is not None and
                not self._paused and
                len(self._buffer) > 2 * self._limit):
            try:
                self._transport.pause_reading()
            except NotImplementedError:
                # The transport can't be paused.
                # We'll just have to buffer all data.
                # Forget the transport so we don't keep trying.
                self._transport = None
            else:
                self._paused = True

    async def _wait_for_data(self, func_name):
        """Wait until feed_data() or feed_eof() is called.

        If stream was paused, automatically resume it.
        """
        # StreamReader uses a future to link the protocol feed_data() method
        # to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not possible to know
        # which coroutine would get the next data.
        if self._waiter is not None:
            raise RuntimeError(
                f'{func_name}() called while another coroutine is '
                f'already waiting for incoming data')

        assert not self._eof, '_wait_for_data after EOF'

        # Waiting for data while paused will make deadlock, so prevent it.
        # This is essential for readexactly(n) for case when n > self._limit.
        if self._paused:
            self._paused = False
            self._transport.resume_reading()

        self._waiter = self._loop.create_future()
        try:
            await self._waiter
        finally:
            self._waiter = None

    async def readline(self):
        """Read chunk of data from the stream until newline (b'\n') is found.

        On success, return chunk that ends with newline. If only partial
        line can be read due to EOF, return incomplete line without
        terminating newline. When EOF was reached while no bytes read, empty
        bytes object is returned.

        If limit is reached, ValueError will be raised. In that case, if
        newline was found, complete line including newline will be removed
        from internal buffer. Else, internal buffer will be cleared. Limit is
        compared against part of the line without newline.

        If stream was paused, this function will automatically resume it if
        needed.
        """
        sep = b'\n'
        seplen = len(sep)
        try:
            line = await self.readuntil(sep)
        except exceptions.IncompleteReadError as e:
            # EOF before a newline: return whatever was read.
            return e.partial
        except exceptions.LimitOverrunError as e:
            # Translate the overrun into ValueError, discarding the
            # offending data first (whole line if the separator was
            # found, otherwise the entire buffer).
            if self._buffer.startswith(sep, e.consumed):
                del self._buffer[:e.consumed + seplen]
            else:
                self._buffer.clear()
            self._maybe_resume_transport()
            raise ValueError(e.args[0])
        return line

    async def readuntil(self, separator=b'\n'):
        """Read data from the stream until ``separator`` is found.

        On success, the data and separator will be removed from the
        internal buffer (consumed). Returned data will include the
        separator at the end.

        Configured stream limit is used to check result. Limit sets the
        maximal length of data that can be returned, not counting the
        separator.

        If an EOF occurs and the complete separator is still not found,
        an IncompleteReadError exception will be raised, and the internal
        buffer will be reset. The IncompleteReadError.partial attribute
        may contain the separator partially.

        If the data cannot be read because of over limit, a
        LimitOverrunError exception will be raised, and the data
        will be left in the internal buffer, so it can be read again.
        """
        seplen = len(separator)
        if seplen == 0:
            raise ValueError('Separator should be at least one-byte string')

        if self._exception is not None:
            raise self._exception

        # Consume whole buffer except last bytes, which length is
        # one less than seplen. Let's check corner cases with
        # separator='SEPARATOR':
        # * we have received almost complete separator (without last
        #   byte). i.e buffer='some textSEPARATO'. In this case we
        #   can safely consume len(separator) - 1 bytes.
        # * last byte of buffer is first byte of separator, i.e.
        #   buffer='abcdefghijklmnopqrS'. We may safely consume
        #   everything except that last byte, but this require to
        #   analyze bytes of buffer that match partial separator.
        #   This is slow and/or require FSM. For this case our
        #   implementation is not optimal, since require rescanning
        #   of data that is known to not belong to separator. In
        #   real world, separator will not be so long to notice
        #   performance problems. Even when reading MIME-encoded
        #   messages :)

        # `offset` is the number of bytes from the beginning of the buffer
        # where there is no occurrence of `separator`.
        offset = 0

        # Loop until we find `separator` in the buffer, exceed the buffer size,
        # or an EOF has happened.
        while True:
            buflen = len(self._buffer)

            # Check if we now have enough data in the buffer for `separator` to
            # fit.
            if buflen - offset >= seplen:
                isep = self._buffer.find(separator, offset)

                if isep != -1:
                    # `separator` is in the buffer. `isep` will be used later
                    # to retrieve the data.
                    break

                # see upper comment for explanation.
                offset = buflen + 1 - seplen
                if offset > self._limit:
                    raise exceptions.LimitOverrunError(
                        'Separator is not found, and chunk exceed the limit',
                        offset)

            # Complete message (with full separator) may be present in buffer
            # even when EOF flag is set. This may happen when the last chunk
            # adds data which makes separator be found. That's why we check for
            # EOF *after* inspecting the buffer.
            if self._eof:
                chunk = bytes(self._buffer)
                self._buffer.clear()
                raise exceptions.IncompleteReadError(chunk, None)

            # _wait_for_data() will resume reading if stream was paused.
            await self._wait_for_data('readuntil')

        if isep > self._limit:
            raise exceptions.LimitOverrunError(
                'Separator is found, but chunk is longer than limit', isep)

        chunk = self._buffer[:isep + seplen]
        del self._buffer[:isep + seplen]
        self._maybe_resume_transport()
        return bytes(chunk)

    async def read(self, n=-1):
        """Read up to `n` bytes from the stream.

        If `n` is not provided or set to -1,
        read until EOF, then return all read bytes.
        If EOF was received and the internal buffer is empty,
        return an empty bytes object.

        If `n` is 0, return an empty bytes object immediately.

        If `n` is positive, return at most `n` available bytes
        as soon as at least 1 byte is available in the internal buffer.
        If EOF is received before any byte is read, return an empty
        bytes object.

        Returned value is not limited with limit, configured at stream
        creation.

        If stream was paused, this function will automatically resume it if
        needed.
        """

        if self._exception is not None:
            raise self._exception

        if n == 0:
            return b''

        if n < 0:
            # This used to just loop creating a new waiter hoping to
            # collect everything in self._buffer, but that would
            # deadlock if the subprocess sends more than self.limit
            # bytes. So just call self.read(self._limit) until EOF.
            blocks = []
            while True:
                block = await self.read(self._limit)
                if not block:
                    break
                blocks.append(block)
            return b''.join(blocks)

        if not self._buffer and not self._eof:
            await self._wait_for_data('read')

        # This will work right even if buffer is less than n bytes
        data = bytes(self._buffer[:n])
        del self._buffer[:n]

        self._maybe_resume_transport()
        return data

    async def readexactly(self, n):
        """Read exactly `n` bytes.

        Raise an IncompleteReadError if EOF is reached before `n` bytes can be
        read. The IncompleteReadError.partial attribute of the exception will
        contain the partial read bytes.

        if n is zero, return empty bytes object.

        Returned value is not limited with limit, configured at stream
        creation.

        If stream was paused, this function will automatically resume it if
        needed.
        """
        if n < 0:
            raise ValueError('readexactly size can not be less than zero')

        if self._exception is not None:
            raise self._exception

        if n == 0:
            return b''

        while len(self._buffer) < n:
            if self._eof:
                incomplete = bytes(self._buffer)
                self._buffer.clear()
                raise exceptions.IncompleteReadError(incomplete, n)

            await self._wait_for_data('readexactly')

        if len(self._buffer) == n:
            # Exact fit: hand over the whole buffer.
            data = bytes(self._buffer)
            self._buffer.clear()
        else:
            data = bytes(self._buffer[:n])
            del self._buffer[:n]
        self._maybe_resume_transport()
        return data

    def __aiter__(self):
        # Support `async for line in reader:` iteration.
        return self

    async def __anext__(self):
        val = await self.readline()
        if val == b'':
            raise StopAsyncIteration
        return val
|
parrot/lib/python3.10/asyncio/trsock.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import socket
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class TransportSocket:

    """A socket-like wrapper for exposing real transport sockets.

    These objects can be safely returned by APIs like
    `transport.get_extra_info('socket')`.  All potentially disruptive
    operations (like "socket.close()") are banned.
    """

    # Single slot keeps instances small and prevents attribute injection.
    __slots__ = ('_sock',)

    def __init__(self, sock: socket.socket):
        self._sock = sock

    def _na(self, what):
        # Emit a deprecation warning for operations that are still
        # delegated to the real socket but will eventually be banned.
        warnings.warn(
            f"Using {what} on sockets returned from get_extra_info('socket') "
            f"will be prohibited in asyncio 3.9. Please report your use case "
            f"to bugs.python.org.",
            DeprecationWarning, source=self)

    # Read-only socket metadata is safe to expose without a warning.
    @property
    def family(self):
        return self._sock.family

    @property
    def type(self):
        return self._sock.type

    @property
    def proto(self):
        return self._sock.proto

    def __repr__(self):
        """Describe the socket; address lookups are best-effort only."""
        s = (
            f"<asyncio.TransportSocket fd={self.fileno()}, "
            f"family={self.family!s}, type={self.type!s}, "
            f"proto={self.proto}"
        )

        if self.fileno() != -1:
            # getsockname()/getpeername() can fail on an unconnected or
            # half-closed socket; omit the address rather than raise.
            try:
                laddr = self.getsockname()
                if laddr:
                    s = f"{s}, laddr={laddr}"
            except socket.error:
                pass
            try:
                raddr = self.getpeername()
                if raddr:
                    s = f"{s}, raddr={raddr}"
            except socket.error:
                pass

        return f"{s}>"

    def __getstate__(self):
        # A transport socket is bound to a live fd; pickling it can
        # never be meaningful.
        raise TypeError("Cannot serialize asyncio.TransportSocket object")

    def fileno(self):
        return self._sock.fileno()

    def dup(self):
        return self._sock.dup()

    def get_inheritable(self):
        return self._sock.get_inheritable()

    def shutdown(self, how):
        # asyncio doesn't currently provide a high-level transport API
        # to shutdown the connection.
        self._sock.shutdown(how)

    def getsockopt(self, *args, **kwargs):
        return self._sock.getsockopt(*args, **kwargs)

    def setsockopt(self, *args, **kwargs):
        self._sock.setsockopt(*args, **kwargs)

    def getpeername(self):
        return self._sock.getpeername()

    def getsockname(self):
        return self._sock.getsockname()

    def getsockbyname(self):
        # NOTE(review): socket.socket does not appear to define a
        # getsockbyname() method, so this delegation would raise
        # AttributeError if ever called — confirm and consider removal.
        return self._sock.getsockbyname()

    # Each method below is potentially disruptive on a socket owned by a
    # transport: it first emits the deprecation warning via _na(), then
    # delegates to the wrapped socket unchanged.
    def accept(self):
        self._na('accept() method')
        return self._sock.accept()

    def connect(self, *args, **kwargs):
        self._na('connect() method')
        return self._sock.connect(*args, **kwargs)

    def connect_ex(self, *args, **kwargs):
        self._na('connect_ex() method')
        return self._sock.connect_ex(*args, **kwargs)

    def bind(self, *args, **kwargs):
        self._na('bind() method')
        return self._sock.bind(*args, **kwargs)

    def ioctl(self, *args, **kwargs):
        self._na('ioctl() method')
        return self._sock.ioctl(*args, **kwargs)

    def listen(self, *args, **kwargs):
        self._na('listen() method')
        return self._sock.listen(*args, **kwargs)

    def makefile(self):
        self._na('makefile() method')
        return self._sock.makefile()

    def sendfile(self, *args, **kwargs):
        self._na('sendfile() method')
        return self._sock.sendfile(*args, **kwargs)

    def close(self):
        self._na('close() method')
        return self._sock.close()

    def detach(self):
        self._na('detach() method')
        return self._sock.detach()

    def sendmsg_afalg(self, *args, **kwargs):
        self._na('sendmsg_afalg() method')
        return self._sock.sendmsg_afalg(*args, **kwargs)

    def sendmsg(self, *args, **kwargs):
        self._na('sendmsg() method')
        return self._sock.sendmsg(*args, **kwargs)

    def sendto(self, *args, **kwargs):
        self._na('sendto() method')
        return self._sock.sendto(*args, **kwargs)

    def send(self, *args, **kwargs):
        self._na('send() method')
        return self._sock.send(*args, **kwargs)

    def sendall(self, *args, **kwargs):
        self._na('sendall() method')
        return self._sock.sendall(*args, **kwargs)

    def set_inheritable(self, *args, **kwargs):
        self._na('set_inheritable() method')
        return self._sock.set_inheritable(*args, **kwargs)

    def share(self, process_id):
        self._na('share() method')
        return self._sock.share(process_id)

    def recv_into(self, *args, **kwargs):
        self._na('recv_into() method')
        return self._sock.recv_into(*args, **kwargs)

    def recvfrom_into(self, *args, **kwargs):
        self._na('recvfrom_into() method')
        return self._sock.recvfrom_into(*args, **kwargs)

    def recvmsg_into(self, *args, **kwargs):
        self._na('recvmsg_into() method')
        return self._sock.recvmsg_into(*args, **kwargs)

    def recvmsg(self, *args, **kwargs):
        self._na('recvmsg() method')
        return self._sock.recvmsg(*args, **kwargs)

    def recvfrom(self, *args, **kwargs):
        self._na('recvfrom() method')
        return self._sock.recvfrom(*args, **kwargs)

    def recv(self, *args, **kwargs):
        self._na('recv() method')
        return self._sock.recv(*args, **kwargs)

    def settimeout(self, value):
        # A transport socket must stay non-blocking; only a zero
        # timeout (i.e. non-blocking mode) is accepted.
        if value == 0:
            return
        raise ValueError(
            'settimeout(): only 0 timeout is allowed on transport sockets')

    def gettimeout(self):
        # Transport sockets are always non-blocking.
        return 0

    def setblocking(self, flag):
        # Only the non-blocking setting is accepted, for the same
        # reason as settimeout() above.
        if not flag:
            return
        raise ValueError(
            'setblocking(): transport sockets cannot be blocking')

    def __enter__(self):
        self._na('context manager protocol')
        return self._sock.__enter__()

    def __exit__(self, *err):
        self._na('context manager protocol')
        return self._sock.__exit__(*err)
|
parrot/lib/python3.10/asyncio/unix_events.py
ADDED
|
@@ -0,0 +1,1466 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Selector event loop for Unix with signal handling."""
|
| 2 |
+
|
| 3 |
+
import errno
|
| 4 |
+
import io
|
| 5 |
+
import itertools
|
| 6 |
+
import os
|
| 7 |
+
import selectors
|
| 8 |
+
import signal
|
| 9 |
+
import socket
|
| 10 |
+
import stat
|
| 11 |
+
import subprocess
|
| 12 |
+
import sys
|
| 13 |
+
import threading
|
| 14 |
+
import warnings
|
| 15 |
+
|
| 16 |
+
from . import base_events
|
| 17 |
+
from . import base_subprocess
|
| 18 |
+
from . import constants
|
| 19 |
+
from . import coroutines
|
| 20 |
+
from . import events
|
| 21 |
+
from . import exceptions
|
| 22 |
+
from . import futures
|
| 23 |
+
from . import selector_events
|
| 24 |
+
from . import tasks
|
| 25 |
+
from . import transports
|
| 26 |
+
from .log import logger
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Public names re-exported by the asyncio package on Unix platforms.
__all__ = (
    'SelectorEventLoop',
    'AbstractChildWatcher', 'SafeChildWatcher',
    'FastChildWatcher', 'PidfdChildWatcher',
    'MultiLoopChildWatcher', 'ThreadedChildWatcher',
    'DefaultEventLoopPolicy',
)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
# This module relies on POSIX signal semantics (signal.set_wakeup_fd,
# siginterrupt, child watchers); refuse to import on Windows.
if sys.platform == 'win32':  # pragma: no cover
    raise ImportError('Signals are not really supported on Windows')
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _sighandler_noop(signum, frame):
|
| 43 |
+
"""Dummy signal handler."""
|
| 44 |
+
pass
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def waitstatus_to_exitcode(status):
    """Convert a raw os.wait() status to an exit code.

    Delegates to os.waitstatus_to_exitcode().  If the status cannot be
    interpreted (which should not happen), the raw status is returned
    unchanged so that it at least surfaces for debugging.
    """
    try:
        exitcode = os.waitstatus_to_exitcode(status)
    except ValueError:
        # The child exited, but the status is not one we understand.
        # Hand the raw value back; perhaps that helps debug it.
        return status
    return exitcode
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
    """Unix event loop.

    Adds signal handling and UNIX Domain Socket support to SelectorEventLoop.
    """

    def __init__(self, selector=None):
        super().__init__(selector)
        # signal number -> events.Handle, installed via add_signal_handler()
        self._signal_handlers = {}

    def close(self):
        """Close the loop and restore default dispositions for our signals."""
        super().close()
        if not sys.is_finalizing():
            for sig in list(self._signal_handlers):
                self.remove_signal_handler(sig)
        else:
            # At interpreter-shutdown stage restoring signal handlers is
            # not safe; drop the bookkeeping and warn instead.
            if self._signal_handlers:
                warnings.warn(f"Closing the loop {self!r} "
                              f"on interpreter shutdown "
                              f"stage, skipping signal handlers removal",
                              ResourceWarning,
                              source=self)
                self._signal_handlers.clear()

    def _process_self_data(self, data):
        # Bytes arriving on the self-pipe are signal numbers written by the
        # C signal machinery (see signal.set_wakeup_fd in add_signal_handler).
        for signum in data:
            if not signum:
                # ignore null bytes written by _write_to_self()
                continue
            self._handle_signal(signum)

    def add_signal_handler(self, sig, callback, *args):
        """Add a handler for a signal. UNIX only.

        Raise ValueError if the signal number is invalid or uncatchable.
        Raise RuntimeError if there is a problem setting up the handler.
        """
        if (coroutines.iscoroutine(callback) or
                coroutines.iscoroutinefunction(callback)):
            raise TypeError("coroutines cannot be used "
                            "with add_signal_handler()")
        self._check_signal(sig)
        self._check_closed()
        try:
            # set_wakeup_fd() raises ValueError if this is not the
            # main thread. By calling it early we ensure that an
            # event loop running in another thread cannot add a signal
            # handler.
            signal.set_wakeup_fd(self._csock.fileno())
        except (ValueError, OSError) as exc:
            raise RuntimeError(str(exc))

        handle = events.Handle(callback, args, self, None)
        self._signal_handlers[sig] = handle

        try:
            # Register a dummy signal handler to ask Python to write the signal
            # number in the wakeup file descriptor. _process_self_data() will
            # read signal numbers from this file descriptor to handle signals.
            signal.signal(sig, _sighandler_noop)

            # Set SA_RESTART to limit EINTR occurrences.
            signal.siginterrupt(sig, False)
        except OSError as exc:
            # Roll back the bookkeeping added above on failure.
            del self._signal_handlers[sig]
            if not self._signal_handlers:
                try:
                    signal.set_wakeup_fd(-1)
                except (ValueError, OSError) as nexc:
                    logger.info('set_wakeup_fd(-1) failed: %s', nexc)

            if exc.errno == errno.EINVAL:
                raise RuntimeError(f'sig {sig} cannot be caught')
            else:
                raise

    def _handle_signal(self, sig):
        """Internal helper that is the actual signal handler."""
        handle = self._signal_handlers.get(sig)
        if handle is None:
            return  # Assume it's some race condition.
        if handle._cancelled:
            self.remove_signal_handler(sig)  # Remove it properly.
        else:
            self._add_callback_signalsafe(handle)

    def remove_signal_handler(self, sig):
        """Remove a handler for a signal. UNIX only.

        Return True if a signal handler was removed, False if not.
        """
        self._check_signal(sig)
        try:
            del self._signal_handlers[sig]
        except KeyError:
            return False

        # Restore the disposition Python itself installs by default.
        if sig == signal.SIGINT:
            handler = signal.default_int_handler
        else:
            handler = signal.SIG_DFL

        try:
            signal.signal(sig, handler)
        except OSError as exc:
            if exc.errno == errno.EINVAL:
                raise RuntimeError(f'sig {sig} cannot be caught')
            else:
                raise

        if not self._signal_handlers:
            # No handlers remain: stop routing signals to the self-pipe.
            try:
                signal.set_wakeup_fd(-1)
            except (ValueError, OSError) as exc:
                logger.info('set_wakeup_fd(-1) failed: %s', exc)

        return True

    def _check_signal(self, sig):
        """Internal helper to validate a signal.

        Raise ValueError if the signal number is invalid or uncatchable.
        Raise RuntimeError if there is a problem setting up the handler.
        """
        if not isinstance(sig, int):
            raise TypeError(f'sig must be an int, not {sig!r}')

        if sig not in signal.valid_signals():
            raise ValueError(f'invalid signal number {sig}')

    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        # Factory hook used by connect_read_pipe().
        return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)

    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        # Factory hook used by connect_write_pipe().
        return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)

    async def _make_subprocess_transport(self, protocol, args, shell,
                                         stdin, stdout, stderr, bufsize,
                                         extra=None, **kwargs):
        """Spawn a subprocess and wire its pipes into a transport."""
        with events.get_child_watcher() as watcher:
            if not watcher.is_active():
                # Check early.
                # Raising exception before process creation
                # prevents subprocess execution if the watcher
                # is not ready to handle it.
                raise RuntimeError("asyncio.get_child_watcher() is not activated, "
                                   "subprocess support is not installed.")
            waiter = self.create_future()
            transp = _UnixSubprocessTransport(self, protocol, args, shell,
                                              stdin, stdout, stderr, bufsize,
                                              waiter=waiter, extra=extra,
                                              **kwargs)

            watcher.add_child_handler(transp.get_pid(),
                                      self._child_watcher_callback, transp)
            try:
                await waiter
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException:
                # Setup failed: tear down the transport before re-raising.
                transp.close()
                await transp._wait()
                raise

        return transp

    def _child_watcher_callback(self, pid, returncode, transp):
        # Skip one iteration for callbacks to be executed
        self.call_soon_threadsafe(self.call_soon, transp._process_exited, returncode)

    async def create_unix_connection(
            self, protocol_factory, path=None, *,
            ssl=None, sock=None,
            server_hostname=None,
            ssl_handshake_timeout=None):
        """Connect to a UNIX domain stream socket; return (transport, protocol).

        Exactly one of *path* or *sock* must be given; *server_hostname*
        and *ssl_handshake_timeout* are only meaningful together with *ssl*.
        """
        assert server_hostname is None or isinstance(server_hostname, str)
        if ssl:
            if server_hostname is None:
                raise ValueError(
                    'you have to pass server_hostname when using ssl')
        else:
            if server_hostname is not None:
                raise ValueError('server_hostname is only meaningful with ssl')
            if ssl_handshake_timeout is not None:
                raise ValueError(
                    'ssl_handshake_timeout is only meaningful with ssl')

        if path is not None:
            if sock is not None:
                raise ValueError(
                    'path and sock can not be specified at the same time')

            path = os.fspath(path)
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
            try:
                sock.setblocking(False)
                await self.sock_connect(sock, path)
            except:
                # We created the socket, so we must close it on any failure.
                sock.close()
                raise

        else:
            if sock is None:
                raise ValueError('no path and sock were specified')
            if (sock.family != socket.AF_UNIX or
                    sock.type != socket.SOCK_STREAM):
                raise ValueError(
                    f'A UNIX Domain Stream Socket was expected, got {sock!r}')
            sock.setblocking(False)

        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
        return transport, protocol

    async def create_unix_server(
            self, protocol_factory, path=None, *,
            sock=None, backlog=100, ssl=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """Create a UNIX domain socket server; return a Server object.

        Exactly one of *path* or *sock* must be given.  A stale socket
        file at *path* is removed before binding.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')

        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')

        if path is not None:
            if sock is not None:
                raise ValueError(
                    'path and sock can not be specified at the same time')

            path = os.fspath(path)
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

            # Check for abstract socket. `str` and `bytes` paths are supported.
            if path[0] not in (0, '\x00'):
                try:
                    if stat.S_ISSOCK(os.stat(path).st_mode):
                        os.remove(path)
                except FileNotFoundError:
                    pass
                except OSError as err:
                    # Directory may have permissions only to create socket.
                    logger.error('Unable to check or remove stale UNIX socket '
                                 '%r: %r', path, err)

            try:
                sock.bind(path)
            except OSError as exc:
                sock.close()
                if exc.errno == errno.EADDRINUSE:
                    # Let's improve the error message by adding
                    # with what exact address it occurs.
                    msg = f'Address {path!r} is already in use'
                    raise OSError(errno.EADDRINUSE, msg) from None
                else:
                    raise
            except:
                sock.close()
                raise
        else:
            if sock is None:
                raise ValueError(
                    'path was not specified, and no sock specified')

            if (sock.family != socket.AF_UNIX or
                    sock.type != socket.SOCK_STREAM):
                raise ValueError(
                    f'A UNIX Domain Stream Socket was expected, got {sock!r}')

        sock.setblocking(False)
        server = base_events.Server(self, [sock], protocol_factory,
                                    ssl, backlog, ssl_handshake_timeout)
        if start_serving:
            server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # go through.
            await tasks.sleep(0)

        return server

    async def _sock_sendfile_native(self, sock, file, offset, count):
        """Send *file* over *sock* using os.sendfile(); return bytes sent.

        Raises SendfileNotAvailableError when os.sendfile cannot be used
        (missing, or *file* has no usable fileno), letting the caller fall
        back to the generic implementation.
        """
        try:
            os.sendfile
        except AttributeError:
            raise exceptions.SendfileNotAvailableError(
                "os.sendfile() is not available")
        try:
            fileno = file.fileno()
        except (AttributeError, io.UnsupportedOperation) as err:
            raise exceptions.SendfileNotAvailableError("not a regular file")
        try:
            fsize = os.fstat(fileno).st_size
        except OSError:
            raise exceptions.SendfileNotAvailableError("not a regular file")
        blocksize = count if count else fsize
        if not blocksize:
            return 0  # empty file

        fut = self.create_future()
        # Kick off the (re-entrant) state machine; it completes `fut`.
        self._sock_sendfile_native_impl(fut, None, sock, fileno,
                                        offset, count, blocksize, 0)
        return await fut

    def _sock_sendfile_native_impl(self, fut, registered_fd, sock, fileno,
                                   offset, count, blocksize, total_sent):
        # One step of the sendfile state machine.  `registered_fd` is None on
        # the first call and the socket fd when re-invoked by add_writer().
        fd = sock.fileno()
        if registered_fd is not None:
            # Remove the callback early. It should be rare that the
            # selector says the fd is ready but the call still returns
            # EAGAIN, and I am willing to take a hit in that case in
            # order to simplify the common case.
            self.remove_writer(registered_fd)
        if fut.cancelled():
            self._sock_sendfile_update_filepos(fileno, offset, total_sent)
            return
        if count:
            blocksize = count - total_sent
            if blocksize <= 0:
                # Requested byte count fully sent.
                self._sock_sendfile_update_filepos(fileno, offset, total_sent)
                fut.set_result(total_sent)
                return

        try:
            sent = os.sendfile(fd, fileno, offset, blocksize)
        except (BlockingIOError, InterruptedError):
            # Socket not writable yet: retry when the selector says so.
            if registered_fd is None:
                self._sock_add_cancellation_callback(fut, sock)
            self.add_writer(fd, self._sock_sendfile_native_impl, fut,
                            fd, sock, fileno,
                            offset, count, blocksize, total_sent)
        except OSError as exc:
            if (registered_fd is not None and
                    exc.errno == errno.ENOTCONN and
                    type(exc) is not ConnectionError):
                # If we have an ENOTCONN and this isn't a first call to
                # sendfile(), i.e. the connection was closed in the middle
                # of the operation, normalize the error to ConnectionError
                # to make it consistent across all Posix systems.
                new_exc = ConnectionError(
                    "socket is not connected", errno.ENOTCONN)
                new_exc.__cause__ = exc
                exc = new_exc
            if total_sent == 0:
                # We can get here for different reasons, the main
                # one being 'file' is not a regular mmap(2)-like
                # file, in which case we'll fall back on using
                # plain send().
                err = exceptions.SendfileNotAvailableError(
                    "os.sendfile call failed")
                self._sock_sendfile_update_filepos(fileno, offset, total_sent)
                fut.set_exception(err)
            else:
                self._sock_sendfile_update_filepos(fileno, offset, total_sent)
                fut.set_exception(exc)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            self._sock_sendfile_update_filepos(fileno, offset, total_sent)
            fut.set_exception(exc)
        else:
            if sent == 0:
                # EOF
                self._sock_sendfile_update_filepos(fileno, offset, total_sent)
                fut.set_result(total_sent)
            else:
                offset += sent
                total_sent += sent
                if registered_fd is None:
                    self._sock_add_cancellation_callback(fut, sock)
                self.add_writer(fd, self._sock_sendfile_native_impl, fut,
                                fd, sock, fileno,
                                offset, count, blocksize, total_sent)

    def _sock_sendfile_update_filepos(self, fileno, offset, total_sent):
        # Keep the file object's position consistent with what was sent,
        # since os.sendfile() does not advance it on all platforms.
        if total_sent > 0:
            os.lseek(fileno, offset, os.SEEK_SET)

    def _sock_add_cancellation_callback(self, fut, sock):
        # Ensure the writer callback is dropped if the future is cancelled.
        def cb(fut):
            if fut.cancelled():
                fd = sock.fileno()
                if fd != -1:
                    self.remove_writer(fd)
        fut.add_done_callback(cb)
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
class _UnixReadPipeTransport(transports.ReadTransport):
    """Read-side transport over a non-blocking pipe-like fd.

    Accepts FIFOs, sockets and character devices (checked in __init__).
    """

    max_size = 256 * 1024  # max bytes we read in one event loop iteration

    def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
        super().__init__(extra)
        self._extra['pipe'] = pipe
        self._loop = loop
        self._pipe = pipe
        self._fileno = pipe.fileno()
        self._protocol = protocol
        self._closing = False
        self._paused = False

        mode = os.fstat(self._fileno).st_mode
        if not (stat.S_ISFIFO(mode) or
                stat.S_ISSOCK(mode) or
                stat.S_ISCHR(mode)):
            # Reject regular files etc.; clear state so __del__ stays quiet.
            self._pipe = None
            self._fileno = None
            self._protocol = None
            raise ValueError("Pipe transport is for pipes/sockets only.")

        os.set_blocking(self._fileno, False)

        self._loop.call_soon(self._protocol.connection_made, self)
        # only start reading when connection_made() has been called
        self._loop.call_soon(self._loop._add_reader,
                             self._fileno, self._read_ready)
        if waiter is not None:
            # only wake up the waiter when connection_made() has been called
            self._loop.call_soon(futures._set_result_unless_cancelled,
                                 waiter, None)

    def __repr__(self):
        info = [self.__class__.__name__]
        if self._pipe is None:
            info.append('closed')
        elif self._closing:
            info.append('closing')
        info.append(f'fd={self._fileno}')
        selector = getattr(self._loop, '_selector', None)
        if self._pipe is not None and selector is not None:
            polling = selector_events._test_selector_event(
                selector, self._fileno, selectors.EVENT_READ)
            if polling:
                info.append('polling')
            else:
                info.append('idle')
        elif self._pipe is not None:
            info.append('open')
        else:
            info.append('closed')
        return '<{}>'.format(' '.join(info))

    def _read_ready(self):
        # Event-loop callback: the fd became readable.
        try:
            data = os.read(self._fileno, self.max_size)
        except (BlockingIOError, InterruptedError):
            pass
        except OSError as exc:
            self._fatal_error(exc, 'Fatal read error on pipe transport')
        else:
            if data:
                self._protocol.data_received(data)
            else:
                # Zero-byte read means the write end was closed (EOF).
                if self._loop.get_debug():
                    logger.info("%r was closed by peer", self)
                self._closing = True
                self._loop._remove_reader(self._fileno)
                self._loop.call_soon(self._protocol.eof_received)
                self._loop.call_soon(self._call_connection_lost, None)

    def pause_reading(self):
        """Stop delivering data_received() until resume_reading()."""
        if self._closing or self._paused:
            return
        self._paused = True
        self._loop._remove_reader(self._fileno)
        if self._loop.get_debug():
            logger.debug("%r pauses reading", self)

    def resume_reading(self):
        """Resume delivering data_received() after pause_reading()."""
        if self._closing or not self._paused:
            return
        self._paused = False
        self._loop._add_reader(self._fileno, self._read_ready)
        if self._loop.get_debug():
            logger.debug("%r resumes reading", self)

    def set_protocol(self, protocol):
        self._protocol = protocol

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._closing

    def close(self):
        if not self._closing:
            self._close(None)

    def __del__(self, _warn=warnings.warn):
        # Safety net: warn about and close a transport the user forgot to close.
        if self._pipe is not None:
            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
            self._pipe.close()

    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
        # should be called by exception handler only
        # EIO here simply means the peer hung up; not worth an error report.
        if (isinstance(exc, OSError) and exc.errno == errno.EIO):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })
        self._close(exc)

    def _close(self, exc):
        # Stop reading now; defer connection_lost() to the next iteration.
        self._closing = True
        self._loop._remove_reader(self._fileno)
        self._loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            # Break reference cycles and release the pipe.
            self._pipe.close()
            self._pipe = None
            self._protocol = None
            self._loop = None
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
class _UnixWritePipeTransport(transports._FlowControlMixin,
|
| 584 |
+
transports.WriteTransport):
|
| 585 |
+
|
| 586 |
+
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
|
| 587 |
+
super().__init__(extra, loop)
|
| 588 |
+
self._extra['pipe'] = pipe
|
| 589 |
+
self._pipe = pipe
|
| 590 |
+
self._fileno = pipe.fileno()
|
| 591 |
+
self._protocol = protocol
|
| 592 |
+
self._buffer = bytearray()
|
| 593 |
+
self._conn_lost = 0
|
| 594 |
+
self._closing = False # Set when close() or write_eof() called.
|
| 595 |
+
|
| 596 |
+
mode = os.fstat(self._fileno).st_mode
|
| 597 |
+
is_char = stat.S_ISCHR(mode)
|
| 598 |
+
is_fifo = stat.S_ISFIFO(mode)
|
| 599 |
+
is_socket = stat.S_ISSOCK(mode)
|
| 600 |
+
if not (is_char or is_fifo or is_socket):
|
| 601 |
+
self._pipe = None
|
| 602 |
+
self._fileno = None
|
| 603 |
+
self._protocol = None
|
| 604 |
+
raise ValueError("Pipe transport is only for "
|
| 605 |
+
"pipes, sockets and character devices")
|
| 606 |
+
|
| 607 |
+
os.set_blocking(self._fileno, False)
|
| 608 |
+
self._loop.call_soon(self._protocol.connection_made, self)
|
| 609 |
+
|
| 610 |
+
# On AIX, the reader trick (to be notified when the read end of the
|
| 611 |
+
# socket is closed) only works for sockets. On other platforms it
|
| 612 |
+
# works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.)
|
| 613 |
+
if is_socket or (is_fifo and not sys.platform.startswith("aix")):
|
| 614 |
+
# only start reading when connection_made() has been called
|
| 615 |
+
self._loop.call_soon(self._loop._add_reader,
|
| 616 |
+
self._fileno, self._read_ready)
|
| 617 |
+
|
| 618 |
+
if waiter is not None:
|
| 619 |
+
# only wake up the waiter when connection_made() has been called
|
| 620 |
+
self._loop.call_soon(futures._set_result_unless_cancelled,
|
| 621 |
+
waiter, None)
|
| 622 |
+
|
| 623 |
+
def __repr__(self):
|
| 624 |
+
info = [self.__class__.__name__]
|
| 625 |
+
if self._pipe is None:
|
| 626 |
+
info.append('closed')
|
| 627 |
+
elif self._closing:
|
| 628 |
+
info.append('closing')
|
| 629 |
+
info.append(f'fd={self._fileno}')
|
| 630 |
+
selector = getattr(self._loop, '_selector', None)
|
| 631 |
+
if self._pipe is not None and selector is not None:
|
| 632 |
+
polling = selector_events._test_selector_event(
|
| 633 |
+
selector, self._fileno, selectors.EVENT_WRITE)
|
| 634 |
+
if polling:
|
| 635 |
+
info.append('polling')
|
| 636 |
+
else:
|
| 637 |
+
info.append('idle')
|
| 638 |
+
|
| 639 |
+
bufsize = self.get_write_buffer_size()
|
| 640 |
+
info.append(f'bufsize={bufsize}')
|
| 641 |
+
elif self._pipe is not None:
|
| 642 |
+
info.append('open')
|
| 643 |
+
else:
|
| 644 |
+
info.append('closed')
|
| 645 |
+
return '<{}>'.format(' '.join(info))
|
| 646 |
+
|
| 647 |
+
def get_write_buffer_size(self):
|
| 648 |
+
return len(self._buffer)
|
| 649 |
+
|
| 650 |
+
def _read_ready(self):
|
| 651 |
+
# Pipe was closed by peer.
|
| 652 |
+
if self._loop.get_debug():
|
| 653 |
+
logger.info("%r was closed by peer", self)
|
| 654 |
+
if self._buffer:
|
| 655 |
+
self._close(BrokenPipeError())
|
| 656 |
+
else:
|
| 657 |
+
self._close()
|
| 658 |
+
|
| 659 |
+
def write(self, data):
|
| 660 |
+
assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
|
| 661 |
+
if isinstance(data, bytearray):
|
| 662 |
+
data = memoryview(data)
|
| 663 |
+
if not data:
|
| 664 |
+
return
|
| 665 |
+
|
| 666 |
+
if self._conn_lost or self._closing:
|
| 667 |
+
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
|
| 668 |
+
logger.warning('pipe closed by peer or '
|
| 669 |
+
'os.write(pipe, data) raised exception.')
|
| 670 |
+
self._conn_lost += 1
|
| 671 |
+
return
|
| 672 |
+
|
| 673 |
+
if not self._buffer:
|
| 674 |
+
# Attempt to send it right away first.
|
| 675 |
+
try:
|
| 676 |
+
n = os.write(self._fileno, data)
|
| 677 |
+
except (BlockingIOError, InterruptedError):
|
| 678 |
+
n = 0
|
| 679 |
+
except (SystemExit, KeyboardInterrupt):
|
| 680 |
+
raise
|
| 681 |
+
except BaseException as exc:
|
| 682 |
+
self._conn_lost += 1
|
| 683 |
+
self._fatal_error(exc, 'Fatal write error on pipe transport')
|
| 684 |
+
return
|
| 685 |
+
if n == len(data):
|
| 686 |
+
return
|
| 687 |
+
elif n > 0:
|
| 688 |
+
data = memoryview(data)[n:]
|
| 689 |
+
self._loop._add_writer(self._fileno, self._write_ready)
|
| 690 |
+
|
| 691 |
+
self._buffer += data
|
| 692 |
+
self._maybe_pause_protocol()
|
| 693 |
+
|
| 694 |
+
def _write_ready(self):
|
| 695 |
+
assert self._buffer, 'Data should not be empty'
|
| 696 |
+
|
| 697 |
+
try:
|
| 698 |
+
n = os.write(self._fileno, self._buffer)
|
| 699 |
+
except (BlockingIOError, InterruptedError):
|
| 700 |
+
pass
|
| 701 |
+
except (SystemExit, KeyboardInterrupt):
|
| 702 |
+
raise
|
| 703 |
+
except BaseException as exc:
|
| 704 |
+
self._buffer.clear()
|
| 705 |
+
self._conn_lost += 1
|
| 706 |
+
# Remove writer here, _fatal_error() doesn't it
|
| 707 |
+
# because _buffer is empty.
|
| 708 |
+
self._loop._remove_writer(self._fileno)
|
| 709 |
+
self._fatal_error(exc, 'Fatal write error on pipe transport')
|
| 710 |
+
else:
|
| 711 |
+
if n == len(self._buffer):
|
| 712 |
+
self._buffer.clear()
|
| 713 |
+
self._loop._remove_writer(self._fileno)
|
| 714 |
+
self._maybe_resume_protocol() # May append to buffer.
|
| 715 |
+
if self._closing:
|
| 716 |
+
self._loop._remove_reader(self._fileno)
|
| 717 |
+
self._call_connection_lost(None)
|
| 718 |
+
return
|
| 719 |
+
elif n > 0:
|
| 720 |
+
del self._buffer[:n]
|
| 721 |
+
|
| 722 |
+
def can_write_eof(self):
|
| 723 |
+
return True
|
| 724 |
+
|
| 725 |
+
def write_eof(self):
|
| 726 |
+
if self._closing:
|
| 727 |
+
return
|
| 728 |
+
assert self._pipe
|
| 729 |
+
self._closing = True
|
| 730 |
+
if not self._buffer:
|
| 731 |
+
self._loop._remove_reader(self._fileno)
|
| 732 |
+
self._loop.call_soon(self._call_connection_lost, None)
|
| 733 |
+
|
| 734 |
+
def set_protocol(self, protocol):
|
| 735 |
+
self._protocol = protocol
|
| 736 |
+
|
| 737 |
+
def get_protocol(self):
|
| 738 |
+
return self._protocol
|
| 739 |
+
|
| 740 |
+
def is_closing(self):
|
| 741 |
+
return self._closing
|
| 742 |
+
|
| 743 |
+
def close(self):
|
| 744 |
+
if self._pipe is not None and not self._closing:
|
| 745 |
+
# write_eof is all what we needed to close the write pipe
|
| 746 |
+
self.write_eof()
|
| 747 |
+
|
| 748 |
+
def __del__(self, _warn=warnings.warn):
|
| 749 |
+
if self._pipe is not None:
|
| 750 |
+
_warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
|
| 751 |
+
self._pipe.close()
|
| 752 |
+
|
| 753 |
+
def abort(self):
|
| 754 |
+
self._close(None)
|
| 755 |
+
|
| 756 |
+
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
|
| 757 |
+
# should be called by exception handler only
|
| 758 |
+
if isinstance(exc, OSError):
|
| 759 |
+
if self._loop.get_debug():
|
| 760 |
+
logger.debug("%r: %s", self, message, exc_info=True)
|
| 761 |
+
else:
|
| 762 |
+
self._loop.call_exception_handler({
|
| 763 |
+
'message': message,
|
| 764 |
+
'exception': exc,
|
| 765 |
+
'transport': self,
|
| 766 |
+
'protocol': self._protocol,
|
| 767 |
+
})
|
| 768 |
+
self._close(exc)
|
| 769 |
+
|
| 770 |
+
    def _close(self, exc=None):
        # Immediate teardown: drop pending output, detach both fd
        # watchers, and schedule connection_lost(exc) on the loop.
        self._closing = True
        if self._buffer:
            self._loop._remove_writer(self._fileno)
        self._buffer.clear()
        self._loop._remove_reader(self._fileno)
        self._loop.call_soon(self._call_connection_lost, exc)
|
| 777 |
+
|
| 778 |
+
    def _call_connection_lost(self, exc):
        # Final step of teardown: notify the protocol, then break the
        # reference cycles (pipe/protocol/loop) even if the protocol's
        # connection_lost() raises.
        try:
            self._protocol.connection_lost(exc)
        finally:
            self._pipe.close()
            self._pipe = None
            self._protocol = None
            self._loop = None
|
| 786 |
+
|
| 787 |
+
|
| 788 |
+
class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
    """UNIX subprocess transport: launches the child via subprocess.Popen,
    with a socketpair workaround for stdin on AIX."""

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        stdin_w = None
        if stdin == subprocess.PIPE and sys.platform.startswith('aix'):
            # Use a socket pair for stdin on AIX, since it does not
            # support selecting read events on the write end of a
            # socket (which we use in order to detect closing of the
            # other end).
            stdin, stdin_w = socket.socketpair()
        try:
            self._proc = subprocess.Popen(
                args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
                universal_newlines=False, bufsize=bufsize, **kwargs)
            if stdin_w is not None:
                # The child inherited the read end; wrap the write end as
                # the parent-side proc.stdin and release our references.
                stdin.close()
                self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)
                stdin_w = None
        finally:
            if stdin_w is not None:
                # Popen failed: close both ends of the socket pair.
                stdin.close()
                stdin_w.close()
|
| 810 |
+
|
| 811 |
+
|
| 812 |
+
class AbstractChildWatcher:
    """Base interface for child-process monitors.

    A child watcher tracks a set of subprocesses and reports when each one
    terminates or is interrupted by a signal.

    Handlers are registered with .add_child_handler().  Because some
    implementations must not miss a termination that happens while a
    process is being spawned, new processes have to be started inside a
    ``with`` block so the watcher can suspend its activity until the
    registration is complete::

        with watcher:
            proc = subprocess.Popen("sleep 1")
            watcher.add_child_handler(proc.pid, callback)

    Implementations must be thread-safe.  Since a watcher may install a
    SIGCHLD handler and call waitpid(-1), at most one watcher should be
    active per process.
    """

    def add_child_handler(self, pid, callback, *args):
        """Arrange for callback(pid, returncode, *args) when *pid* exits.

        Registering a second callback for the same pid replaces the
        previous one.  The callback must be thread-safe.
        """
        raise NotImplementedError()

    def remove_child_handler(self, pid):
        """Drop the handler registered for *pid*.

        Return True when a handler was removed, False when there was
        nothing to remove.
        """
        raise NotImplementedError()

    def attach_loop(self, loop):
        """Bind the watcher to *loop* (which may be None).

        A previously attached event loop is detached first.
        """
        raise NotImplementedError()

    def close(self):
        """Release any underlying resources held by the watcher."""
        raise NotImplementedError()

    def is_active(self):
        """Return True when the watcher is installed and ready to deliver
        process-exit notifications."""
        raise NotImplementedError()

    def __enter__(self):
        """Open the spawn-protection context; must return self."""
        raise NotImplementedError()

    def __exit__(self, a, b, c):
        """Close the spawn-protection context."""
        raise NotImplementedError()
|
| 889 |
+
|
| 890 |
+
|
| 891 |
+
class PidfdChildWatcher(AbstractChildWatcher):
    """Child watcher based on Linux pid file descriptors (pidfds).

    Each registered child gets a pidfd that the event loop polls for
    readability; readiness means the child has terminated.  No signals or
    helper threads are needed, processes spawned outside the event loop
    are left alone, and cost grows linearly with the number of children.
    The trade-off: pidfds are Linux-only and need kernel 5.3 or newer.
    """

    def __init__(self):
        # pid -> (pidfd, callback, args)
        self._callbacks = {}
        self._loop = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        pass

    def is_active(self):
        loop = self._loop
        return loop is not None and loop.is_running()

    def close(self):
        # Detaching from the loop releases every pidfd still held.
        self.attach_loop(None)

    def attach_loop(self, loop):
        if self._callbacks and loop is None and self._loop is not None:
            warnings.warn(
                'A loop is being detached '
                'from a child watcher with pending handlers',
                RuntimeWarning)
        for fd, _, _ in self._callbacks.values():
            self._loop._remove_reader(fd)
            os.close(fd)
        self._callbacks.clear()
        self._loop = loop

    def add_child_handler(self, pid, callback, *args):
        entry = self._callbacks.get(pid)
        if entry is None:
            fd = os.pidfd_open(pid)
            self._loop._add_reader(fd, self._do_wait, pid)
            self._callbacks[pid] = fd, callback, args
        else:
            # Keep the existing pidfd; only the callback is replaced.
            self._callbacks[pid] = entry[0], callback, args

    def _do_wait(self, pid):
        # The pidfd became readable: the child has exited.  Reap it and
        # deliver the registered callback.
        fd, callback, args = self._callbacks.pop(pid)
        self._loop._remove_reader(fd)
        try:
            _, status = os.waitpid(pid, 0)
        except ChildProcessError:
            # The child process is already reaped
            # (may happen if waitpid() is called elsewhere).
            returncode = 255
            logger.warning(
                "child process pid %d exit status already read: "
                " will report returncode 255",
                pid)
        else:
            returncode = waitstatus_to_exitcode(status)

        os.close(fd)
        callback(pid, returncode, *args)

    def remove_child_handler(self, pid):
        entry = self._callbacks.pop(pid, None)
        if entry is None:
            return False
        fd = entry[0]
        self._loop._remove_reader(fd)
        os.close(fd)
        return True
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
class BaseChildWatcher(AbstractChildWatcher):
    """Shared machinery for SIGCHLD-based watchers: owns the loop
    attachment and the SIGCHLD handler (de)registration.  Subclasses
    implement the actual waitpid strategy."""

    def __init__(self):
        self._loop = None
        # pid -> registration; exact value shape is defined by subclasses.
        self._callbacks = {}

    def close(self):
        self.attach_loop(None)

    def is_active(self):
        return self._loop is not None and self._loop.is_running()

    def _do_waitpid(self, expected_pid):
        # Subclass hook: poll one specific child.
        raise NotImplementedError()

    def _do_waitpid_all(self):
        # Subclass hook: reap every child that has terminated.
        raise NotImplementedError()

    def attach_loop(self, loop):
        assert loop is None or isinstance(loop, events.AbstractEventLoop)

        if self._loop is not None and loop is None and self._callbacks:
            warnings.warn(
                'A loop is being detached '
                'from a child watcher with pending handlers',
                RuntimeWarning)

        # Detach from the old loop before attaching to the new one.
        if self._loop is not None:
            self._loop.remove_signal_handler(signal.SIGCHLD)

        self._loop = loop
        if loop is not None:
            loop.add_signal_handler(signal.SIGCHLD, self._sig_chld)

            # Prevent a race condition in case a child terminated
            # during the switch.
            self._do_waitpid_all()

    def _sig_chld(self):
        try:
            self._do_waitpid_all()
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            # self._loop should always be available here
            # as '_sig_chld' is added as a signal handler
            # in 'attach_loop'
            self._loop.call_exception_handler({
                'message': 'Unknown exception in SIGCHLD handler',
                'exception': exc,
            })
|
| 1020 |
+
|
| 1021 |
+
|
| 1022 |
+
class SafeChildWatcher(BaseChildWatcher):
    """'Safe' child watcher implementation.

    This implementation avoids disrupting other code spawning processes by
    polling explicitly each process in the SIGCHLD handler instead of calling
    os.waitpid(-1).

    This is a safe solution but it has a significant overhead when handling a
    big number of children (O(n) each time SIGCHLD is raised)
    """

    def close(self):
        self._callbacks.clear()
        super().close()

    def __enter__(self):
        return self

    def __exit__(self, a, b, c):
        pass

    def add_child_handler(self, pid, callback, *args):
        self._callbacks[pid] = (callback, args)

        # Prevent a race condition in case the child is already terminated.
        self._do_waitpid(pid)

    def remove_child_handler(self, pid):
        try:
            del self._callbacks[pid]
            return True
        except KeyError:
            return False

    def _do_waitpid_all(self):
        # Iterate over a copy: _do_waitpid() mutates self._callbacks.
        for pid in list(self._callbacks):
            self._do_waitpid(pid)

    def _do_waitpid(self, expected_pid):
        assert expected_pid > 0

        try:
            # WNOHANG: don't block if the child hasn't exited yet.
            pid, status = os.waitpid(expected_pid, os.WNOHANG)
        except ChildProcessError:
            # The child process is already reaped
            # (may happen if waitpid() is called elsewhere).
            pid = expected_pid
            returncode = 255
            logger.warning(
                "Unknown child process pid %d, will report returncode 255",
                pid)
        else:
            if pid == 0:
                # The child process is still alive.
                return

            returncode = waitstatus_to_exitcode(status)
            if self._loop.get_debug():
                logger.debug('process %s exited with returncode %s',
                             expected_pid, returncode)

        try:
            callback, args = self._callbacks.pop(pid)
        except KeyError:  # pragma: no cover
            # May happen if .remove_child_handler() is called
            # after os.waitpid() returns.
            if self._loop.get_debug():
                logger.warning("Child watcher got an unexpected pid: %r",
                               pid, exc_info=True)
        else:
            callback(pid, returncode, *args)
|
| 1094 |
+
|
| 1095 |
+
|
| 1096 |
+
class FastChildWatcher(BaseChildWatcher):
    """'Fast' child watcher implementation.

    This implementation reaps every terminated processes by calling
    os.waitpid(-1) directly, possibly breaking other code spawning processes
    and waiting for their termination.

    There is no noticeable overhead when handling a big number of children
    (O(1) each time a child terminates).
    """
    def __init__(self):
        super().__init__()
        # _lock guards _forks/_zombies against the SIGCHLD path and
        # concurrent spawning threads.
        self._lock = threading.Lock()
        # pid -> returncode for children reaped before their handler
        # was registered.
        self._zombies = {}
        # Number of active 'with watcher:' spawn-protection blocks.
        self._forks = 0

    def close(self):
        self._callbacks.clear()
        self._zombies.clear()
        super().close()

    def __enter__(self):
        with self._lock:
            self._forks += 1

            return self

    def __exit__(self, a, b, c):
        with self._lock:
            self._forks -= 1

            if self._forks or not self._zombies:
                return

            # No spawn in progress: nobody will claim these zombies now.
            collateral_victims = str(self._zombies)
            self._zombies.clear()

        logger.warning(
            "Caught subprocesses termination from unknown pids: %s",
            collateral_victims)

    def add_child_handler(self, pid, callback, *args):
        assert self._forks, "Must use the context manager"

        with self._lock:
            try:
                returncode = self._zombies.pop(pid)
            except KeyError:
                # The child is running.
                self._callbacks[pid] = callback, args
                return

        # The child is dead already. We can fire the callback.
        callback(pid, returncode, *args)

    def remove_child_handler(self, pid):
        try:
            del self._callbacks[pid]
            return True
        except KeyError:
            return False

    def _do_waitpid_all(self):
        # Because of signal coalescing, we must keep calling waitpid() as
        # long as we're able to reap a child.
        while True:
            try:
                pid, status = os.waitpid(-1, os.WNOHANG)
            except ChildProcessError:
                # No more child processes exist.
                return
            else:
                if pid == 0:
                    # A child process is still alive.
                    return

                returncode = waitstatus_to_exitcode(status)

            with self._lock:
                try:
                    callback, args = self._callbacks.pop(pid)
                except KeyError:
                    # unknown child
                    if self._forks:
                        # It may not be registered yet.
                        self._zombies[pid] = returncode
                        if self._loop.get_debug():
                            logger.debug('unknown process %s exited '
                                         'with returncode %s',
                                         pid, returncode)
                        continue
                    callback = None
                else:
                    if self._loop.get_debug():
                        logger.debug('process %s exited with returncode %s',
                                     pid, returncode)

            # Warn outside the lock; logging may be slow.
            if callback is None:
                logger.warning(
                    "Caught subprocess termination from unknown pid: "
                    "%d -> %d", pid, returncode)
            else:
                callback(pid, returncode, *args)
|
| 1199 |
+
|
| 1200 |
+
|
| 1201 |
+
class MultiLoopChildWatcher(AbstractChildWatcher):
    """A watcher that doesn't require running loop in the main thread.

    This implementation registers a SIGCHLD signal handler on
    instantiation (which may conflict with other code that
    install own handler for this signal).

    The solution is safe but it has a significant overhead when
    handling a big number of processes (*O(n)* each time a
    SIGCHLD is received).
    """

    # Implementation note:
    # The class keeps compatibility with AbstractChildWatcher ABC
    # To achieve this it has empty attach_loop() method
    # and doesn't accept explicit loop argument
    # for add_child_handler()/remove_child_handler()
    # but retrieves the current loop by get_running_loop()

    def __init__(self):
        # pid -> (loop, callback, args); the loop captured at
        # registration time is used to deliver the callback.
        self._callbacks = {}
        # Previous SIGCHLD handler, restored on close(); also serves
        # as the "initialized" flag for is_active()/attach_loop().
        self._saved_sighandler = None

    def is_active(self):
        return self._saved_sighandler is not None

    def close(self):
        self._callbacks.clear()
        if self._saved_sighandler is None:
            return

        handler = signal.getsignal(signal.SIGCHLD)
        if handler != self._sig_chld:
            # Don't clobber a handler someone else installed after us.
            logger.warning("SIGCHLD handler was changed by outside code")
        else:
            signal.signal(signal.SIGCHLD, self._saved_sighandler)
        self._saved_sighandler = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def add_child_handler(self, pid, callback, *args):
        loop = events.get_running_loop()
        self._callbacks[pid] = (loop, callback, args)

        # Prevent a race condition in case the child is already terminated.
        self._do_waitpid(pid)

    def remove_child_handler(self, pid):
        try:
            del self._callbacks[pid]
            return True
        except KeyError:
            return False

    def attach_loop(self, loop):
        # Don't save the loop but initialize itself if called first time
        # The reason to do it here is that attach_loop() is called from
        # unix policy only for the main thread.
        # Main thread is required for subscription on SIGCHLD signal
        if self._saved_sighandler is not None:
            return

        self._saved_sighandler = signal.signal(signal.SIGCHLD, self._sig_chld)
        if self._saved_sighandler is None:
            logger.warning("Previous SIGCHLD handler was set by non-Python code, "
                           "restore to default handler on watcher close.")
            self._saved_sighandler = signal.SIG_DFL

        # Set SA_RESTART to limit EINTR occurrences.
        signal.siginterrupt(signal.SIGCHLD, False)

    def _do_waitpid_all(self):
        # Iterate over a copy: _do_waitpid() mutates self._callbacks.
        for pid in list(self._callbacks):
            self._do_waitpid(pid)

    def _do_waitpid(self, expected_pid):
        assert expected_pid > 0

        try:
            pid, status = os.waitpid(expected_pid, os.WNOHANG)
        except ChildProcessError:
            # The child process is already reaped
            # (may happen if waitpid() is called elsewhere).
            pid = expected_pid
            returncode = 255
            logger.warning(
                "Unknown child process pid %d, will report returncode 255",
                pid)
            debug_log = False
        else:
            if pid == 0:
                # The child process is still alive.
                return

            returncode = waitstatus_to_exitcode(status)
            debug_log = True
        try:
            loop, callback, args = self._callbacks.pop(pid)
        except KeyError:  # pragma: no cover
            # May happen if .remove_child_handler() is called
            # after os.waitpid() returns.
            logger.warning("Child watcher got an unexpected pid: %r",
                           pid, exc_info=True)
        else:
            if loop.is_closed():
                logger.warning("Loop %r that handles pid %r is closed", loop, pid)
            else:
                if debug_log and loop.get_debug():
                    logger.debug('process %s exited with returncode %s',
                                 expected_pid, returncode)
                # Deliver on the loop that registered the handler; we may
                # be running in a signal handler on another thread's stack.
                loop.call_soon_threadsafe(callback, pid, returncode, *args)

    def _sig_chld(self, signum, frame):
        try:
            self._do_waitpid_all()
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException:
            # Never let an exception escape a signal handler.
            logger.warning('Unknown exception in SIGCHLD handler', exc_info=True)
|
| 1324 |
+
|
| 1325 |
+
|
| 1326 |
+
class ThreadedChildWatcher(AbstractChildWatcher):
    """Threaded child watcher implementation.

    The watcher uses a thread per process
    for waiting for the process finish.

    It doesn't require subscription on POSIX signal
    but a thread creation is not free.

    The watcher has O(1) complexity, its performance doesn't depend
    on amount of spawn processes.
    """

    def __init__(self):
        # Counter used only to build unique "waitpid-N" thread names.
        self._pid_counter = itertools.count(0)
        # pid -> waiter thread; each entry is removed by its own thread.
        self._threads = {}

    def is_active(self):
        return True

    def close(self):
        self._join_threads()

    def _join_threads(self):
        """Internal: Join all non-daemon threads"""
        threads = [thread for thread in list(self._threads.values())
                   if thread.is_alive() and not thread.daemon]
        for thread in threads:
            thread.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def __del__(self, _warn=warnings.warn):
        # Warn if the watcher is collected while waiter threads are alive.
        threads = [thread for thread in list(self._threads.values())
                   if thread.is_alive()]
        if threads:
            _warn(f"{self.__class__} has registered but not finished child processes",
                  ResourceWarning,
                  source=self)

    def add_child_handler(self, pid, callback, *args):
        loop = events.get_running_loop()
        thread = threading.Thread(target=self._do_waitpid,
                                  name=f"waitpid-{next(self._pid_counter)}",
                                  args=(loop, pid, callback, args),
                                  daemon=True)
        self._threads[pid] = thread
        thread.start()

    def remove_child_handler(self, pid):
        # asyncio never calls remove_child_handler() !!!
        # The method is no-op but is implemented because
        # abstract base classes require it.
        return True

    def attach_loop(self, loop):
        pass

    def _do_waitpid(self, loop, expected_pid, callback, args):
        # Runs in the dedicated waiter thread: block until the child
        # exits, then deliver the callback on the registering loop.
        assert expected_pid > 0

        try:
            pid, status = os.waitpid(expected_pid, 0)
        except ChildProcessError:
            # The child process is already reaped
            # (may happen if waitpid() is called elsewhere).
            pid = expected_pid
            returncode = 255
            logger.warning(
                "Unknown child process pid %d, will report returncode 255",
                pid)
        else:
            returncode = waitstatus_to_exitcode(status)
            if loop.get_debug():
                logger.debug('process %s exited with returncode %s',
                             expected_pid, returncode)

        if loop.is_closed():
            logger.warning("Loop %r that handles pid %r is closed", loop, pid)
        else:
            loop.call_soon_threadsafe(callback, pid, returncode, *args)

        self._threads.pop(expected_pid)
|
| 1413 |
+
|
| 1414 |
+
|
| 1415 |
+
class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
    """UNIX event loop policy with a watcher for child processes."""
    _loop_factory = _UnixSelectorEventLoop

    def __init__(self):
        super().__init__()
        # Lazily created by get_child_watcher()/_init_watcher().
        self._watcher = None

    def _init_watcher(self):
        # Re-check under the global lock so concurrent first callers
        # create at most one watcher.
        with events._lock:
            if self._watcher is None:  # pragma: no branch
                self._watcher = ThreadedChildWatcher()
                if threading.current_thread() is threading.main_thread():
                    self._watcher.attach_loop(self._local._loop)

    def set_event_loop(self, loop):
        """Set the event loop.

        As a side effect, if a child watcher was set before, then calling
        .set_event_loop() from the main thread will call .attach_loop(loop) on
        the child watcher.
        """

        super().set_event_loop(loop)

        if (self._watcher is not None and
                threading.current_thread() is threading.main_thread()):
            self._watcher.attach_loop(loop)

    def get_child_watcher(self):
        """Get the watcher for child processes.

        If not yet set, a ThreadedChildWatcher object is automatically created.
        """
        if self._watcher is None:
            self._init_watcher()

        return self._watcher

    def set_child_watcher(self, watcher):
        """Set the watcher for child processes."""

        assert watcher is None or isinstance(watcher, AbstractChildWatcher)

        # Release the resources of the watcher being replaced.
        if self._watcher is not None:
            self._watcher.close()

        self._watcher = watcher
|
| 1463 |
+
|
| 1464 |
+
|
| 1465 |
+
# Public aliases: on UNIX the selector loop and this policy are the
# platform defaults.
SelectorEventLoop = _UnixSelectorEventLoop
DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy
|
parrot/lib/python3.10/html/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (3.32 kB). View file
|
|
|
parrot/lib/python3.10/pydoc_data/__init__.py
ADDED
|
File without changes
|
parrot/lib/python3.10/pydoc_data/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (389 Bytes). View file
|
|
|
parrot/lib/python3.10/pydoc_data/_pydoc.css
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
CSS file for pydoc.
|
| 3 |
+
|
| 4 |
+
Contents of this file are subject to change without notice.
|
| 5 |
+
|
| 6 |
+
*/
|
parrot/lib/python3.10/unittest/__main__.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Main entry point"""
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
if sys.argv[0].endswith("__main__.py"):
|
| 5 |
+
import os.path
|
| 6 |
+
# We change sys.argv[0] to make help message more useful
|
| 7 |
+
# use executable without path, unquoted
|
| 8 |
+
# (it's just a hint anyway)
|
| 9 |
+
# (if you have spaces in your executable you get what you deserve!)
|
| 10 |
+
executable = os.path.basename(sys.executable)
|
| 11 |
+
sys.argv[0] = executable + " -m unittest"
|
| 12 |
+
del os
|
| 13 |
+
|
| 14 |
+
__unittest = True
|
| 15 |
+
|
| 16 |
+
from .main import main
|
| 17 |
+
|
| 18 |
+
main(module=None)
|
parrot/lib/python3.10/unittest/__pycache__/__main__.cpython-310.pyc
ADDED
|
Binary file (397 Bytes). View file
|
|
|
parrot/lib/python3.10/unittest/__pycache__/async_case.cpython-310.pyc
ADDED
|
Binary file (4.74 kB). View file
|
|
|
parrot/lib/python3.10/unittest/__pycache__/loader.cpython-310.pyc
ADDED
|
Binary file (14.7 kB). View file
|
|
|
parrot/lib/python3.10/unittest/__pycache__/mock.cpython-310.pyc
ADDED
|
Binary file (79.8 kB). View file
|
|
|
parrot/lib/python3.10/unittest/__pycache__/result.cpython-310.pyc
ADDED
|
Binary file (8.29 kB). View file
|
|
|
parrot/lib/python3.10/unittest/__pycache__/runner.cpython-310.pyc
ADDED
|
Binary file (7.21 kB). View file
|
|
|
parrot/lib/python3.10/unittest/__pycache__/signals.cpython-310.pyc
ADDED
|
Binary file (2.51 kB). View file
|
|
|
parrot/lib/python3.10/unittest/async_case.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import inspect
|
| 3 |
+
|
| 4 |
+
from .case import TestCase
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class IsolatedAsyncioTestCase(TestCase):
    """A TestCase that runs ``asyncSetUp``/test/``asyncTearDown`` coroutines
    on a private event loop, all inside one long-running task so they share
    the same ContextVar context.
    """
    # Names intentionally have a long prefix
    # to reduce a chance of clashing with user-defined attributes
    # from inherited test case
    #
    # The class doesn't call loop.run_until_complete(self.setUp()) and family
    # but uses a different approach:
    # 1. create a long-running task that reads self.setUp()
    #    awaitable from queue along with a future
    # 2. await the awaitable object passing in and set the result
    #    into the future object
    # 3. Outer code puts the awaitable and the future object into a queue
    #    with waiting for the future
    # The trick is necessary because every run_until_complete() call
    # creates a new task with embedded ContextVar context.
    # To share contextvars between setUp(), test and tearDown() we need to execute
    # them inside the same task.

    # Note: the test case modifies event loop policy if the policy was not instantiated
    # yet.
    # asyncio.get_event_loop_policy() creates a default policy on demand but never
    # returns None
    # I believe this is not an issue in user level tests but python itself for testing
    # should reset a policy in every test module
    # by calling asyncio.set_event_loop_policy(None) in tearDownModule()

    def __init__(self, methodName='runTest'):
        super().__init__(methodName)
        # The private event loop; created lazily in _setupAsyncioLoop()
        # and reset to None by _tearDownAsyncioLoop().
        self._asyncioTestLoop = None
        # asyncio.Queue of (future, awaitable) pairs consumed by
        # _asyncioLoopRunner(); created by the runner task itself.
        self._asyncioCallsQueue = None

    async def asyncSetUp(self):
        """Asynchronous counterpart of setUp(); override in subclasses."""
        pass

    async def asyncTearDown(self):
        """Asynchronous counterpart of tearDown(); override in subclasses."""
        pass

    def addAsyncCleanup(self, func, /, *args, **kwargs):
        """Register *func* (a coroutine function or awaitable-returning
        callable) to run as a cleanup; forwarded to addCleanup().
        """
        # A trivial trampoline to addCleanup()
        # the function exists because it has a different semantics
        # and signature:
        # addCleanup() accepts regular functions
        # but addAsyncCleanup() accepts coroutines
        #
        # We intentionally don't add inspect.iscoroutinefunction() check
        # for func argument because there is no way
        # to check for async function reliably:
        # 1. It can be "async def func()" itself
        # 2. Class can implement "async def __call__()" method
        # 3. Regular "def func()" that returns awaitable object
        self.addCleanup(*(func, *args), **kwargs)

    def _callSetUp(self):
        # Run the synchronous setUp() first, then the async one on the
        # shared runner task.
        self.setUp()
        self._callAsync(self.asyncSetUp)

    def _callTestMethod(self, method):
        # The test method may be either a plain function or a coroutine
        # function, hence "maybe async".
        self._callMaybeAsync(method)

    def _callTearDown(self):
        # Mirror of _callSetUp(): async teardown first, then sync tearDown().
        self._callAsync(self.asyncTearDown)
        self.tearDown()

    def _callCleanup(self, function, *args, **kwargs):
        # Cleanups registered via addCleanup()/addAsyncCleanup() may be
        # sync or async.
        self._callMaybeAsync(function, *args, **kwargs)

    def _callAsync(self, func, /, *args, **kwargs):
        """Call *func*, require its result to be awaitable, and drive that
        awaitable to completion on the shared runner task.

        Returns the awaitable's result (or raises its exception).
        """
        assert self._asyncioTestLoop is not None, 'asyncio test loop is not initialized'
        ret = func(*args, **kwargs)
        assert inspect.isawaitable(ret), f'{func!r} returned non-awaitable'
        # Hand the awaitable to _asyncioLoopRunner() via the queue and block
        # on the paired future; the runner awaits it inside its own task so
        # contextvars are shared across setUp/test/tearDown.
        fut = self._asyncioTestLoop.create_future()
        self._asyncioCallsQueue.put_nowait((fut, ret))
        return self._asyncioTestLoop.run_until_complete(fut)

    def _callMaybeAsync(self, func, /, *args, **kwargs):
        """Like _callAsync(), but if *func* returns a non-awaitable value,
        just return it directly.
        """
        assert self._asyncioTestLoop is not None, 'asyncio test loop is not initialized'
        ret = func(*args, **kwargs)
        if inspect.isawaitable(ret):
            # Same queue/future handshake as _callAsync().
            fut = self._asyncioTestLoop.create_future()
            self._asyncioCallsQueue.put_nowait((fut, ret))
            return self._asyncioTestLoop.run_until_complete(fut)
        else:
            return ret

    async def _asyncioLoopRunner(self, fut):
        """Long-running task: pull (future, awaitable) pairs off the queue,
        await each awaitable, and publish the outcome into its future.

        *fut* is resolved once the queue exists, signalling
        _setupAsyncioLoop() that the runner is ready.  A ``None`` queue item
        is the shutdown sentinel (sent by _tearDownAsyncioLoop()).
        """
        self._asyncioCallsQueue = queue = asyncio.Queue()
        fut.set_result(None)
        while True:
            query = await queue.get()
            # task_done() before processing so _tearDownAsyncioLoop()'s
            # queue.join() can complete once the sentinel is consumed.
            queue.task_done()
            if query is None:
                return
            fut, awaitable = query
            try:
                ret = await awaitable
                if not fut.cancelled():
                    fut.set_result(ret)
            except (SystemExit, KeyboardInterrupt):
                # Never convert interpreter-exit signals into a test failure.
                raise
            except (BaseException, asyncio.CancelledError) as ex:
                # NOTE(review): CancelledError is already a BaseException
                # subclass on 3.8+, so the tuple is redundant there; kept
                # byte-identical for compatibility.
                if not fut.cancelled():
                    fut.set_exception(ex)

    def _setupAsyncioLoop(self):
        """Create a fresh debug-mode event loop, install it as current, and
        start the runner task, blocking until the runner has created the
        calls queue.
        """
        assert self._asyncioTestLoop is None, 'asyncio test loop already initialized'
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # Debug mode surfaces slow callbacks and un-awaited coroutines
        # during tests.
        loop.set_debug(True)
        self._asyncioTestLoop = loop
        # Wait for _asyncioLoopRunner() to signal readiness via this future.
        fut = loop.create_future()
        self._asyncioCallsTask = loop.create_task(self._asyncioLoopRunner(fut))
        loop.run_until_complete(fut)

    def _tearDownAsyncioLoop(self):
        """Stop the runner task, cancel any leftover tasks, shut down async
        generators and the default executor, and close the loop.
        """
        assert self._asyncioTestLoop is not None, 'asyncio test loop is not initialized'
        loop = self._asyncioTestLoop
        self._asyncioTestLoop = None
        # Send the shutdown sentinel and wait for the runner to drain the
        # queue (see task_done() in _asyncioLoopRunner()).
        self._asyncioCallsQueue.put_nowait(None)
        loop.run_until_complete(self._asyncioCallsQueue.join())

        try:
            # cancel all tasks
            to_cancel = asyncio.all_tasks(loop)
            if not to_cancel:
                return

            for task in to_cancel:
                task.cancel()

            loop.run_until_complete(
                asyncio.gather(*to_cancel, return_exceptions=True))

            # Report exceptions from tasks that ended neither cancelled nor
            # clean — they would otherwise be silently lost.
            for task in to_cancel:
                if task.cancelled():
                    continue
                if task.exception() is not None:
                    loop.call_exception_handler({
                        'message': 'unhandled exception during test shutdown',
                        'exception': task.exception(),
                        'task': task,
                    })
            # shutdown asyncgens
            loop.run_until_complete(loop.shutdown_asyncgens())
        finally:
            # Prevent our executor environment from leaking to future tests.
            loop.run_until_complete(loop.shutdown_default_executor())
            asyncio.set_event_loop(None)
            loop.close()

    def run(self, result=None):
        """Run the test with a private event loop wrapped around the normal
        TestCase.run() machinery.
        """
        self._setupAsyncioLoop()
        try:
            return super().run(result)
        finally:
            self._tearDownAsyncioLoop()

    def debug(self):
        # Same loop lifecycle as run(), but without a result object.
        # NOTE(review): no try/finally here, so the loop is not torn down if
        # debug() raises — __del__ below acts as the backstop.
        self._setupAsyncioLoop()
        super().debug()
        self._tearDownAsyncioLoop()

    def __del__(self):
        # Safety net: tear the loop down if run()/debug() never did.
        if self._asyncioTestLoop is not None:
            self._tearDownAsyncioLoop()
|