ZTWHHH committed on
Commit
79c17b7
·
verified ·
1 Parent(s): 47d94fe

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. parrot/lib/libsqlite3.so.0 +3 -0
  3. parrot/lib/python3.10/site-packages/anyio/__pycache__/__init__.cpython-310.pyc +0 -0
  4. parrot/lib/python3.10/site-packages/anyio/__pycache__/from_thread.cpython-310.pyc +0 -0
  5. parrot/lib/python3.10/site-packages/anyio/__pycache__/lowlevel.cpython-310.pyc +0 -0
  6. parrot/lib/python3.10/site-packages/anyio/__pycache__/pytest_plugin.cpython-310.pyc +0 -0
  7. parrot/lib/python3.10/site-packages/anyio/__pycache__/to_process.cpython-310.pyc +0 -0
  8. parrot/lib/python3.10/site-packages/anyio/__pycache__/to_thread.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/anyio/_backends/__init__.py +0 -0
  10. parrot/lib/python3.10/site-packages/anyio/_backends/__pycache__/__init__.cpython-310.pyc +0 -0
  11. parrot/lib/python3.10/site-packages/anyio/_backends/__pycache__/_asyncio.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/anyio/_backends/__pycache__/_trio.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/anyio/_backends/_asyncio.py +2117 -0
  14. parrot/lib/python3.10/site-packages/anyio/_backends/_trio.py +996 -0
  15. parrot/lib/python3.10/site-packages/anyio/_core/__init__.py +0 -0
  16. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/__init__.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_compat.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_eventloop.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_exceptions.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_fileio.cpython-310.pyc +0 -0
  21. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_resources.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_signals.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_sockets.cpython-310.pyc +0 -0
  24. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_streams.cpython-310.pyc +0 -0
  25. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_subprocesses.cpython-310.pyc +0 -0
  26. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_synchronization.cpython-310.pyc +0 -0
  27. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_tasks.cpython-310.pyc +0 -0
  28. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_testing.cpython-310.pyc +0 -0
  29. parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_typedattr.cpython-310.pyc +0 -0
  30. parrot/lib/python3.10/site-packages/anyio/_core/_compat.py +217 -0
  31. parrot/lib/python3.10/site-packages/anyio/_core/_eventloop.py +153 -0
  32. parrot/lib/python3.10/site-packages/anyio/_core/_exceptions.py +94 -0
  33. parrot/lib/python3.10/site-packages/anyio/_core/_fileio.py +603 -0
  34. parrot/lib/python3.10/site-packages/anyio/_core/_resources.py +18 -0
  35. parrot/lib/python3.10/site-packages/anyio/_core/_signals.py +26 -0
  36. parrot/lib/python3.10/site-packages/anyio/_core/_sockets.py +607 -0
  37. parrot/lib/python3.10/site-packages/anyio/_core/_streams.py +47 -0
  38. parrot/lib/python3.10/site-packages/anyio/_core/_subprocesses.py +135 -0
  39. parrot/lib/python3.10/site-packages/anyio/_core/_synchronization.py +596 -0
  40. parrot/lib/python3.10/site-packages/anyio/_core/_tasks.py +180 -0
  41. parrot/lib/python3.10/site-packages/anyio/_core/_testing.py +82 -0
  42. parrot/lib/python3.10/site-packages/anyio/_core/_typedattr.py +83 -0
  43. parrot/lib/python3.10/site-packages/anyio/abc/__pycache__/__init__.cpython-310.pyc +0 -0
  44. parrot/lib/python3.10/site-packages/anyio/abc/__pycache__/_resources.cpython-310.pyc +0 -0
  45. parrot/lib/python3.10/site-packages/anyio/abc/__pycache__/_sockets.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/anyio/abc/__pycache__/_streams.cpython-310.pyc +0 -0
  47. parrot/lib/python3.10/site-packages/anyio/abc/__pycache__/_subprocesses.cpython-310.pyc +0 -0
  48. parrot/lib/python3.10/site-packages/anyio/abc/__pycache__/_tasks.cpython-310.pyc +0 -0
  49. parrot/lib/python3.10/site-packages/anyio/abc/_tasks.py +119 -0
  50. parrot/lib/python3.10/site-packages/anyio/abc/_testing.py +70 -0
.gitattributes CHANGED
@@ -160,3 +160,4 @@ parrot/lib/libquadmath.so.0 filter=lfs diff=lfs merge=lfs -text
160
  parrot/lib/libitm.so.1 filter=lfs diff=lfs merge=lfs -text
161
  parrot/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
162
  parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
160
  parrot/lib/libitm.so.1 filter=lfs diff=lfs merge=lfs -text
161
  parrot/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
162
  parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
163
+ parrot/lib/libsqlite3.so.0 filter=lfs diff=lfs merge=lfs -text
parrot/lib/libsqlite3.so.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71932eb5bf89092fbd2c900601fc9f24aa184d65038aaec2445fd11b1d923327
3
+ size 1543808
parrot/lib/python3.10/site-packages/anyio/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.21 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/__pycache__/from_thread.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/__pycache__/lowlevel.cpython-310.pyc ADDED
Binary file (5.31 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/__pycache__/pytest_plugin.cpython-310.pyc ADDED
Binary file (4.9 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/__pycache__/to_process.cpython-310.pyc ADDED
Binary file (6.3 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/__pycache__/to_thread.cpython-310.pyc ADDED
Binary file (2.34 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_backends/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/anyio/_backends/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (167 Bytes). View file
 
parrot/lib/python3.10/site-packages/anyio/_backends/__pycache__/_asyncio.cpython-310.pyc ADDED
Binary file (58.7 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_backends/__pycache__/_trio.cpython-310.pyc ADDED
Binary file (33.3 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_backends/_asyncio.py ADDED
@@ -0,0 +1,2117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import array
4
+ import asyncio
5
+ import concurrent.futures
6
+ import math
7
+ import socket
8
+ import sys
9
+ from asyncio.base_events import _run_until_complete_cb # type: ignore[attr-defined]
10
+ from collections import OrderedDict, deque
11
+ from concurrent.futures import Future
12
+ from contextvars import Context, copy_context
13
+ from dataclasses import dataclass
14
+ from functools import partial, wraps
15
+ from inspect import (
16
+ CORO_RUNNING,
17
+ CORO_SUSPENDED,
18
+ GEN_RUNNING,
19
+ GEN_SUSPENDED,
20
+ getcoroutinestate,
21
+ getgeneratorstate,
22
+ )
23
+ from io import IOBase
24
+ from os import PathLike
25
+ from queue import Queue
26
+ from socket import AddressFamily, SocketKind
27
+ from threading import Thread
28
+ from types import TracebackType
29
+ from typing import (
30
+ IO,
31
+ Any,
32
+ AsyncGenerator,
33
+ Awaitable,
34
+ Callable,
35
+ Collection,
36
+ Coroutine,
37
+ Generator,
38
+ Iterable,
39
+ Mapping,
40
+ Optional,
41
+ Sequence,
42
+ Tuple,
43
+ TypeVar,
44
+ Union,
45
+ cast,
46
+ )
47
+ from weakref import WeakKeyDictionary
48
+
49
+ import sniffio
50
+
51
+ from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc
52
+ from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable
53
+ from .._core._eventloop import claim_worker_thread, threadlocals
54
+ from .._core._exceptions import (
55
+ BrokenResourceError,
56
+ BusyResourceError,
57
+ ClosedResourceError,
58
+ EndOfStream,
59
+ WouldBlock,
60
+ )
61
+ from .._core._exceptions import ExceptionGroup as BaseExceptionGroup
62
+ from .._core._sockets import GetAddrInfoReturnType, convert_ipv6_sockaddr
63
+ from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter
64
+ from .._core._synchronization import Event as BaseEvent
65
+ from .._core._synchronization import ResourceGuard
66
+ from .._core._tasks import CancelScope as BaseCancelScope
67
+ from ..abc import IPSockAddrType, UDPPacketType
68
+ from ..lowlevel import RunVar
69
+
70
if sys.version_info >= (3, 8):

    def get_coro(task: asyncio.Task) -> Generator | Awaitable[Any]:
        """Return the coroutine wrapped by *task* (public API on 3.8+)."""
        return task.get_coro()

else:

    def get_coro(task: asyncio.Task) -> Generator | Awaitable[Any]:
        """Return the coroutine wrapped by *task*.

        ``Task.get_coro()`` does not exist before Python 3.8, so fall back to
        the private ``_coro`` attribute.
        """
        return task._coro
79
+
80
+
81
+ from asyncio import all_tasks, create_task, current_task, get_running_loop
82
+ from asyncio import run as native_run
83
+
84
+
85
def _get_task_callbacks(task: asyncio.Task) -> Iterable[Callable]:
    """Return the "done" callbacks registered on *task*.

    ``Task._callbacks`` (private CPython attribute) holds
    ``(callback, context)`` tuples; only the callables are returned.
    """
    return [cb for cb, context in task._callbacks]
87
+
88
+
89
# Generic return-value type for functions run on the event loop
T_Retval = TypeVar("T_Retval")
# Contravariant type var used by the task-status protocol below
T_contra = TypeVar("T_contra", contravariant=True)

# Check whether there is native support for task names in asyncio (3.8+)
_native_task_names = hasattr(asyncio.Task, "get_name")


# Caches the root task of the current event loop once find_root_task() has
# located it
_root_task: RunVar[asyncio.Task | None] = RunVar("_root_task")
97
+
98
+
99
def find_root_task() -> asyncio.Task:
    """Return the root task of the running event loop.

    The result is cached in the ``_root_task`` run variable. Resolution order:

    1. the previously cached root task, if it is still running;
    2. a task started via ``run_until_complete()`` (detected by its done
       callback, covering both vanilla asyncio and uvloop);
    3. the host task of the topmost AnyIO cancel scope of the current task;
    4. failing all of the above, the current task itself.
    """
    root_task = _root_task.get(None)
    if root_task is not None and not root_task.done():
        return root_task

    # Look for a task that has been started via run_until_complete()
    for task in all_tasks():
        if task._callbacks and not task.done():
            for cb in _get_task_callbacks(task):
                if (
                    cb is _run_until_complete_cb
                    or getattr(cb, "__module__", None) == "uvloop.loop"
                ):
                    _root_task.set(task)
                    return task

    # Look up the topmost task in the AnyIO task tree, if possible
    task = cast(asyncio.Task, current_task())
    state = _task_states.get(task)
    if state:
        cancel_scope = state.cancel_scope
        while cancel_scope and cancel_scope._parent_scope is not None:
            cancel_scope = cancel_scope._parent_scope

        if cancel_scope is not None:
            return cast(asyncio.Task, cancel_scope._host_task)

    return task
127
+
128
+
129
+ def get_callable_name(func: Callable) -> str:
130
+ module = getattr(func, "__module__", None)
131
+ qualname = getattr(func, "__qualname__", None)
132
+ return ".".join([x for x in (module, qualname) if x])
133
+
134
+
135
#
# Event loop
#

# Per-event-loop storage; weak keys let the entries die with their loop.
# Presumably consumed by the RunVar machinery in anyio.lowlevel — confirm
# against that module.
_run_vars = (
    WeakKeyDictionary()
)  # type: WeakKeyDictionary[asyncio.AbstractEventLoop, Any]

# On asyncio, the "token" identifying an event loop is the loop object itself
current_token = get_running_loop
144
+
145
+
146
def _task_started(task: asyncio.Task) -> bool:
    """Return ``True`` if the task has been started and has not finished."""
    coro = cast(Coroutine[Any, Any, Any], get_coro(task))
    try:
        return getcoroutinestate(coro) in (CORO_RUNNING, CORO_SUSPENDED)
    except AttributeError:
        # Not a native coroutine; it may be a generator-based one
        try:
            return getgeneratorstate(cast(Generator, coro)) in (
                GEN_RUNNING,
                GEN_SUSPENDED,
            )
        except AttributeError:
            # task coro is async_generator_asend https://bugs.python.org/issue37771
            raise Exception(f"Cannot determine if task {task} has started or not")
160
+
161
+
162
def _maybe_set_event_loop_policy(
    policy: asyncio.AbstractEventLoopPolicy | None, use_uvloop: bool
) -> None:
    """Install *policy*, or opportunistically select uvloop's policy.

    :param policy: an explicit policy to install, or ``None`` to auto-select
    :param use_uvloop: when ``True`` and no explicit policy is given, try to
        use uvloop (CPython only; silently skipped if uvloop is not installed
        or is too old)
    """
    # On CPython, use uvloop when possible if no other policy has been given and if not
    # explicitly disabled
    if policy is None and use_uvloop and sys.implementation.name == "cpython":
        try:
            import uvloop
        except ImportError:
            pass
        else:
            # Test for missing shutdown_default_executor() (uvloop 0.14.0 and earlier)
            if not hasattr(
                asyncio.AbstractEventLoop, "shutdown_default_executor"
            ) or hasattr(uvloop.loop.Loop, "shutdown_default_executor"):
                policy = uvloop.EventLoopPolicy()

    if policy is not None:
        asyncio.set_event_loop_policy(policy)
181
+
182
+
183
def run(
    func: Callable[..., Awaitable[T_Retval]],
    *args: object,
    debug: bool = False,
    use_uvloop: bool = False,
    policy: asyncio.AbstractEventLoopPolicy | None = None,
) -> T_Retval:
    """Run *func(*args)* on a fresh asyncio event loop and return its result.

    Wraps the coroutine so the root task is registered in ``_task_states``
    (and named after *func* where asyncio supports task names), then delegates
    to ``asyncio.run()``.

    :param func: the async function to run
    :param args: positional arguments passed to *func*
    :param debug: enable asyncio debug mode
    :param use_uvloop: opportunistically use uvloop (see
        ``_maybe_set_event_loop_policy``)
    :param policy: an explicit event loop policy to install first
    """

    @wraps(func)
    async def wrapper() -> T_Retval:
        task = cast(asyncio.Task, current_task())
        task_state = TaskState(None, get_callable_name(func), None)
        _task_states[task] = task_state
        if _native_task_names:
            task.set_name(task_state.name)

        try:
            return await func(*args)
        finally:
            # Drop the bookkeeping entry once the root task is finished
            del _task_states[task]

    _maybe_set_event_loop_policy(policy, use_uvloop)
    return native_run(wrapper(), debug=debug)
205
+
206
+
207
#
# Miscellaneous
#

# asyncio's sleep already behaves as a checkpoint, so it is re-exported as-is
sleep = asyncio.sleep


#
# Timeouts and cancellation
#

# Alias so the rest of this backend refers to one cancellation exception type
CancelledError = asyncio.CancelledError
219
+
220
+
221
class CancelScope(BaseCancelScope):
    """asyncio implementation of AnyIO's cancel scope.

    Tracks the tasks running under it (``_tasks``), the task that entered the
    ``with`` block (``_host_task``), and its position in the per-task scope
    stack (``_parent_scope``). Cancellation is delivered by repeatedly calling
    ``Task.cancel()`` on eligible tasks until none remain.
    """

    def __new__(
        cls, *, deadline: float = math.inf, shield: bool = False
    ) -> CancelScope:
        # Bypass BaseCancelScope.__new__ (which dispatches to the backend)
        return object.__new__(cls)

    def __init__(self, deadline: float = math.inf, shield: bool = False):
        self._deadline = deadline
        self._shield = shield
        self._parent_scope: CancelScope | None = None
        self._cancel_called = False
        self._active = False
        self._timeout_handle: asyncio.TimerHandle | None = None
        self._cancel_handle: asyncio.Handle | None = None
        self._tasks: set[asyncio.Task] = set()
        self._host_task: asyncio.Task | None = None
        self._timeout_expired = False
        # Number of Task.cancel() calls issued by this scope (undone on 3.11+)
        self._cancel_calls: int = 0

    def __enter__(self) -> CancelScope:
        if self._active:
            raise RuntimeError(
                "Each CancelScope may only be used for a single 'with' block"
            )

        self._host_task = host_task = cast(asyncio.Task, current_task())
        self._tasks.add(host_task)
        try:
            task_state = _task_states[host_task]
        except KeyError:
            # First scope entered by this task: create its state record
            task_name = host_task.get_name() if _native_task_names else None
            task_state = TaskState(None, task_name, self)
            _task_states[host_task] = task_state
        else:
            # Push this scope onto the task's scope stack
            self._parent_scope = task_state.cancel_scope
            task_state.cancel_scope = self

        self._timeout()
        self._active = True

        # Start cancelling the host task if the scope was cancelled before entering
        if self._cancel_called:
            self._deliver_cancellation()

        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        if not self._active:
            raise RuntimeError("This cancel scope is not active")
        if current_task() is not self._host_task:
            raise RuntimeError(
                "Attempted to exit cancel scope in a different task than it was "
                "entered in"
            )

        assert self._host_task is not None
        host_task_state = _task_states.get(self._host_task)
        if host_task_state is None or host_task_state.cancel_scope is not self:
            raise RuntimeError(
                "Attempted to exit a cancel scope that isn't the current tasks's "
                "current cancel scope"
            )

        self._active = False
        if self._timeout_handle:
            self._timeout_handle.cancel()
            self._timeout_handle = None

        self._tasks.remove(self._host_task)

        # Pop this scope off the host task's scope stack
        host_task_state.cancel_scope = self._parent_scope

        # Restart the cancellation effort in the farthest directly cancelled parent
        # scope if this one was shielded
        if self._shield:
            self._deliver_cancellation_to_parent()

        if exc_val is not None:
            exceptions = (
                exc_val.exceptions if isinstance(exc_val, ExceptionGroup) else [exc_val]
            )
            if all(isinstance(exc, CancelledError) for exc in exceptions):
                if self._timeout_expired:
                    # Swallow the CancelledError caused by our own deadline
                    return self._uncancel()
                elif not self._cancel_called:
                    # Task was cancelled natively
                    return None
                elif not self._parent_cancelled():
                    # This scope was directly cancelled
                    return self._uncancel()

        return None

    def _uncancel(self) -> bool:
        """Undo this scope's cancellations; return ``True`` if the exception
        may be swallowed (i.e. no outside cancellation remains pending)."""
        if sys.version_info < (3, 11) or self._host_task is None:
            # Task.uncancel()/cancelling() only exist on 3.11+
            self._cancel_calls = 0
            return True

        # Uncancel all AnyIO cancellations
        for i in range(self._cancel_calls):
            self._host_task.uncancel()

        self._cancel_calls = 0
        return not self._host_task.cancelling()

    def _timeout(self) -> None:
        # Arm (or immediately fire) the deadline timer, if any
        if self._deadline != math.inf:
            loop = get_running_loop()
            if loop.time() >= self._deadline:
                self._timeout_expired = True
                self.cancel()
            else:
                self._timeout_handle = loop.call_at(self._deadline, self._timeout)

    def _deliver_cancellation(self) -> None:
        """
        Deliver cancellation to directly contained tasks and nested cancel scopes.

        Schedule another run at the end if we still have tasks eligible for
        cancellation.
        """
        should_retry = False
        current = current_task()
        for task in self._tasks:
            if task._must_cancel:  # type: ignore[attr-defined]
                # A cancellation is already pending delivery to this task
                continue

            # The task is eligible for cancellation if it has started and is not in a
            # cancel scope shielded from this one
            cancel_scope = _task_states[task].cancel_scope
            while cancel_scope is not self:
                if cancel_scope is None or cancel_scope._shield:
                    break
                else:
                    cancel_scope = cancel_scope._parent_scope
            else:
                # Loop fell through: no shield between the task and this scope
                should_retry = True
                if task is not current and (
                    task is self._host_task or _task_started(task)
                ):
                    self._cancel_calls += 1
                    task.cancel()

        # Schedule another callback if there are still tasks left
        if should_retry:
            self._cancel_handle = get_running_loop().call_soon(
                self._deliver_cancellation
            )
        else:
            self._cancel_handle = None

    def _deliver_cancellation_to_parent(self) -> None:
        """Start cancellation effort in the farthest directly cancelled parent scope"""
        scope = self._parent_scope
        scope_to_cancel: CancelScope | None = None
        while scope is not None:
            if scope._cancel_called and scope._cancel_handle is None:
                scope_to_cancel = scope

            # No point in looking beyond any shielded scope
            if scope._shield:
                break

            scope = scope._parent_scope

        if scope_to_cancel is not None:
            scope_to_cancel._deliver_cancellation()

    def _parent_cancelled(self) -> bool:
        # Check whether any parent has been cancelled
        cancel_scope = self._parent_scope
        while cancel_scope is not None and not cancel_scope._shield:
            if cancel_scope._cancel_called:
                return True
            else:
                cancel_scope = cancel_scope._parent_scope

        return False

    def cancel(self) -> DeprecatedAwaitable:
        """Cancel this scope (idempotent); starts delivering cancellation if
        the scope has already been entered."""
        if not self._cancel_called:
            if self._timeout_handle:
                self._timeout_handle.cancel()
                self._timeout_handle = None

            self._cancel_called = True
            if self._host_task is not None:
                self._deliver_cancellation()

        return DeprecatedAwaitable(self.cancel)

    @property
    def deadline(self) -> float:
        return self._deadline

    @deadline.setter
    def deadline(self, value: float) -> None:
        # Re-arm the timer against the new deadline
        self._deadline = float(value)
        if self._timeout_handle is not None:
            self._timeout_handle.cancel()
            self._timeout_handle = None

        if self._active and not self._cancel_called:
            self._timeout()

    @property
    def cancel_called(self) -> bool:
        return self._cancel_called

    @property
    def shield(self) -> bool:
        return self._shield

    @shield.setter
    def shield(self, value: bool) -> None:
        if self._shield != value:
            self._shield = value
            if not value:
                # Dropping the shield exposes this scope to pending parent
                # cancellations again
                self._deliver_cancellation_to_parent()
444
+
445
+
446
async def checkpoint() -> None:
    """Yield control to the event loop, allowing pending cancellations to be
    delivered."""
    await sleep(0)
448
+
449
+
450
async def checkpoint_if_cancelled() -> None:
    """Yield to the event loop only if an enclosing (unshielded) cancel scope
    has a pending cancellation; otherwise return without suspending."""
    task = current_task()
    if task is None:
        # Not running inside a task; nothing to check
        return

    try:
        cancel_scope = _task_states[task].cancel_scope
    except KeyError:
        # Task not managed by AnyIO; no scopes to inspect
        return

    while cancel_scope:
        if cancel_scope.cancel_called:
            # Yield so the pending cancellation can be delivered
            await sleep(0)
        elif cancel_scope.shield:
            # Shield blocks cancellations from scopes further out
            break
        else:
            cancel_scope = cancel_scope._parent_scope
467
+
468
+
469
async def cancel_shielded_checkpoint() -> None:
    """Yield to the event loop while shielded from cancellation."""
    with CancelScope(shield=True):
        await sleep(0)
472
+
473
+
474
def current_effective_deadline() -> float:
    """Return the nearest deadline among the current task's cancel scopes.

    Returns ``math.inf`` when no scope imposes a deadline, and ``-math.inf``
    when a scope has already been cancelled. Scopes beyond a shielded scope
    are not considered.
    """
    try:
        cancel_scope = _task_states[current_task()].cancel_scope  # type: ignore[index]
    except KeyError:
        return math.inf

    deadline = math.inf
    while cancel_scope:
        deadline = min(deadline, cancel_scope.deadline)
        if cancel_scope._cancel_called:
            deadline = -math.inf
            break
        elif cancel_scope.shield:
            break
        else:
            cancel_scope = cancel_scope._parent_scope

    return deadline
492
+
493
+
494
def current_time() -> float:
    """Return the current time on the running event loop's monotonic clock."""
    return get_running_loop().time()
496
+
497
+
498
+ #
499
+ # Task states
500
+ #
501
+
502
+
503
class TaskState:
    """Auxiliary per-task bookkeeping held outside the Task object.

    asyncio makes no guarantees about the Task implementation, so extra
    attributes cannot safely be stored on the task instance itself.
    """

    __slots__ = ("parent_id", "name", "cancel_scope")

    def __init__(
        self,
        parent_id: int | None,
        name: str | None,
        cancel_scope: CancelScope | None,
    ):
        for attribute, value in (
            ("parent_id", parent_id),
            ("name", name),
            ("cancel_scope", cancel_scope),
        ):
            setattr(self, attribute, value)
520
+
521
+
522
# Maps each live asyncio.Task to its TaskState; weak keys let finished tasks
# be garbage-collected together with their state.
_task_states = WeakKeyDictionary()  # type: WeakKeyDictionary[asyncio.Task, TaskState]
523
+
524
+
525
+ #
526
+ # Task groups
527
+ #
528
+
529
+
530
class ExceptionGroup(BaseExceptionGroup):
    """Container raised when multiple child tasks fail; holds their
    exceptions in ``self.exceptions``."""

    def __init__(self, exceptions: list[BaseException]):
        super().__init__()
        self.exceptions = exceptions
534
+
535
+
536
class _AsyncioTaskStatus(abc.TaskStatus):
    """Reports task startup back to ``TaskGroup.start()`` via a future."""

    def __init__(self, future: asyncio.Future, parent_id: int):
        self._future = future
        self._parent_id = parent_id

    def started(self, value: T_contra | None = None) -> None:
        """Signal successful startup, passing *value* back to ``start()``.

        :raises RuntimeError: if called more than once on the same status
        """
        try:
            self._future.set_result(value)
        except asyncio.InvalidStateError:
            raise RuntimeError(
                "called 'started' twice on the same task status"
            ) from None

        # Reparent the started task onto the host task recorded at spawn time
        task = cast(asyncio.Task, current_task())
        _task_states[task].parent_id = self._parent_id
551
+
552
+
553
+ class TaskGroup(abc.TaskGroup):
554
+ def __init__(self) -> None:
555
+ self.cancel_scope: CancelScope = CancelScope()
556
+ self._active = False
557
+ self._exceptions: list[BaseException] = []
558
+
559
+ async def __aenter__(self) -> TaskGroup:
560
+ self.cancel_scope.__enter__()
561
+ self._active = True
562
+ return self
563
+
564
+ async def __aexit__(
565
+ self,
566
+ exc_type: type[BaseException] | None,
567
+ exc_val: BaseException | None,
568
+ exc_tb: TracebackType | None,
569
+ ) -> bool | None:
570
+ ignore_exception = self.cancel_scope.__exit__(exc_type, exc_val, exc_tb)
571
+ if exc_val is not None:
572
+ self.cancel_scope.cancel()
573
+ self._exceptions.append(exc_val)
574
+
575
+ while self.cancel_scope._tasks:
576
+ try:
577
+ await asyncio.wait(self.cancel_scope._tasks)
578
+ except asyncio.CancelledError:
579
+ self.cancel_scope.cancel()
580
+
581
+ self._active = False
582
+ if not self.cancel_scope._parent_cancelled():
583
+ exceptions = self._filter_cancellation_errors(self._exceptions)
584
+ else:
585
+ exceptions = self._exceptions
586
+
587
+ try:
588
+ if len(exceptions) > 1:
589
+ if all(
590
+ isinstance(e, CancelledError) and not e.args for e in exceptions
591
+ ):
592
+ # Tasks were cancelled natively, without a cancellation message
593
+ raise CancelledError
594
+ else:
595
+ raise ExceptionGroup(exceptions)
596
+ elif exceptions and exceptions[0] is not exc_val:
597
+ raise exceptions[0]
598
+ except BaseException as exc:
599
+ # Clear the context here, as it can only be done in-flight.
600
+ # If the context is not cleared, it can result in recursive tracebacks (see #145).
601
+ exc.__context__ = None
602
+ raise
603
+
604
+ return ignore_exception
605
+
606
+ @staticmethod
607
+ def _filter_cancellation_errors(
608
+ exceptions: Sequence[BaseException],
609
+ ) -> list[BaseException]:
610
+ filtered_exceptions: list[BaseException] = []
611
+ for exc in exceptions:
612
+ if isinstance(exc, ExceptionGroup):
613
+ new_exceptions = TaskGroup._filter_cancellation_errors(exc.exceptions)
614
+ if len(new_exceptions) > 1:
615
+ filtered_exceptions.append(exc)
616
+ elif len(new_exceptions) == 1:
617
+ filtered_exceptions.append(new_exceptions[0])
618
+ elif new_exceptions:
619
+ new_exc = ExceptionGroup(new_exceptions)
620
+ new_exc.__cause__ = exc.__cause__
621
+ new_exc.__context__ = exc.__context__
622
+ new_exc.__traceback__ = exc.__traceback__
623
+ filtered_exceptions.append(new_exc)
624
+ elif not isinstance(exc, CancelledError) or exc.args:
625
+ filtered_exceptions.append(exc)
626
+
627
+ return filtered_exceptions
628
+
629
+ async def _run_wrapped_task(
630
+ self, coro: Coroutine, task_status_future: asyncio.Future | None
631
+ ) -> None:
632
+ # This is the code path for Python 3.7 on which asyncio freaks out if a task
633
+ # raises a BaseException.
634
+ __traceback_hide__ = __tracebackhide__ = True # noqa: F841
635
+ task = cast(asyncio.Task, current_task())
636
+ try:
637
+ await coro
638
+ except BaseException as exc:
639
+ if task_status_future is None or task_status_future.done():
640
+ self._exceptions.append(exc)
641
+ self.cancel_scope.cancel()
642
+ else:
643
+ task_status_future.set_exception(exc)
644
+ else:
645
+ if task_status_future is not None and not task_status_future.done():
646
+ task_status_future.set_exception(
647
+ RuntimeError("Child exited without calling task_status.started()")
648
+ )
649
+ finally:
650
+ if task in self.cancel_scope._tasks:
651
+ self.cancel_scope._tasks.remove(task)
652
+ del _task_states[task]
653
+
654
def _spawn(
    self,
    func: Callable[..., Awaitable[Any]],
    args: tuple,
    name: object,
    task_status_future: asyncio.Future | None = None,
) -> asyncio.Task:
    """Create and register a child task running ``func(*args)``.

    When ``task_status_future`` is given, the child is a ``start()``-style
    task whose readiness/failure is reported through that future.
    """

    def task_done(_task: asyncio.Task) -> None:
        # This is the code path for Python 3.8+
        assert _task in self.cancel_scope._tasks
        self.cancel_scope._tasks.remove(_task)
        del _task_states[_task]

        try:
            exc = _task.exception()
        except CancelledError as e:
            # Unwrap chained cancellations to the original one.
            while isinstance(e.__context__, CancelledError):
                e = e.__context__

            exc = e

        if exc is not None:
            if task_status_future is None or task_status_future.done():
                self._exceptions.append(exc)
                self.cancel_scope.cancel()
            else:
                task_status_future.set_exception(exc)
        elif task_status_future is not None and not task_status_future.done():
            task_status_future.set_exception(
                RuntimeError("Child exited without calling task_status.started()")
            )

    if not self._active:
        raise RuntimeError(
            "This task group is not active; no new tasks can be started."
        )

    options: dict[str, Any] = {}
    name = get_callable_name(func) if name is None else str(name)
    if _native_task_names:
        options["name"] = name

    kwargs = {}
    if task_status_future:
        parent_id = id(current_task())
        kwargs["task_status"] = _AsyncioTaskStatus(
            task_status_future, id(self.cancel_scope._host_task)
        )
    else:
        parent_id = id(self.cancel_scope._host_task)

    coro = func(*args, **kwargs)
    if not asyncio.iscoroutine(coro):
        raise TypeError(
            f"Expected an async function, but {func} appears to be synchronous"
        )

    # Coroutines without frames (e.g. Cython) cannot use the done-callback
    # path, nor can Python < 3.8; wrap those instead.
    foreign_coro = not hasattr(coro, "cr_frame") and not hasattr(coro, "gi_frame")
    if foreign_coro or sys.version_info < (3, 8):
        coro = self._run_wrapped_task(coro, task_status_future)

    task = create_task(coro, **options)
    if not foreign_coro and sys.version_info >= (3, 8):
        task.add_done_callback(task_done)

    # Make the spawned task inherit the task group's cancel scope
    _task_states[task] = TaskState(
        parent_id=parent_id, name=name, cancel_scope=self.cancel_scope
    )
    self.cancel_scope._tasks.add(task)
    return task
725
+
726
def start_soon(
    self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
) -> None:
    """Fire-and-forget spawn of a child task in this task group."""
    self._spawn(func, args, name)
730
+
731
async def start(
    self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
) -> None:
    """Spawn a child task and wait until it signals ``task_status.started()``.

    Returns the value passed to ``started()``; cancels the child if this
    waiting call itself gets cancelled.
    """
    future: asyncio.Future = asyncio.Future()
    task = self._spawn(func, args, name, future)

    # If the task raises an exception after sending a start value without a
    # switch point in between, the task group is cancelled and this method
    # would never get to process the completed future — hence the shield.
    with CancelScope(shield=True):
        try:
            return await future
        except CancelledError:
            task.cancel()
            raise
746
+
747
+
748
#
# Threads
#

# (return value, exception) pair shuttled back from a worker thread.
_Retval_Queue_Type = Tuple[Optional[T_Retval], Optional[BaseException]]
753
+
754
+
755
class WorkerThread(Thread):
    """A reusable thread that executes synchronous jobs for one event loop."""

    MAX_IDLE_TIME = 10  # seconds

    def __init__(
        self,
        root_task: asyncio.Task,
        workers: set[WorkerThread],
        idle_workers: deque[WorkerThread],
    ):
        super().__init__(name="AnyIO worker thread")
        self.root_task = root_task
        self.workers = workers
        self.idle_workers = idle_workers
        self.loop = root_task._loop
        # Each queue item is (context, func, args, future); None is the
        # shutdown sentinel.
        self.queue: Queue[
            tuple[Context, Callable, tuple, asyncio.Future] | None
        ] = Queue(2)
        self.idle_since = current_time()
        self.stopping = False

    def _report_result(
        self, future: asyncio.Future, result: Any, exc: BaseException | None
    ) -> None:
        # Runs in the event loop thread: mark ourselves idle again and
        # resolve the caller's future.
        self.idle_since = current_time()
        if not self.stopping:
            self.idle_workers.append(self)

        if not future.cancelled():
            if exc is not None:
                if isinstance(exc, StopIteration):
                    # StopIteration cannot cross a Future boundary safely.
                    new_exc = RuntimeError("coroutine raised StopIteration")
                    new_exc.__cause__ = exc
                    exc = new_exc

                future.set_exception(exc)
            else:
                future.set_result(result)

    def run(self) -> None:
        with claim_worker_thread("asyncio"):
            threadlocals.loop = self.loop
            while True:
                item = self.queue.get()
                if item is None:
                    # Shutdown command received
                    return

                context, func, args, future = item
                if not future.cancelled():
                    result = None
                    exception: BaseException | None = None
                    try:
                        result = context.run(func, *args)
                    except BaseException as exc:
                        exception = exc

                    if not self.loop.is_closed():
                        self.loop.call_soon_threadsafe(
                            self._report_result, future, result, exception
                        )

                self.queue.task_done()

    def stop(self, f: asyncio.Task | None = None) -> None:
        """Ask the thread to exit and drop it from the worker pools."""
        self.stopping = True
        self.queue.put_nowait(None)
        self.workers.discard(self)
        try:
            self.idle_workers.remove(self)
        except ValueError:
            pass
826
+
827
+
828
# Per-event-loop thread pool state: idle workers (reuse order) and all workers.
_threadpool_idle_workers: RunVar[deque[WorkerThread]] = RunVar(
    "_threadpool_idle_workers"
)
_threadpool_workers: RunVar[set[WorkerThread]] = RunVar("_threadpool_workers")
832
+
833
+
834
async def run_sync_in_worker_thread(
    func: Callable[..., T_Retval],
    *args: object,
    cancellable: bool = False,
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    """Run ``func(*args)`` in a pooled worker thread and return its result.

    The call is shielded from cancellation unless ``cancellable`` is true;
    concurrency is bounded by ``limiter`` (default thread limiter if None).
    """
    await checkpoint()

    # If this is the first run in this event loop thread, set up the
    # necessary pool variables.
    try:
        idle_workers = _threadpool_idle_workers.get()
        workers = _threadpool_workers.get()
    except LookupError:
        idle_workers = deque()
        workers = set()
        _threadpool_idle_workers.set(idle_workers)
        _threadpool_workers.set(workers)

    async with (limiter or current_default_thread_limiter()):
        with CancelScope(shield=not cancellable):
            future: asyncio.Future = asyncio.Future()
            root_task = find_root_task()
            if not idle_workers:
                worker = WorkerThread(root_task, workers, idle_workers)
                worker.start()
                workers.add(worker)
                root_task.add_done_callback(worker.stop)
            else:
                worker = idle_workers.pop()

            # Prune any other workers that have been idle for MAX_IDLE_TIME
            # seconds or longer.
            now = current_time()
            while idle_workers:
                if now - idle_workers[0].idle_since < WorkerThread.MAX_IDLE_TIME:
                    break

                expired_worker = idle_workers.popleft()
                expired_worker.root_task.remove_done_callback(expired_worker.stop)
                expired_worker.stop()

            # Run the job outside any async library context.
            context = copy_context()
            context.run(sniffio.current_async_library_cvar.set, None)
            worker.queue.put_nowait((context, func, args, future))
            return await future
878
+
879
+
880
def run_sync_from_thread(
    func: Callable[..., T_Retval],
    *args: object,
    loop: asyncio.AbstractEventLoop | None = None,
) -> T_Retval:
    """From a foreign thread, run ``func(*args)`` in the event loop thread.

    Blocks until the call completes and returns (or re-raises) its outcome.
    """

    @wraps(func)
    def wrapper() -> None:
        try:
            f.set_result(func(*args))
        except BaseException as exc:
            f.set_exception(exc)
            # Let non-Exception BaseExceptions also propagate in the loop.
            if not isinstance(exc, Exception):
                raise

    f: concurrent.futures.Future[T_Retval] = Future()
    loop = loop or threadlocals.loop
    loop.call_soon_threadsafe(wrapper)
    return f.result()
898
+
899
+
900
def run_async_from_thread(
    func: Callable[..., Awaitable[T_Retval]], *args: object
) -> T_Retval:
    """From a foreign thread, run ``func(*args)`` on the loop and wait for it."""
    f: concurrent.futures.Future[T_Retval] = asyncio.run_coroutine_threadsafe(
        func(*args), threadlocals.loop
    )
    return f.result()
907
+
908
+
909
class BlockingPortal(abc.BlockingPortal):
    """asyncio implementation of the blocking portal."""

    def __new__(cls) -> BlockingPortal:
        return object.__new__(cls)

    def __init__(self) -> None:
        super().__init__()
        self._loop = get_running_loop()

    def _spawn_task_from_thread(
        self,
        func: Callable,
        args: tuple,
        kwargs: dict[str, Any],
        name: object,
        future: Future,
    ) -> None:
        # Hop into the portal's loop and hand the call off to the task group.
        run_sync_from_thread(
            partial(self._task_group.start_soon, name=name),
            self._call_func,
            func,
            args,
            kwargs,
            future,
            loop=self._loop,
        )
934
+
935
+
936
+ #
937
+ # Subprocesses
938
+ #
939
+
940
+
941
@dataclass(eq=False)
class StreamReaderWrapper(abc.ByteReceiveStream):
    """Adapts an ``asyncio.StreamReader`` to AnyIO's byte-receive interface."""

    _stream: asyncio.StreamReader

    async def receive(self, max_bytes: int = 65536) -> bytes:
        data = await self._stream.read(max_bytes)
        if not data:
            # An empty read means the peer closed the stream.
            raise EndOfStream

        return data

    async def aclose(self) -> None:
        self._stream.feed_eof()
954
+
955
+
956
@dataclass(eq=False)
class StreamWriterWrapper(abc.ByteSendStream):
    """Adapts an ``asyncio.StreamWriter`` to AnyIO's byte-send interface."""

    _stream: asyncio.StreamWriter

    async def send(self, item: bytes) -> None:
        self._stream.write(item)
        # Respect flow control before reporting the send as complete.
        await self._stream.drain()

    async def aclose(self) -> None:
        self._stream.close()
966
+
967
+
968
@dataclass(eq=False)
class Process(abc.Process):
    """asyncio-backed subprocess with AnyIO stream wrappers for its pipes."""

    _process: asyncio.subprocess.Process
    _stdin: StreamWriterWrapper | None
    _stdout: StreamReaderWrapper | None
    _stderr: StreamReaderWrapper | None

    async def aclose(self) -> None:
        # Close all pipes first, then wait for the process to exit.
        if self._stdin:
            await self._stdin.aclose()
        if self._stdout:
            await self._stdout.aclose()
        if self._stderr:
            await self._stderr.aclose()

        await self.wait()

    async def wait(self) -> int:
        return await self._process.wait()

    def terminate(self) -> None:
        self._process.terminate()

    def kill(self) -> None:
        self._process.kill()

    def send_signal(self, signal: int) -> None:
        self._process.send_signal(signal)

    @property
    def pid(self) -> int:
        return self._process.pid

    @property
    def returncode(self) -> int | None:
        return self._process.returncode

    @property
    def stdin(self) -> abc.ByteSendStream | None:
        return self._stdin

    @property
    def stdout(self) -> abc.ByteReceiveStream | None:
        return self._stdout

    @property
    def stderr(self) -> abc.ByteReceiveStream | None:
        return self._stderr
1016
+
1017
+
1018
async def open_process(
    command: str | bytes | Sequence[str | bytes],
    *,
    shell: bool,
    stdin: int | IO[Any] | None,
    stdout: int | IO[Any] | None,
    stderr: int | IO[Any] | None,
    cwd: str | bytes | PathLike | None = None,
    env: Mapping[str, str] | None = None,
    start_new_session: bool = False,
) -> Process:
    """Launch a subprocess (via shell or exec) and wrap it in a Process."""
    await checkpoint()
    if shell:
        process = await asyncio.create_subprocess_shell(
            cast(Union[str, bytes], command),
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            cwd=cwd,
            env=env,
            start_new_session=start_new_session,
        )
    else:
        process = await asyncio.create_subprocess_exec(
            *command,
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            cwd=cwd,
            env=env,
            start_new_session=start_new_session,
        )

    # Only wrap pipes that were actually requested.
    stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None
    stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None
    stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None
    return Process(process, stdin_stream, stdout_stream, stderr_stream)
1055
+
1056
+
1057
def _forcibly_shutdown_process_pool_on_exit(
    workers: set[Process], _task: object
) -> None:
    """Forcibly shuts down worker processes belonging to this event loop."""
    child_watcher: asyncio.AbstractChildWatcher | None
    try:
        child_watcher = asyncio.get_event_loop_policy().get_child_watcher()
    except NotImplementedError:
        child_watcher = None

    # Close as much as possible (w/o async/await) to avoid warnings
    for process in workers:
        if process.returncode is None:
            continue

        # NOTE(review): closing transports directly reaches through private
        # attributes; done here because the loop is shutting down.
        process._stdin._stream._transport.close()  # type: ignore[union-attr]
        process._stdout._stream._transport.close()  # type: ignore[union-attr]
        process._stderr._stream._transport.close()  # type: ignore[union-attr]
        process.kill()
        if child_watcher:
            child_watcher.remove_child_handler(process.pid)
1079
+
1080
+
1081
async def _shutdown_process_pool_on_exit(workers: set[Process]) -> None:
    """
    Shuts down worker processes belonging to this event loop.

    NOTE: this only works when the event loop was started using asyncio.run() or anyio.run().

    """
    process: Process
    try:
        # Sleep forever; cancellation signals loop shutdown.
        await sleep(math.inf)
    except asyncio.CancelledError:
        for process in workers:
            if process.returncode is None:
                process.kill()

        for process in workers:
            await process.aclose()
1098
+
1099
+
1100
def setup_process_pool_exit_at_shutdown(workers: set[Process]) -> None:
    """Arrange for ``workers`` to be cleaned up when the event loop exits."""
    kwargs: dict[str, Any] = (
        {"name": "AnyIO process pool shutdown task"} if _native_task_names else {}
    )
    create_task(_shutdown_process_pool_on_exit(workers), **kwargs)
    # Fallback: force-kill anything left if the graceful task never ran.
    find_root_task().add_done_callback(
        partial(_forcibly_shutdown_process_pool_on_exit, workers)
    )
1108
+
1109
+
1110
+ #
1111
+ # Sockets and networking
1112
+ #
1113
+
1114
+
1115
class StreamProtocol(asyncio.Protocol):
    """Protocol feeding received chunks into a queue consumed by SocketStream."""

    read_queue: deque[bytes]
    read_event: asyncio.Event
    write_event: asyncio.Event
    exception: Exception | None = None

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        self.read_queue = deque()
        self.read_event = asyncio.Event()
        self.write_event = asyncio.Event()
        self.write_event.set()
        # Zero write buffer limit so pause/resume tracks every write.
        cast(asyncio.Transport, transport).set_write_buffer_limits(0)

    def connection_lost(self, exc: Exception | None) -> None:
        if exc:
            self.exception = BrokenResourceError()
            self.exception.__cause__ = exc

        # Wake any readers/writers so they notice the loss.
        self.read_event.set()
        self.write_event.set()

    def data_received(self, data: bytes) -> None:
        self.read_queue.append(data)
        self.read_event.set()

    def eof_received(self) -> bool | None:
        self.read_event.set()
        return True

    def pause_writing(self) -> None:
        # Replace (rather than clear) the event; see upstream behavior.
        self.write_event = asyncio.Event()

    def resume_writing(self) -> None:
        self.write_event.set()
1149
+
1150
+
1151
class DatagramProtocol(asyncio.DatagramProtocol):
    """Protocol queuing received datagrams for UDPSocket/ConnectedUDPSocket."""

    read_queue: deque[tuple[bytes, IPSockAddrType]]
    read_event: asyncio.Event
    write_event: asyncio.Event
    exception: Exception | None = None

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        self.read_queue = deque(maxlen=100)  # arbitrary value
        self.read_event = asyncio.Event()
        self.write_event = asyncio.Event()
        self.write_event.set()

    def connection_lost(self, exc: Exception | None) -> None:
        self.read_event.set()
        self.write_event.set()

    def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None:
        addr = convert_ipv6_sockaddr(addr)
        self.read_queue.append((data, addr))
        self.read_event.set()

    def error_received(self, exc: Exception) -> None:
        self.exception = exc

    def pause_writing(self) -> None:
        self.write_event.clear()

    def resume_writing(self) -> None:
        self.write_event.set()
1180
+
1181
+
1182
class SocketStream(abc.SocketStream):
    """TCP socket stream built on an asyncio transport/protocol pair."""

    def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol):
        self._transport = transport
        self._protocol = protocol
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info("socket")

    async def receive(self, max_bytes: int = 65536) -> bytes:
        with self._receive_guard:
            await checkpoint()

            if (
                not self._protocol.read_event.is_set()
                and not self._transport.is_closing()
            ):
                # Let data flow in only while we wait for it.
                self._transport.resume_reading()
                await self._protocol.read_event.wait()
                self._transport.pause_reading()

            try:
                chunk = self._protocol.read_queue.popleft()
            except IndexError:
                if self._closed:
                    raise ClosedResourceError from None
                elif self._protocol.exception:
                    raise self._protocol.exception
                else:
                    raise EndOfStream from None

            if len(chunk) > max_bytes:
                # Split the oversized chunk
                chunk, leftover = chunk[:max_bytes], chunk[max_bytes:]
                self._protocol.read_queue.appendleft(leftover)

            # If the read queue is empty, clear the flag so that the next
            # call will block until data is available.
            if not self._protocol.read_queue:
                self._protocol.read_event.clear()

            return chunk

    async def send(self, item: bytes) -> None:
        with self._send_guard:
            await checkpoint()

            if self._closed:
                raise ClosedResourceError
            elif self._protocol.exception is not None:
                raise self._protocol.exception

            try:
                self._transport.write(item)
            except RuntimeError as exc:
                if self._transport.is_closing():
                    raise BrokenResourceError from exc
                else:
                    raise

            # Honor flow control before returning.
            await self._protocol.write_event.wait()

    async def send_eof(self) -> None:
        try:
            self._transport.write_eof()
        except OSError:
            pass

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            try:
                self._transport.write_eof()
            except OSError:
                pass

            self._transport.close()
            await sleep(0)
            self._transport.abort()
1264
+
1265
+
1266
class UNIXSocketStream(abc.SocketStream):
    """UNIX-domain stream driven directly via nonblocking socket + loop readers."""

    _receive_future: asyncio.Future | None = None
    _send_future: asyncio.Future | None = None
    _closing = False

    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._loop = get_running_loop()
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket

    def _wait_until_readable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
        def callback(f: object) -> None:
            del self._receive_future
            loop.remove_reader(self.__raw_socket)

        f = self._receive_future = asyncio.Future()
        self._loop.add_reader(self.__raw_socket, f.set_result, None)
        f.add_done_callback(callback)
        return f

    def _wait_until_writable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
        def callback(f: object) -> None:
            del self._send_future
            loop.remove_writer(self.__raw_socket)

        f = self._send_future = asyncio.Future()
        self._loop.add_writer(self.__raw_socket, f.set_result, None)
        f.add_done_callback(callback)
        return f

    async def send_eof(self) -> None:
        with self._send_guard:
            self._raw_socket.shutdown(socket.SHUT_WR)

    async def receive(self, max_bytes: int = 65536) -> bytes:
        loop = get_running_loop()
        await checkpoint()
        with self._receive_guard:
            while True:
                try:
                    data = self.__raw_socket.recv(max_bytes)
                except BlockingIOError:
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    if not data:
                        raise EndOfStream

                    return data

    async def send(self, item: bytes) -> None:
        loop = get_running_loop()
        await checkpoint()
        with self._send_guard:
            view = memoryview(item)
            while view:
                try:
                    bytes_sent = self.__raw_socket.send(view)
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    view = view[bytes_sent:]

    async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
        """Receive up to ``msglen`` bytes plus up to ``maxfds`` file descriptors."""
        if not isinstance(msglen, int) or msglen < 0:
            raise ValueError("msglen must be a non-negative integer")
        if not isinstance(maxfds, int) or maxfds < 1:
            raise ValueError("maxfds must be a positive integer")

        loop = get_running_loop()
        fds = array.array("i")
        await checkpoint()
        with self._receive_guard:
            while True:
                try:
                    message, ancdata, flags, addr = self.__raw_socket.recvmsg(
                        msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
                    )
                except BlockingIOError:
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    if not message and not ancdata:
                        raise EndOfStream

                    break

        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
                raise RuntimeError(
                    f"Received unexpected ancillary data; message = {message!r}, "
                    f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
                )

            # Drop any truncated trailing bytes before decoding the fd array.
            fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])

        return message, list(fds)

    async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
        """Send ``message`` together with the given file descriptors."""
        if not message:
            raise ValueError("message must not be empty")
        if not fds:
            raise ValueError("fds must not be empty")

        loop = get_running_loop()
        filenos: list[int] = []
        for fd in fds:
            if isinstance(fd, int):
                filenos.append(fd)
            elif isinstance(fd, IOBase):
                filenos.append(fd.fileno())

        fdarray = array.array("i", filenos)
        await checkpoint()
        with self._send_guard:
            while True:
                try:
                    # The ignore can be removed after mypy picks up
                    # https://github.com/python/typeshed/pull/5545
                    self.__raw_socket.sendmsg(
                        [message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)]
                    )
                    break
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc

    async def aclose(self) -> None:
        if not self._closing:
            self._closing = True
            if self.__raw_socket.fileno() != -1:
                self.__raw_socket.close()

            # Wake up any pending waiters so they can observe the closure.
            if self._receive_future:
                self._receive_future.set_result(None)
            if self._send_future:
                self._send_future.set_result(None)
1425
+
1426
+
1427
class TCPSocketListener(abc.SocketListener):
    """Accepts TCP connections and wraps each in a SocketStream."""

    _accept_scope: CancelScope | None = None
    _closed = False

    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._loop = cast(asyncio.BaseEventLoop, get_running_loop())
        self._accept_guard = ResourceGuard("accepting connections from")

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket

    async def accept(self) -> abc.SocketStream:
        if self._closed:
            raise ClosedResourceError

        with self._accept_guard:
            await checkpoint()
            with CancelScope() as self._accept_scope:
                try:
                    client_sock, _addr = await self._loop.sock_accept(self._raw_socket)
                except asyncio.CancelledError:
                    # Workaround for https://bugs.python.org/issue41317
                    try:
                        self._loop.remove_reader(self._raw_socket)
                    except (ValueError, NotImplementedError):
                        pass

                    if self._closed:
                        raise ClosedResourceError from None

                    raise
                finally:
                    self._accept_scope = None

        client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        transport, protocol = await self._loop.connect_accepted_socket(
            StreamProtocol, client_sock
        )
        return SocketStream(transport, protocol)

    async def aclose(self) -> None:
        if self._closed:
            return

        self._closed = True
        if self._accept_scope:
            # Workaround for https://bugs.python.org/issue41317
            try:
                self._loop.remove_reader(self._raw_socket)
            except (ValueError, NotImplementedError):
                pass

            self._accept_scope.cancel()
            await sleep(0)

        self._raw_socket.close()
1485
+
1486
+
1487
class UNIXSocketListener(abc.SocketListener):
    """Accepts UNIX-domain connections and wraps each in a UNIXSocketStream."""

    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._loop = get_running_loop()
        self._accept_guard = ResourceGuard("accepting connections from")
        self._closed = False

    async def accept(self) -> abc.SocketStream:
        await checkpoint()
        with self._accept_guard:
            while True:
                try:
                    client_sock, _ = self.__raw_socket.accept()
                    client_sock.setblocking(False)
                    return UNIXSocketStream(client_sock)
                except BlockingIOError:
                    # Wait until the listening socket becomes readable.
                    f: asyncio.Future = asyncio.Future()
                    self._loop.add_reader(self.__raw_socket, f.set_result, None)
                    f.add_done_callback(
                        lambda _: self._loop.remove_reader(self.__raw_socket)
                    )
                    await f
                except OSError as exc:
                    if self._closed:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc

    async def aclose(self) -> None:
        self._closed = True
        self.__raw_socket.close()

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket
1522
+
1523
+
1524
class UDPSocket(abc.UDPSocket):
    """Unconnected UDP socket: receive/send (data, address) pairs."""

    def __init__(
        self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
    ):
        self._transport = transport
        self._protocol = protocol
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info("socket")

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            self._transport.close()

    async def receive(self) -> tuple[bytes, IPSockAddrType]:
        with self._receive_guard:
            await checkpoint()

            # If the buffer is empty, ask for more data
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                await self._protocol.read_event.wait()

            try:
                return self._protocol.read_queue.popleft()
            except IndexError:
                if self._closed:
                    raise ClosedResourceError from None
                else:
                    raise BrokenResourceError from None

    async def send(self, item: UDPPacketType) -> None:
        with self._send_guard:
            await checkpoint()
            await self._protocol.write_event.wait()
            if self._closed:
                raise ClosedResourceError
            elif self._transport.is_closing():
                raise BrokenResourceError
            else:
                self._transport.sendto(*item)
1570
+
1571
+
1572
class ConnectedUDPSocket(abc.ConnectedUDPSocket):
    """UDP socket bound to a fixed remote peer: receive/send raw payloads."""

    def __init__(
        self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
    ):
        self._transport = transport
        self._protocol = protocol
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info("socket")

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            self._transport.close()

    async def receive(self) -> bytes:
        with self._receive_guard:
            await checkpoint()

            # If the buffer is empty, ask for more data
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                await self._protocol.read_event.wait()

            try:
                packet = self._protocol.read_queue.popleft()
            except IndexError:
                if self._closed:
                    raise ClosedResourceError from None
                else:
                    raise BrokenResourceError from None

            # Discard the sender address; the peer is fixed.
            return packet[0]

    async def send(self, item: bytes) -> None:
        with self._send_guard:
            await checkpoint()
            await self._protocol.write_event.wait()
            if self._closed:
                raise ClosedResourceError
            elif self._transport.is_closing():
                raise BrokenResourceError
            else:
                self._transport.sendto(item)
1620
+
1621
+
1622
async def connect_tcp(
    host: str, port: int, local_addr: tuple[str, int] | None = None
) -> SocketStream:
    """Open a TCP connection and return it as a SocketStream."""
    transport, protocol = cast(
        Tuple[asyncio.Transport, StreamProtocol],
        await get_running_loop().create_connection(
            StreamProtocol, host, port, local_addr=local_addr
        ),
    )
    # Reading resumes on demand inside SocketStream.receive().
    transport.pause_reading()
    return SocketStream(transport, protocol)
1633
+
1634
+
1635
async def connect_unix(path: str) -> UNIXSocketStream:
    """Connect a nonblocking UNIX-domain socket to ``path``."""
    await checkpoint()
    loop = get_running_loop()
    raw_socket = socket.socket(socket.AF_UNIX)
    raw_socket.setblocking(False)
    while True:
        try:
            raw_socket.connect(path)
        except BlockingIOError:
            # Connection in progress: wait for writability, then retry.
            f: asyncio.Future = asyncio.Future()
            loop.add_writer(raw_socket, f.set_result, None)
            f.add_done_callback(lambda _: loop.remove_writer(raw_socket))
            await f
        except BaseException:
            raw_socket.close()
            raise
        else:
            return UNIXSocketStream(raw_socket)
1653
+
1654
+
1655
async def create_udp_socket(
    family: socket.AddressFamily,
    local_address: IPSockAddrType | None,
    remote_address: IPSockAddrType | None,
    reuse_port: bool,
) -> UDPSocket | ConnectedUDPSocket:
    """Create a UDP endpoint; connected if ``remote_address`` is given."""
    result = await get_running_loop().create_datagram_endpoint(
        DatagramProtocol,
        local_addr=local_address,
        remote_addr=remote_address,
        family=family,
        reuse_port=reuse_port,
    )
    transport = result[0]
    protocol = result[1]
    if protocol.exception:
        # Setup failed (e.g. ICMP error already delivered) — clean up.
        transport.close()
        raise protocol.exception

    if not remote_address:
        return UDPSocket(transport, protocol)
    else:
        return ConnectedUDPSocket(transport, protocol)
1678
+
1679
+
1680
async def getaddrinfo(
    host: bytes | str,
    port: str | int | None,
    *,
    family: int | AddressFamily = 0,
    type: int | SocketKind = 0,
    proto: int = 0,
    flags: int = 0,
) -> GetAddrInfoReturnType:
    """Async name resolution via the running event loop."""
    # https://github.com/python/typeshed/pull/4304
    result = await get_running_loop().getaddrinfo(
        host, port, family=family, type=type, proto=proto, flags=flags
    )
    return cast(GetAddrInfoReturnType, result)
1694
+
1695
+
1696
async def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> tuple[str, str]:
    """Async reverse name resolution via the running event loop."""
    return await get_running_loop().getnameinfo(sockaddr, flags)
1698
+
1699
+
1700
# Per-event-loop registries of sockets currently awaited for read/write.
_read_events: RunVar[dict[Any, asyncio.Event]] = RunVar("read_events")
_write_events: RunVar[dict[Any, asyncio.Event]] = RunVar("write_events")
1702
+
1703
+
1704
async def wait_socket_readable(sock: socket.socket) -> None:
    """Block until ``sock`` is readable; raise on concurrent use or closure."""
    await checkpoint()
    try:
        read_events = _read_events.get()
    except LookupError:
        read_events = {}
        _read_events.set(read_events)

    if read_events.get(sock):
        raise BusyResourceError("reading from") from None

    loop = get_running_loop()
    event = read_events[sock] = asyncio.Event()
    loop.add_reader(sock, event.set)
    try:
        await event.wait()
    finally:
        # If our entry is still registered, we finished normally; if it was
        # popped elsewhere, the socket was closed out from under us.
        if read_events.pop(sock, None) is not None:
            loop.remove_reader(sock)
            readable = True
        else:
            readable = False

    if not readable:
        raise ClosedResourceError
1729
+
1730
+
1731
async def wait_socket_writable(sock: socket.socket) -> None:
    """Suspend the calling task until *sock* is reported writable.

    Raises ``BusyResourceError`` if another task is already waiting to write
    to this socket, and ``ClosedResourceError`` if the wait was aborted (the
    event was removed from the registry without the socket becoming writable).
    """
    await checkpoint()
    try:
        write_events = _write_events.get()
    except LookupError:
        # First waiter in this event loop run: create the registry lazily
        write_events = {}
        _write_events.set(write_events)

    if write_events.get(sock):
        raise BusyResourceError("writing to") from None

    loop = get_running_loop()
    event = write_events[sock] = asyncio.Event()
    # Register with the socket object itself rather than sock.fileno(), for
    # consistency with the remove_writer() call below and with
    # wait_socket_readable(); asyncio accepts either a file descriptor or a
    # fileno()-bearing object.
    loop.add_writer(sock, event.set)
    try:
        await event.wait()
    finally:
        # If our entry is still present we own the writer registration and
        # must remove it; otherwise the socket was closed out from under us.
        if write_events.pop(sock, None) is not None:
            loop.remove_writer(sock)
            writable = True
        else:
            writable = False

    if not writable:
        raise ClosedResourceError
1756
+
1757
+
1758
+ #
1759
+ # Synchronization
1760
+ #
1761
+
1762
+
1763
class Event(BaseEvent):
    """asyncio implementation of the anyio Event, wrapping asyncio.Event."""

    def __new__(cls) -> Event:
        # Bypass BaseEvent.__new__ (which dispatches to the backend class)
        return object.__new__(cls)

    def __init__(self) -> None:
        self._event = asyncio.Event()

    def set(self) -> DeprecatedAwaitable:
        self._event.set()
        # Returned so that legacy ``await event.set()`` still works (deprecated)
        return DeprecatedAwaitable(self.set)

    def is_set(self) -> bool:
        return self._event.is_set()

    async def wait(self) -> None:
        # asyncio.Event.wait() returns True; checkpoint afterwards so this
        # call is always a cancellation point, as anyio requires
        if await self._event.wait():
            await checkpoint()

    def statistics(self) -> EventStatistics:
        # Peeks at asyncio.Event's private waiter list to count blocked tasks
        return EventStatistics(len(self._event._waiters))  # type: ignore[attr-defined]
1783
+
1784
+
1785
class CapacityLimiter(BaseCapacityLimiter):
    """asyncio implementation of the anyio CapacityLimiter.

    Tracks a set of borrower objects holding tokens and an ordered queue of
    waiters (FIFO fairness via OrderedDict insertion order).
    """

    _total_tokens: float = 0

    def __new__(cls, total_tokens: float) -> CapacityLimiter:
        # Bypass the dispatching __new__ of the base class
        return object.__new__(cls)

    def __init__(self, total_tokens: float):
        self._borrowers: set[Any] = set()
        self._wait_queue: OrderedDict[Any, asyncio.Event] = OrderedDict()
        # Goes through the property setter below, which validates the value
        self.total_tokens = total_tokens

    async def __aenter__(self) -> None:
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    @property
    def total_tokens(self) -> float:
        return self._total_tokens

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        # Only ints or math.inf are accepted; minimum capacity is 1
        if not isinstance(value, int) and not math.isinf(value):
            raise TypeError("total_tokens must be an int or math.inf")
        if value < 1:
            raise ValueError("total_tokens must be >= 1")

        old_value = self._total_tokens
        self._total_tokens = value
        # If capacity grew, wake as many queued waiters as there are new slots
        events = []
        for event in self._wait_queue.values():
            if value <= old_value:
                break

            if not event.is_set():
                events.append(event)
                old_value += 1

        for event in events:
            event.set()

    @property
    def borrowed_tokens(self) -> int:
        return len(self._borrowers)

    @property
    def available_tokens(self) -> float:
        return self._total_tokens - len(self._borrowers)

    def acquire_nowait(self) -> DeprecatedAwaitable:
        self.acquire_on_behalf_of_nowait(current_task())
        return DeprecatedAwaitable(self.acquire_nowait)

    def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable:
        if borrower in self._borrowers:
            raise RuntimeError(
                "this borrower is already holding one of this CapacityLimiter's "
                "tokens"
            )

        # A non-empty wait queue blocks even when tokens are free, to
        # preserve FIFO fairness for tasks already waiting
        if self._wait_queue or len(self._borrowers) >= self._total_tokens:
            raise WouldBlock

        self._borrowers.add(borrower)
        return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait)

    async def acquire(self) -> None:
        return await self.acquire_on_behalf_of(current_task())

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        await checkpoint_if_cancelled()
        try:
            self.acquire_on_behalf_of_nowait(borrower)
        except WouldBlock:
            # Queue up and wait to be woken by release()/total_tokens growth
            event = asyncio.Event()
            self._wait_queue[borrower] = event
            try:
                await event.wait()
            except BaseException:
                # Cancelled while waiting: withdraw from the queue
                self._wait_queue.pop(borrower, None)
                raise

            self._borrowers.add(borrower)
        else:
            # Fast path succeeded; still yield once, releasing on cancellation
            try:
                await cancel_shielded_checkpoint()
            except BaseException:
                self.release()
                raise

    def release(self) -> None:
        self.release_on_behalf_of(current_task())

    def release_on_behalf_of(self, borrower: object) -> None:
        try:
            self._borrowers.remove(borrower)
        except KeyError:
            raise RuntimeError(
                "this borrower isn't holding any of this CapacityLimiter's " "tokens"
            ) from None

        # Notify the next task in line if this limiter has free capacity now
        if self._wait_queue and len(self._borrowers) < self._total_tokens:
            event = self._wait_queue.popitem(last=False)[1]
            event.set()

    def statistics(self) -> CapacityLimiterStatistics:
        return CapacityLimiterStatistics(
            self.borrowed_tokens,
            self.total_tokens,
            tuple(self._borrowers),
            len(self._wait_queue),
        )
1904
+
1905
+
1906
+ _default_thread_limiter: RunVar[CapacityLimiter] = RunVar("_default_thread_limiter")
1907
+
1908
+
1909
def current_default_thread_limiter() -> CapacityLimiter:
    """Return the run-wide default thread limiter, creating it (with 40
    tokens) on first use within the current event loop run."""
    try:
        limiter = _default_thread_limiter.get()
    except LookupError:
        limiter = CapacityLimiter(40)
        _default_thread_limiter.set(limiter)

    return limiter
1916
+
1917
+
1918
+ #
1919
+ # Operating system signals
1920
+ #
1921
+
1922
+
1923
class _SignalReceiver(DeprecatedAsyncContextManager["_SignalReceiver"]):
    """Async iterator over operating system signals delivered to the loop.

    Used as a context manager: signal handlers are installed on __enter__
    and removed on __exit__; iteration yields queued signal numbers.
    """

    def __init__(self, signals: tuple[int, ...]):
        self._signals = signals
        self._loop = get_running_loop()
        self._signal_queue: deque[int] = deque()
        self._future: asyncio.Future = asyncio.Future()
        self._handled_signals: set[int] = set()

    def _deliver(self, signum: int) -> None:
        # Called by the event loop's signal handler; queue the signal and
        # wake a pending __anext__ (if any)
        self._signal_queue.append(signum)
        if not self._future.done():
            self._future.set_result(None)

    def __enter__(self) -> _SignalReceiver:
        # set() deduplicates repeated signal numbers
        for sig in set(self._signals):
            self._loop.add_signal_handler(sig, self._deliver, sig)
            self._handled_signals.add(sig)

        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        for sig in self._handled_signals:
            self._loop.remove_signal_handler(sig)
        return None

    def __aiter__(self) -> _SignalReceiver:
        return self

    async def __anext__(self) -> int:
        await checkpoint()
        if not self._signal_queue:
            # Nothing queued yet: park on a fresh future until _deliver fires
            self._future = asyncio.Future()
            await self._future

        return self._signal_queue.popleft()
1963
+
1964
+
1965
def open_signal_receiver(*signals: int) -> _SignalReceiver:
    """Return a context manager yielding an async iterator of the given signals."""
    return _SignalReceiver(signals)
1967
+
1968
+
1969
+ #
1970
+ # Testing and debugging
1971
+ #
1972
+
1973
+
1974
def _create_task_info(task: asyncio.Task) -> TaskInfo:
    """Build an anyio TaskInfo from an asyncio task.

    Prefers anyio's own per-task state (name / parent id); falls back to the
    native task name (where supported) with no parent id for tasks anyio did
    not spawn.
    """
    task_state = _task_states.get(task)
    if task_state is None:
        name = task.get_name() if _native_task_names else None
        parent_id = None
    else:
        name = task_state.name
        parent_id = task_state.parent_id

    return TaskInfo(id(task), parent_id, name, get_coro(task))
1984
+
1985
+
1986
def get_current_task() -> TaskInfo:
    """Return a TaskInfo describing the currently running task."""
    # current_task() cannot be None here since this runs inside a task
    return _create_task_info(current_task())  # type: ignore[arg-type]
1988
+
1989
+
1990
def get_running_tasks() -> list[TaskInfo]:
    """Return TaskInfo objects for all tasks that have not yet finished."""
    pending = (task for task in all_tasks() if not task.done())
    return [_create_task_info(task) for task in pending]
1992
+
1993
+
1994
async def wait_all_tasks_blocked() -> None:
    """Poll until every other task in this loop is blocked on a future.

    Testing helper: repeatedly scans all tasks and sleeps briefly while any
    task (other than the caller) is runnable. Relies on the private
    ``Task._fut_waiter`` attribute, which is set while a task awaits a future.
    """
    await checkpoint()
    this_task = current_task()
    while True:
        for task in all_tasks():
            if task is this_task:
                continue

            # No waiter (or a completed one) means the task is still runnable
            if task._fut_waiter is None or task._fut_waiter.done():  # type: ignore[attr-defined]
                await sleep(0.1)
                break
        else:
            # Loop completed without finding a runnable task: all blocked
            return
2007
+
2008
+
2009
class TestRunner(abc.TestRunner):
    """asyncio implementation of the anyio test runner.

    Owns a private event loop for the lifetime of the runner and collects
    exceptions raised in asynchronous callbacks so they can be re-raised in
    the test's own call frame.
    """

    def __init__(
        self,
        debug: bool = False,
        use_uvloop: bool = False,
        policy: asyncio.AbstractEventLoopPolicy | None = None,
    ):
        self._exceptions: list[BaseException] = []
        _maybe_set_event_loop_policy(policy, use_uvloop)
        self._loop = asyncio.new_event_loop()
        self._loop.set_debug(debug)
        self._loop.set_exception_handler(self._exception_handler)
        asyncio.set_event_loop(self._loop)

    def _cancel_all_tasks(self) -> None:
        # Cancel every task on our loop and wait for them all to finish;
        # re-raise the first non-cancellation exception encountered
        to_cancel = all_tasks(self._loop)
        if not to_cancel:
            return

        for task in to_cancel:
            task.cancel()

        self._loop.run_until_complete(
            asyncio.gather(*to_cancel, return_exceptions=True)
        )

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                raise cast(BaseException, task.exception())

    def _exception_handler(
        self, loop: asyncio.AbstractEventLoop, context: dict[str, Any]
    ) -> None:
        # Capture Exceptions for later re-raising; defer everything else
        # (e.g. BaseExceptions, non-exception contexts) to the default handler
        if isinstance(context.get("exception"), Exception):
            self._exceptions.append(context["exception"])
        else:
            loop.default_exception_handler(context)

    def _raise_async_exceptions(self) -> None:
        # Re-raise any exceptions raised in asynchronous callbacks
        if self._exceptions:
            exceptions, self._exceptions = self._exceptions, []
            if len(exceptions) == 1:
                raise exceptions[0]
            elif exceptions:
                raise ExceptionGroup(exceptions)

    def close(self) -> None:
        try:
            self._cancel_all_tasks()
            self._loop.run_until_complete(self._loop.shutdown_asyncgens())
        finally:
            asyncio.set_event_loop(None)
            self._loop.close()

    def run_asyncgen_fixture(
        self,
        fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
        kwargs: dict[str, Any],
    ) -> Iterable[T_Retval]:
        # Drives an async-generator fixture: runs it to its first yield,
        # hands the value to the test, then resumes it for teardown once the
        # surrounding generator is resumed (event.set() below).
        async def fixture_runner() -> None:
            agen = fixture_func(**kwargs)
            try:
                retval = await agen.asend(None)
                self._raise_async_exceptions()
            except BaseException as exc:
                f.set_exception(exc)
                return
            else:
                f.set_result(retval)

            # Wait here until the test body has finished with the fixture
            await event.wait()
            try:
                await agen.asend(None)
            except StopAsyncIteration:
                pass
            else:
                # Fixture yielded more than once: force-close and complain
                await agen.aclose()
                raise RuntimeError("Async generator fixture did not stop")

        f = self._loop.create_future()
        event = asyncio.Event()
        fixture_task = self._loop.create_task(fixture_runner())
        self._loop.run_until_complete(f)
        yield f.result()
        event.set()
        self._loop.run_until_complete(fixture_task)
        self._raise_async_exceptions()

    def run_fixture(
        self,
        fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
        kwargs: dict[str, Any],
    ) -> T_Retval:
        retval = self._loop.run_until_complete(fixture_func(**kwargs))
        self._raise_async_exceptions()
        return retval

    def run_test(
        self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
    ) -> None:
        try:
            self._loop.run_until_complete(test_func(**kwargs))
        except Exception as exc:
            # Report test-body exceptions alongside async-callback exceptions
            self._exceptions.append(exc)

        self._raise_async_exceptions()
parrot/lib/python3.10/site-packages/anyio/_backends/_trio.py ADDED
@@ -0,0 +1,996 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import array
4
+ import math
5
+ import socket
6
+ from concurrent.futures import Future
7
+ from contextvars import copy_context
8
+ from dataclasses import dataclass
9
+ from functools import partial
10
+ from io import IOBase
11
+ from os import PathLike
12
+ from signal import Signals
13
+ from types import TracebackType
14
+ from typing import (
15
+ IO,
16
+ TYPE_CHECKING,
17
+ Any,
18
+ AsyncGenerator,
19
+ AsyncIterator,
20
+ Awaitable,
21
+ Callable,
22
+ Collection,
23
+ Coroutine,
24
+ Generic,
25
+ Iterable,
26
+ Mapping,
27
+ NoReturn,
28
+ Sequence,
29
+ TypeVar,
30
+ cast,
31
+ )
32
+
33
+ import sniffio
34
+ import trio.from_thread
35
+ from outcome import Error, Outcome, Value
36
+ from trio.socket import SocketType as TrioSocketType
37
+ from trio.to_thread import run_sync
38
+
39
+ from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc
40
+ from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable
41
+ from .._core._eventloop import claim_worker_thread
42
+ from .._core._exceptions import (
43
+ BrokenResourceError,
44
+ BusyResourceError,
45
+ ClosedResourceError,
46
+ EndOfStream,
47
+ )
48
+ from .._core._exceptions import ExceptionGroup as BaseExceptionGroup
49
+ from .._core._sockets import convert_ipv6_sockaddr
50
+ from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter
51
+ from .._core._synchronization import Event as BaseEvent
52
+ from .._core._synchronization import ResourceGuard
53
+ from .._core._tasks import CancelScope as BaseCancelScope
54
+ from ..abc import IPSockAddrType, UDPPacketType
55
+
56
+ if TYPE_CHECKING:
57
+ from trio_typing import TaskStatus
58
+
59
+ try:
60
+ from trio import lowlevel as trio_lowlevel
61
+ except ImportError:
62
+ from trio import hazmat as trio_lowlevel # type: ignore[no-redef]
63
+ from trio.hazmat import wait_readable, wait_writable
64
+ else:
65
+ from trio.lowlevel import wait_readable, wait_writable
66
+
67
+ try:
68
+ trio_open_process = trio_lowlevel.open_process
69
+ except AttributeError:
70
+ # isort: off
71
+ from trio import ( # type: ignore[attr-defined, no-redef]
72
+ open_process as trio_open_process,
73
+ )
74
+
75
+ T_Retval = TypeVar("T_Retval")
76
+ T_SockAddr = TypeVar("T_SockAddr", str, IPSockAddrType)
77
+
78
+
79
+ #
80
+ # Event loop
81
+ #
82
+
83
+ run = trio.run
84
+ current_token = trio.lowlevel.current_trio_token
85
+ RunVar = trio.lowlevel.RunVar
86
+
87
+
88
+ #
89
+ # Miscellaneous
90
+ #
91
+
92
+ sleep = trio.sleep
93
+
94
+
95
+ #
96
+ # Timeouts and cancellation
97
+ #
98
+
99
+
100
class CancelScope(BaseCancelScope):
    """Trio implementation of the anyio CancelScope: a thin delegating
    wrapper around a native trio.CancelScope."""

    def __new__(
        cls, original: trio.CancelScope | None = None, **kwargs: object
    ) -> CancelScope:
        # Bypass the dispatching __new__ of the base class
        return object.__new__(cls)

    def __init__(self, original: trio.CancelScope | None = None, **kwargs: Any) -> None:
        # Either wrap an existing trio scope or create a fresh one
        self.__original = original or trio.CancelScope(**kwargs)

    def __enter__(self) -> CancelScope:
        self.__original.__enter__()
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        # https://github.com/python-trio/trio-typing/pull/79
        return self.__original.__exit__(  # type: ignore[func-returns-value]
            exc_type, exc_val, exc_tb
        )

    def cancel(self) -> DeprecatedAwaitable:
        self.__original.cancel()
        # Returned so that legacy ``await scope.cancel()`` still works
        return DeprecatedAwaitable(self.cancel)

    @property
    def deadline(self) -> float:
        return self.__original.deadline

    @deadline.setter
    def deadline(self, value: float) -> None:
        self.__original.deadline = value

    @property
    def cancel_called(self) -> bool:
        return self.__original.cancel_called

    @property
    def shield(self) -> bool:
        return self.__original.shield

    @shield.setter
    def shield(self, value: bool) -> None:
        self.__original.shield = value
147
+
148
+
149
+ CancelledError = trio.Cancelled
150
+ checkpoint = trio.lowlevel.checkpoint
151
+ checkpoint_if_cancelled = trio.lowlevel.checkpoint_if_cancelled
152
+ cancel_shielded_checkpoint = trio.lowlevel.cancel_shielded_checkpoint
153
+ current_effective_deadline = trio.current_effective_deadline
154
+ current_time = trio.current_time
155
+
156
+
157
+ #
158
+ # Task groups
159
+ #
160
+
161
+
162
class ExceptionGroup(BaseExceptionGroup, trio.MultiError):
    """Marries anyio's ExceptionGroup with trio's native MultiError so it is
    caught by handlers expecting either type."""

    pass
164
+
165
+
166
class TaskGroup(abc.TaskGroup):
    """Trio implementation of the anyio TaskGroup, backed by a trio nursery."""

    def __init__(self) -> None:
        self._active = False
        self._nursery_manager = trio.open_nursery()
        # Populated in __aenter__; None until then
        self.cancel_scope = None  # type: ignore[assignment]

    async def __aenter__(self) -> TaskGroup:
        self._active = True
        self._nursery = await self._nursery_manager.__aenter__()
        self.cancel_scope = CancelScope(self._nursery.cancel_scope)
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        try:
            return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb)
        except trio.MultiError as exc:
            # Re-wrap trio's MultiError in anyio's ExceptionGroup type
            raise ExceptionGroup(exc.exceptions) from None
        finally:
            self._active = False

    def start_soon(
        self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
    ) -> None:
        if not self._active:
            raise RuntimeError(
                "This task group is not active; no new tasks can be started."
            )

        self._nursery.start_soon(func, *args, name=name)

    async def start(
        self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
    ) -> object:
        if not self._active:
            raise RuntimeError(
                "This task group is not active; no new tasks can be started."
            )

        # Waits for the task to call task_status.started() and returns its value
        return await self._nursery.start(func, *args, name=name)
210
+
211
+
212
+ #
213
+ # Threads
214
+ #
215
+
216
+
217
async def run_sync_in_worker_thread(
    func: Callable[..., T_Retval],
    *args: object,
    cancellable: bool = False,
    limiter: trio.CapacityLimiter | None = None,
) -> T_Retval:
    """Run *func(*args)* in a trio worker thread and return its result.

    The worker thread is marked as an anyio worker (claim_worker_thread) and
    has sniffio's async-library marker cleared so the sync code does not
    think it is running in async context.
    """

    def wrapper() -> T_Retval:
        with claim_worker_thread("trio"):
            return func(*args)

    # TODO: remove explicit context copying when trio 0.20 is the minimum requirement
    context = copy_context()
    context.run(sniffio.current_async_library_cvar.set, None)
    return await run_sync(
        context.run, wrapper, cancellable=cancellable, limiter=limiter
    )
233
+
234
+
235
# TODO: remove this workaround when trio 0.20 is the minimum requirement
def run_async_from_thread(
    fn: Callable[..., Awaitable[T_Retval]], *args: Any
) -> T_Retval:
    """Call the coroutine function *fn* in the trio thread from a worker thread.

    The nursery + nonlocal dance exists so that *fn* runs inside a copied
    context whose sniffio marker says "trio"; the inner task writes its
    result into ``retval`` before the nursery exits.
    """

    async def wrapper() -> T_Retval:
        retval: T_Retval

        async def inner() -> None:
            nonlocal retval
            __tracebackhide__ = True
            retval = await fn(*args)

        async with trio.open_nursery() as n:
            # Spawn inner() under the copied context so the sniffio marker applies
            context.run(n.start_soon, inner)

        __tracebackhide__ = True
        # Assigned by inner() before the nursery exits
        return retval  # noqa: F821

    context = copy_context()
    context.run(sniffio.current_async_library_cvar.set, "trio")
    return trio.from_thread.run(wrapper)
256
+
257
+
258
def run_sync_from_thread(fn: Callable[..., T_Retval], *args: Any) -> T_Retval:
    """Call *fn(*args)* in the trio thread from a worker thread and return
    its result."""
    # TODO: remove explicit context copying when trio 0.20 is the minimum requirement
    ctx = copy_context()
    result = trio.from_thread.run_sync(ctx.run, fn, *args)
    return cast(T_Retval, result)
262
+
263
+
264
class BlockingPortal(abc.BlockingPortal):
    """Trio implementation of the anyio BlockingPortal."""

    def __new__(cls) -> BlockingPortal:
        # Bypass the dispatching __new__ of the base class
        return object.__new__(cls)

    def __init__(self) -> None:
        super().__init__()
        # Token identifying the trio run this portal belongs to
        self._token = trio.lowlevel.current_trio_token()

    def _spawn_task_from_thread(
        self,
        func: Callable,
        args: tuple,
        kwargs: dict[str, Any],
        name: object,
        future: Future,
    ) -> None:
        # Hop into the trio thread (identified by self._token) and start the
        # call in the portal's task group, with the sniffio marker set to
        # "trio" inside a copied context
        context = copy_context()
        context.run(sniffio.current_async_library_cvar.set, "trio")
        trio.from_thread.run_sync(
            context.run,
            partial(self._task_group.start_soon, name=name),
            self._call_func,
            func,
            args,
            kwargs,
            future,
            trio_token=self._token,
        )
292
+
293
+
294
+ #
295
+ # Subprocesses
296
+ #
297
+
298
+
299
@dataclass(eq=False)
class ReceiveStreamWrapper(abc.ByteReceiveStream):
    """Adapts a trio ReceiveStream (e.g. a subprocess pipe) to anyio's
    ByteReceiveStream, translating trio exceptions to anyio's."""

    _stream: trio.abc.ReceiveStream

    async def receive(self, max_bytes: int | None = None) -> bytes:
        try:
            data = await self._stream.receive_some(max_bytes)
        except trio.ClosedResourceError as exc:
            # Preserve the original low-level cause, drop the trio wrapper
            raise ClosedResourceError from exc.__cause__
        except trio.BrokenResourceError as exc:
            raise BrokenResourceError from exc.__cause__

        # trio signals EOF with an empty read; anyio uses EndOfStream
        if data:
            return data
        else:
            raise EndOfStream

    async def aclose(self) -> None:
        await self._stream.aclose()
318
+
319
+
320
@dataclass(eq=False)
class SendStreamWrapper(abc.ByteSendStream):
    """Adapts a trio SendStream (e.g. a subprocess pipe) to anyio's
    ByteSendStream, translating trio exceptions to anyio's."""

    _stream: trio.abc.SendStream

    async def send(self, item: bytes) -> None:
        try:
            await self._stream.send_all(item)
        except trio.ClosedResourceError as exc:
            # Preserve the original low-level cause, drop the trio wrapper
            raise ClosedResourceError from exc.__cause__
        except trio.BrokenResourceError as exc:
            raise BrokenResourceError from exc.__cause__

    async def aclose(self) -> None:
        await self._stream.aclose()
334
+
335
+
336
@dataclass(eq=False)
class Process(abc.Process):
    """Trio implementation of the anyio Process: delegates to trio.Process
    and exposes the stdio pipes as anyio byte streams."""

    _process: trio.Process
    _stdin: abc.ByteSendStream | None
    _stdout: abc.ByteReceiveStream | None
    _stderr: abc.ByteReceiveStream | None

    async def aclose(self) -> None:
        # Close all stdio pipes first, then wait for the process to exit
        if self._stdin:
            await self._stdin.aclose()
        if self._stdout:
            await self._stdout.aclose()
        if self._stderr:
            await self._stderr.aclose()

        await self.wait()

    async def wait(self) -> int:
        return await self._process.wait()

    def terminate(self) -> None:
        self._process.terminate()

    def kill(self) -> None:
        self._process.kill()

    def send_signal(self, signal: Signals) -> None:
        self._process.send_signal(signal)

    @property
    def pid(self) -> int:
        return self._process.pid

    @property
    def returncode(self) -> int | None:
        # None while the process is still running
        return self._process.returncode

    @property
    def stdin(self) -> abc.ByteSendStream | None:
        return self._stdin

    @property
    def stdout(self) -> abc.ByteReceiveStream | None:
        return self._stdout

    @property
    def stderr(self) -> abc.ByteReceiveStream | None:
        return self._stderr
384
+
385
+
386
async def open_process(
    command: str | bytes | Sequence[str | bytes],
    *,
    shell: bool,
    stdin: int | IO[Any] | None,
    stdout: int | IO[Any] | None,
    stderr: int | IO[Any] | None,
    cwd: str | bytes | PathLike | None = None,
    env: Mapping[str, str] | None = None,
    start_new_session: bool = False,
) -> Process:
    """Spawn a subprocess via trio and wrap it in anyio's Process type.

    Each stdio handle that trio opened as a pipe is wrapped in the matching
    anyio byte-stream adapter; handles that were not piped stay None.
    """
    process = await trio_open_process(  # type: ignore[misc]
        command,  # type: ignore[arg-type]
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        shell=shell,
        cwd=cwd,
        env=env,
        start_new_session=start_new_session,
    )
    stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None
    stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None
    stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None
    return Process(process, stdin_stream, stdout_stream, stderr_stream)
411
+
412
+
413
class _ProcessPoolShutdownInstrument(trio.abc.Instrument):
    """Trio instrument hooked at run end; currently only defers to the base
    implementation (placeholder for process pool shutdown work)."""

    def after_run(self) -> None:
        super().after_run()
416
+
417
+
418
+ current_default_worker_process_limiter: RunVar = RunVar(
419
+ "current_default_worker_process_limiter"
420
+ )
421
+
422
+
423
async def _shutdown_process_pool(workers: set[Process]) -> None:
    """System task that sleeps until the trio run is cancelled, then kills
    and reaps every still-running worker process in *workers*."""
    process: Process
    try:
        # Park forever; cancellation signals run shutdown
        await sleep(math.inf)
    except trio.Cancelled:
        for process in workers:
            if process.returncode is None:
                process.kill()

        # Shield the cleanup so the surrounding cancellation cannot
        # interrupt reaping the processes
        with CancelScope(shield=True):
            for process in workers:
                await process.aclose()
435
+
436
+
437
def setup_process_pool_exit_at_shutdown(workers: set[Process]) -> None:
    """Register a system task that terminates *workers* when the trio run ends."""
    trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers)
439
+
440
+
441
+ #
442
+ # Sockets and networking
443
+ #
444
+
445
+
446
class _TrioSocketMixin(Generic[T_SockAddr]):
    """Shared plumbing for trio-backed socket wrappers: close tracking and
    translation of trio/OS errors into anyio exceptions."""

    def __init__(self, trio_socket: TrioSocketType) -> None:
        self._trio_socket = trio_socket
        self._closed = False

    def _check_closed(self) -> None:
        if self._closed:
            raise ClosedResourceError
        # A negative fileno means the fd was closed behind our back
        if self._trio_socket.fileno() < 0:
            raise BrokenResourceError

    @property
    def _raw_socket(self) -> socket.socket:
        # Reaches into trio's private wrapper for the stdlib socket object
        return self._trio_socket._sock  # type: ignore[attr-defined]

    async def aclose(self) -> None:
        if self._trio_socket.fileno() >= 0:
            self._closed = True
            self._trio_socket.close()

    def _convert_socket_error(self, exc: BaseException) -> NoReturn:
        """Translate *exc* into the matching anyio exception (always raises)."""
        if isinstance(exc, trio.ClosedResourceError):
            raise ClosedResourceError from exc
        elif self._trio_socket.fileno() < 0 and self._closed:
            # Error surfaced because we closed the socket ourselves
            raise ClosedResourceError from None
        elif isinstance(exc, OSError):
            raise BrokenResourceError from exc
        else:
            raise exc
475
+
476
+
477
class SocketStream(_TrioSocketMixin, abc.SocketStream):
    """Trio implementation of anyio's SocketStream (connected stream socket)."""

    def __init__(self, trio_socket: TrioSocketType) -> None:
        super().__init__(trio_socket)
        # Guards enforce single-task access per direction
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")

    async def receive(self, max_bytes: int = 65536) -> bytes:
        with self._receive_guard:
            try:
                data = await self._trio_socket.recv(max_bytes)
            except BaseException as exc:
                self._convert_socket_error(exc)

            # Empty read means the peer closed its end
            if data:
                return data
            else:
                raise EndOfStream

    async def send(self, item: bytes) -> None:
        with self._send_guard:
            # Loop until the whole buffer is sent; send() may do partial writes
            view = memoryview(item)
            while view:
                try:
                    bytes_sent = await self._trio_socket.send(view)
                except BaseException as exc:
                    self._convert_socket_error(exc)

                view = view[bytes_sent:]

    async def send_eof(self) -> None:
        # Half-close: signal EOF to the peer while keeping the read side open
        self._trio_socket.shutdown(socket.SHUT_WR)
508
+
509
+
510
class UNIXSocketStream(SocketStream, abc.UNIXSocketStream):
    """UNIX-domain stream socket with SCM_RIGHTS file descriptor passing."""

    async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
        """Receive up to *msglen* bytes plus up to *maxfds* file descriptors
        sent over this socket with sendmsg()/SCM_RIGHTS."""
        if not isinstance(msglen, int) or msglen < 0:
            raise ValueError("msglen must be a non-negative integer")
        if not isinstance(maxfds, int) or maxfds < 1:
            raise ValueError("maxfds must be a positive integer")

        fds = array.array("i")
        await checkpoint()
        with self._receive_guard:
            while True:
                try:
                    message, ancdata, flags, addr = await self._trio_socket.recvmsg(
                        msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
                    )
                except BaseException as exc:
                    self._convert_socket_error(exc)
                else:
                    # Neither payload nor ancillary data: peer closed
                    if not message and not ancdata:
                        raise EndOfStream

                    break

        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            # Only SCM_RIGHTS (fd passing) is expected here
            if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
                raise RuntimeError(
                    f"Received unexpected ancillary data; message = {message!r}, "
                    f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
                )

            # Truncate to a whole number of ints, per the cmsg(3) convention
            fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])

        return message, list(fds)

    async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
        """Send *message* together with the file descriptors in *fds* (raw
        ints or file-like objects) via SCM_RIGHTS."""
        if not message:
            raise ValueError("message must not be empty")
        if not fds:
            raise ValueError("fds must not be empty")

        filenos: list[int] = []
        for fd in fds:
            if isinstance(fd, int):
                filenos.append(fd)
            elif isinstance(fd, IOBase):
                filenos.append(fd.fileno())

        fdarray = array.array("i", filenos)
        await checkpoint()
        with self._send_guard:
            while True:
                try:
                    await self._trio_socket.sendmsg(
                        [message],
                        [
                            (
                                socket.SOL_SOCKET,
                                socket.SCM_RIGHTS,  # type: ignore[list-item]
                                fdarray,
                            )
                        ],
                    )
                    break
                except BaseException as exc:
                    self._convert_socket_error(exc)
575
+
576
+
577
class TCPSocketListener(_TrioSocketMixin, abc.SocketListener):
    """Trio implementation of a listening TCP socket."""

    def __init__(self, raw_socket: socket.socket):
        super().__init__(trio.socket.from_stdlib_socket(raw_socket))
        # Only one task may be blocked in accept() at a time
        self._accept_guard = ResourceGuard("accepting connections from")

    async def accept(self) -> SocketStream:
        with self._accept_guard:
            try:
                trio_socket, _addr = await self._trio_socket.accept()
            except BaseException as exc:
                self._convert_socket_error(exc)

        # Disable Nagle's algorithm on accepted connections
        trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        return SocketStream(trio_socket)
591
+
592
+
593
class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener):
    """Trio implementation of a listening UNIX-domain socket."""

    def __init__(self, raw_socket: socket.socket):
        super().__init__(trio.socket.from_stdlib_socket(raw_socket))
        # Only one task may be blocked in accept() at a time
        self._accept_guard = ResourceGuard("accepting connections from")

    async def accept(self) -> UNIXSocketStream:
        with self._accept_guard:
            try:
                trio_socket, _addr = await self._trio_socket.accept()
            except BaseException as exc:
                self._convert_socket_error(exc)

        return UNIXSocketStream(trio_socket)
606
+
607
+
608
class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket):
    """Unconnected UDP socket; every datagram carries an explicit peer address."""

    def __init__(self, trio_socket: TrioSocketType) -> None:
        super().__init__(trio_socket)
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")

    async def receive(self) -> tuple[bytes, IPSockAddrType]:
        """Receive one datagram, returning ``(payload, sender_address)``."""
        with self._receive_guard:
            try:
                payload, sender = await self._trio_socket.recvfrom(65536)
                # Normalize 4-tuple IPv6 addresses into AnyIO's 2-tuple form.
                return payload, convert_ipv6_sockaddr(sender)
            except BaseException as exc:
                self._convert_socket_error(exc)

    async def send(self, item: UDPPacketType) -> None:
        """Send one ``(payload, address)`` datagram."""
        with self._send_guard:
            try:
                await self._trio_socket.sendto(*item)
            except BaseException as exc:
                self._convert_socket_error(exc)
+
629
+
630
class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket):
    """UDP socket connected to a fixed peer; datagrams omit the address."""

    def __init__(self, trio_socket: TrioSocketType) -> None:
        super().__init__(trio_socket)
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")

    async def receive(self) -> bytes:
        """Receive one datagram from the connected peer."""
        with self._receive_guard:
            try:
                return await self._trio_socket.recv(65536)
            except BaseException as exc:
                self._convert_socket_error(exc)

    async def send(self, item: bytes) -> None:
        """Send one datagram to the connected peer."""
        with self._send_guard:
            try:
                await self._trio_socket.send(item)
            except BaseException as exc:
                self._convert_socket_error(exc)
+
650
+
651
async def connect_tcp(
    host: str, port: int, local_address: IPSockAddrType | None = None
) -> SocketStream:
    """Open an outbound TCP connection and wrap it in a SocketStream.

    The socket is closed if the connection attempt fails.
    """
    # A colon in the host string implies an IPv6 literal address.
    family = socket.AF_INET if ":" not in host else socket.AF_INET6
    trio_socket = trio.socket.socket(family)
    trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    if local_address:
        await trio_socket.bind(local_address)

    try:
        await trio_socket.connect((host, port))
    except BaseException:
        trio_socket.close()
        raise

    return SocketStream(trio_socket)
+
668
+
669
async def connect_unix(path: str) -> UNIXSocketStream:
    """Connect to a UNIX domain socket at ``path``; close the socket on failure."""
    trio_socket = trio.socket.socket(socket.AF_UNIX)
    try:
        await trio_socket.connect(path)
    except BaseException:
        trio_socket.close()
        raise

    return UNIXSocketStream(trio_socket)
+
679
+
680
async def create_udp_socket(
    family: socket.AddressFamily,
    local_address: IPSockAddrType | None,
    remote_address: IPSockAddrType | None,
    reuse_port: bool,
) -> UDPSocket | ConnectedUDPSocket:
    """Create a UDP socket, optionally bound to a local address and/or
    connected to a remote one.

    Returns a ConnectedUDPSocket when ``remote_address`` is given, otherwise
    an unconnected UDPSocket.
    """
    sock = trio.socket.socket(family=family, type=socket.SOCK_DGRAM)

    if reuse_port:
        # NOTE(review): SO_REUSEPORT is platform-dependent (absent on Windows).
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)

    if local_address:
        await sock.bind(local_address)

    if not remote_address:
        return UDPSocket(sock)

    await sock.connect(remote_address)
    return ConnectedUDPSocket(sock)
+
700
+
701
# Name resolution is delegated directly to trio's own implementations.
getaddrinfo = trio.socket.getaddrinfo
getnameinfo = trio.socket.getnameinfo
+
704
+
705
async def wait_socket_readable(sock: socket.socket) -> None:
    """Wait until ``sock`` is readable, translating trio errors to AnyIO ones."""
    try:
        await wait_readable(sock)
    except trio.ClosedResourceError as exc:
        raise ClosedResourceError().with_traceback(exc.__traceback__) from None
    except trio.BusyResourceError:
        raise BusyResourceError("reading from") from None
+
713
+
714
async def wait_socket_writable(sock: socket.socket) -> None:
    """Wait until ``sock`` is writable, translating trio errors to AnyIO ones."""
    try:
        await wait_writable(sock)
    except trio.ClosedResourceError as exc:
        raise ClosedResourceError().with_traceback(exc.__traceback__) from None
    except trio.BusyResourceError:
        raise BusyResourceError("writing to") from None
+
722
+
723
+ #
724
+ # Synchronization
725
+ #
726
+
727
+
728
class Event(BaseEvent):
    """AnyIO event backed by a native ``trio.Event``."""

    def __new__(cls) -> Event:
        return object.__new__(cls)

    def __init__(self) -> None:
        self.__original = trio.Event()

    def is_set(self) -> bool:
        """Return True if the event has been set."""
        return self.__original.is_set()

    async def wait(self) -> None:
        """Block until the event is set; return immediately if already set."""
        return await self.__original.wait()

    def statistics(self) -> EventStatistics:
        """Report how many tasks are currently waiting on this event."""
        stats = self.__original.statistics()
        return EventStatistics(tasks_waiting=stats.tasks_waiting)

    def set(self) -> DeprecatedAwaitable:
        """Set the event; the returned object warns if (deprecatedly) awaited."""
        self.__original.set()
        return DeprecatedAwaitable(self.set)
+
749
+
750
class CapacityLimiter(BaseCapacityLimiter):
    """AnyIO capacity limiter implemented as a thin shim over ``trio.CapacityLimiter``."""

    def __new__(cls, *args: object, **kwargs: object) -> CapacityLimiter:
        return object.__new__(cls)

    def __init__(
        self, *args: Any, original: trio.CapacityLimiter | None = None
    ) -> None:
        # Either adopt an existing trio limiter or create a fresh one.
        self.__original = original or trio.CapacityLimiter(*args)

    async def __aenter__(self) -> None:
        return await self.__original.__aenter__()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        await self.__original.__aexit__(exc_type, exc_val, exc_tb)

    @property
    def total_tokens(self) -> float:
        """Total number of tokens managed by the wrapped limiter."""
        return self.__original.total_tokens

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        self.__original.total_tokens = value

    @property
    def borrowed_tokens(self) -> int:
        """Number of tokens currently borrowed."""
        return self.__original.borrowed_tokens

    @property
    def available_tokens(self) -> float:
        """Number of tokens still available for borrowing."""
        return self.__original.available_tokens

    def acquire_nowait(self) -> DeprecatedAwaitable:
        """Acquire a token without blocking; awaiting the result is deprecated."""
        self.__original.acquire_nowait()
        return DeprecatedAwaitable(self.acquire_nowait)

    def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable:
        """Acquire a token for ``borrower`` without blocking."""
        self.__original.acquire_on_behalf_of_nowait(borrower)
        return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait)

    async def acquire(self) -> None:
        """Acquire a token, blocking until one is available."""
        await self.__original.acquire()

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        """Acquire a token for ``borrower``, blocking until one is available."""
        await self.__original.acquire_on_behalf_of(borrower)

    def release(self) -> None:
        """Release a token held by the current task."""
        return self.__original.release()

    def release_on_behalf_of(self, borrower: object) -> None:
        """Release a token held by ``borrower``."""
        return self.__original.release_on_behalf_of(borrower)

    def statistics(self) -> CapacityLimiterStatistics:
        """Translate trio's limiter statistics into AnyIO's dataclass."""
        stats = self.__original.statistics()
        return CapacityLimiterStatistics(
            borrowed_tokens=stats.borrowed_tokens,
            total_tokens=stats.total_tokens,
            borrowers=stats.borrowers,
            tasks_waiting=stats.tasks_waiting,
        )
+
815
+
816
# Per-run cache for the wrapper around trio's default thread limiter.
_capacity_limiter_wrapper: RunVar = RunVar("_capacity_limiter_wrapper")


def current_default_thread_limiter() -> CapacityLimiter:
    """Return the AnyIO wrapper around trio's default thread limiter.

    The wrapper is created lazily on first use and cached per trio run.
    """
    try:
        return _capacity_limiter_wrapper.get()
    except LookupError:
        wrapper = CapacityLimiter(
            original=trio.to_thread.current_default_thread_limiter()
        )
        _capacity_limiter_wrapper.set(wrapper)
        return wrapper
+
829
+
830
+ #
831
+ # Signal handling
832
+ #
833
+
834
+
835
class _SignalReceiver(DeprecatedAsyncContextManager["_SignalReceiver"]):
    """Synchronous context manager over trio's signal receiver that iterates
    received signal numbers as :class:`~signal.Signals` members."""

    _iterator: AsyncIterator[int]

    def __init__(self, signals: tuple[Signals, ...]):
        self._signals = signals

    def __enter__(self) -> _SignalReceiver:
        self._cm = trio.open_signal_receiver(*self._signals)
        self._iterator = self._cm.__enter__()
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        return self._cm.__exit__(exc_type, exc_val, exc_tb)

    def __aiter__(self) -> _SignalReceiver:
        return self

    async def __anext__(self) -> Signals:
        # trio yields raw integers; re-wrap them in the Signals enum.
        raw = await self._iterator.__anext__()
        return Signals(raw)
+
861
+
862
def open_signal_receiver(*signals: Signals) -> _SignalReceiver:
    """Return a context manager yielding an async iterator of received signals."""
    return _SignalReceiver(signals)
+
865
+
866
+ #
867
+ # Testing and debugging
868
+ #
869
+
870
+
871
def get_current_task() -> TaskInfo:
    """Return a TaskInfo describing the currently running trio task."""
    task = trio_lowlevel.current_task()
    nursery = task.parent_nursery
    # The root task has no parent nursery (and hence no parent id).
    parent_id = id(nursery.parent_task) if nursery and nursery.parent_task else None
    return TaskInfo(id(task), parent_id, task.name, task.coro)
+
880
+
881
def get_running_tasks() -> list[TaskInfo]:
    """Return TaskInfo objects for every task in the current trio run.

    Performs a breadth-first walk of the nursery tree starting at the root task.
    """
    root = trio_lowlevel.current_root_task()
    infos = [TaskInfo(id(root), None, root.name, root.coro)]
    pending = list(root.child_nurseries)
    while pending:
        next_level: list[trio.Nursery] = []
        for nursery in pending:
            owner_id = id(nursery.parent_task)
            for child in nursery.child_tasks:
                infos.append(TaskInfo(id(child), owner_id, child.name, child.coro))
                next_level.extend(child.child_nurseries)

        pending = next_level

    return infos
+
898
+
899
def wait_all_tasks_blocked() -> Awaitable[None]:
    """Return an awaitable that completes once every task is blocked.

    ``trio.testing`` is imported lazily to avoid pulling in test helpers
    during normal operation.
    """
    import trio.testing

    return trio.testing.wait_all_tasks_blocked()
+
904
+
905
class TestRunner(abc.TestRunner):
    """Drives trio as a *guest* run inside a synchronous pytest session.

    All work is funnelled through two queues: ``_call_queue`` carries callbacks
    that trio's guest mode asks the host to run, and ``_result_queue`` carries
    the outcomes of the coroutines submitted via ``_call``.
    """

    def __init__(self, **options: Any) -> None:
        from collections import deque
        from queue import Queue

        # Callbacks trio wants executed on the host (this) thread.
        self._call_queue: Queue[Callable[..., object]] = Queue()
        # Outcomes (Value/Error) of completed _call_func invocations.
        self._result_queue: deque[Outcome] = deque()
        self._stop_event: trio.Event | None = None
        self._nursery: trio.Nursery | None = None
        # Extra keyword arguments forwarded to trio.lowlevel.start_guest_run().
        self._options = options

    async def _trio_main(self) -> None:
        # Main task of the guest run: keep a nursery open until close() fires
        # the stop event.
        self._stop_event = trio.Event()
        async with trio.open_nursery() as self._nursery:
            await self._stop_event.wait()

    async def _call_func(
        self, func: Callable[..., Awaitable[object]], args: tuple, kwargs: dict
    ) -> None:
        # Run one submitted coroutine function and capture its outcome so
        # _call() can unwrap (and possibly re-raise) it on the host side.
        try:
            retval = await func(*args, **kwargs)
        except BaseException as exc:
            self._result_queue.append(Error(exc))
        else:
            self._result_queue.append(Value(retval))

    def _main_task_finished(self, outcome: object) -> None:
        # done_callback of the guest run; signals close() that trio is gone.
        self._nursery = None

    def _get_nursery(self) -> trio.Nursery:
        # Lazily start the guest run on first use, then pump the call queue
        # until _trio_main has opened its nursery.
        if self._nursery is None:
            trio.lowlevel.start_guest_run(
                self._trio_main,
                run_sync_soon_threadsafe=self._call_queue.put,
                done_callback=self._main_task_finished,
                **self._options,
            )
            while self._nursery is None:
                self._call_queue.get()()

        return self._nursery

    def _call(
        self, func: Callable[..., Awaitable[T_Retval]], *args: object, **kwargs: object
    ) -> T_Retval:
        # Submit a coroutine function to the guest run and drive the event loop
        # (by executing queued callbacks) until its outcome arrives.
        self._get_nursery().start_soon(self._call_func, func, args, kwargs)
        while not self._result_queue:
            self._call_queue.get()()

        outcome = self._result_queue.pop()
        # unwrap() returns the value or re-raises the captured exception.
        return outcome.unwrap()

    def close(self) -> None:
        """Stop the guest run (if started) and drain remaining callbacks."""
        if self._stop_event:
            self._stop_event.set()
            while self._nursery is not None:
                self._call_queue.get()()

    def run_asyncgen_fixture(
        self,
        fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
        kwargs: dict[str, Any],
    ) -> Iterable[T_Retval]:
        """Run an async generator fixture: yield its value, then resume it for
        teardown once the surrounding (sync) generator is resumed."""

        async def fixture_runner(*, task_status: TaskStatus[T_Retval]) -> None:
            agen = fixture_func(**kwargs)
            retval = await agen.asend(None)
            # Hand the fixture value back to the starting task, then park
            # until the host signals teardown.
            task_status.started(retval)
            await teardown_event.wait()
            try:
                await agen.asend(None)
            except StopAsyncIteration:
                pass
            else:
                # A second yield means the fixture is malformed.
                await agen.aclose()
                raise RuntimeError("Async generator fixture did not stop")

        teardown_event = trio.Event()
        fixture_value = self._call(lambda: self._get_nursery().start(fixture_runner))
        yield fixture_value
        teardown_event.set()

    def run_fixture(
        self,
        fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
        kwargs: dict[str, Any],
    ) -> T_Retval:
        """Run a plain async fixture to completion and return its value."""
        return self._call(fixture_func, **kwargs)

    def run_test(
        self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
    ) -> None:
        """Run one async test function to completion."""
        self._call(test_func, **kwargs)
parrot/lib/python3.10/site-packages/anyio/_core/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (163 Bytes). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_compat.cpython-310.pyc ADDED
Binary file (8.19 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_eventloop.cpython-310.pyc ADDED
Binary file (4.45 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_exceptions.cpython-310.pyc ADDED
Binary file (4.66 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_fileio.cpython-310.pyc ADDED
Binary file (22.4 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_resources.cpython-310.pyc ADDED
Binary file (742 Bytes). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_signals.cpython-310.pyc ADDED
Binary file (1.13 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_sockets.cpython-310.pyc ADDED
Binary file (16.9 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_streams.cpython-310.pyc ADDED
Binary file (1.68 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_subprocesses.cpython-310.pyc ADDED
Binary file (4.86 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_synchronization.cpython-310.pyc ADDED
Binary file (19 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_tasks.cpython-310.pyc ADDED
Binary file (6.47 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_testing.cpython-310.pyc ADDED
Binary file (3.02 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/__pycache__/_typedattr.cpython-310.pyc ADDED
Binary file (3.32 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/_core/_compat.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from abc import ABCMeta, abstractmethod
4
+ from contextlib import AbstractContextManager
5
+ from types import TracebackType
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ Any,
9
+ AsyncContextManager,
10
+ Callable,
11
+ ContextManager,
12
+ Generator,
13
+ Generic,
14
+ Iterable,
15
+ List,
16
+ TypeVar,
17
+ Union,
18
+ overload,
19
+ )
20
+ from warnings import warn
21
+
22
+ if TYPE_CHECKING:
23
+ from ._testing import TaskInfo
24
+ else:
25
+ TaskInfo = object
26
+
27
+ T = TypeVar("T")
28
+ AnyDeprecatedAwaitable = Union[
29
+ "DeprecatedAwaitable",
30
+ "DeprecatedAwaitableFloat",
31
+ "DeprecatedAwaitableList[T]",
32
+ TaskInfo,
33
+ ]
34
+
35
+
36
@overload
async def maybe_async(__obj: TaskInfo) -> TaskInfo:
    ...


@overload
async def maybe_async(__obj: DeprecatedAwaitableFloat) -> float:
    ...


@overload
async def maybe_async(__obj: DeprecatedAwaitableList[T]) -> list[T]:
    ...


@overload
async def maybe_async(__obj: DeprecatedAwaitable) -> None:
    ...


async def maybe_async(
    __obj: AnyDeprecatedAwaitable[T],
) -> TaskInfo | float | list[T] | None:
    """
    Await on the given object if necessary.

    Bridges AnyIO 2.x and 3.x, where several functions and methods changed
    from coroutine functions into regular functions. Do **not** use this for
    any other purpose!

    :return: the result of awaiting on the object if coroutine, or the object
        itself otherwise

    .. versionadded:: 2.2

    """
    # The deprecated-awaitable wrappers all expose _unwrap() to recover the
    # underlying plain value without emitting a deprecation warning.
    return __obj._unwrap()
+
74
+
75
class _ContextManagerWrapper:
    """Adapt a synchronous context manager to the async context manager protocol.

    The wrapped manager's ``__enter__``/``__exit__`` are invoked directly (no
    thread offloading), so they must not block.
    """

    def __init__(self, cm: ContextManager[T]):
        self._cm = cm

    async def __aenter__(self) -> T:
        return self._cm.__enter__()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        return self._cm.__exit__(exc_type, exc_val, exc_tb)
+
90
+
91
def maybe_async_cm(
    cm: ContextManager[T] | AsyncContextManager[T],
) -> AsyncContextManager[T]:
    """
    Wrap a regular context manager as an async one if necessary.

    Bridges AnyIO 2.x and 3.x, where several functions and methods changed
    from returning async context managers to returning regular ones.

    :param cm: a regular or async context manager
    :return: an async context manager
    :raises TypeError: if ``cm`` is not a (synchronous) context manager

    .. versionadded:: 2.2

    """
    if not isinstance(cm, AbstractContextManager):
        # Fixed grammar in the error message ("an context" -> "a context").
        raise TypeError("Given object is not a context manager")

    return _ContextManagerWrapper(cm)
+
111
+
112
def _warn_deprecation(
    awaitable: AnyDeprecatedAwaitable[Any], stacklevel: int = 1
) -> None:
    """Emit a DeprecationWarning for awaiting a function that is now synchronous.

    ``stacklevel`` is bumped by one so the warning points at the caller's
    caller (the user code doing the await).
    """
    message = (
        f'Awaiting on {awaitable._name}() is deprecated. Use "await '
        f"anyio.maybe_async({awaitable._name}(...)) if you have to support both AnyIO 2.x "
        f'and 3.x, or just remove the "await" if you are completely migrating to AnyIO 3+.'
    )
    warn(message, DeprecationWarning, stacklevel=stacklevel + 1)
+
123
+
124
class DeprecatedAwaitable:
    """No-op awaitable returned by functions that used to be coroutines.

    Awaiting it emits a deprecation warning and yields nothing; unwrapping
    returns ``None``.
    """

    def __init__(self, func: Callable[..., DeprecatedAwaitable]):
        # Remember the qualified name of the originating function for the
        # deprecation message.
        self._name = ".".join((func.__module__, func.__qualname__))

    def __await__(self) -> Generator[None, None, None]:
        _warn_deprecation(self)
        if False:  # turns this function into a (never-yielding) generator
            yield

    def __reduce__(self) -> tuple[type[None], tuple[()]]:
        # Pickles as plain None.
        return type(None), ()

    def _unwrap(self) -> None:
        return None
+
139
+
140
class DeprecatedAwaitableFloat(float):
    """A float that may also (deprecatedly) be awaited, producing itself.

    Used for values like clock readings that used to require an await.
    """

    def __new__(
        cls, x: float, func: Callable[..., DeprecatedAwaitableFloat]
    ) -> DeprecatedAwaitableFloat:
        return super().__new__(cls, x)

    def __init__(self, x: float, func: Callable[..., DeprecatedAwaitableFloat]):
        self._name = ".".join((func.__module__, func.__qualname__))

    def __await__(self) -> Generator[None, None, float]:
        _warn_deprecation(self)
        if False:  # turns this function into a (never-yielding) generator
            yield

        return float(self)

    def __reduce__(self) -> tuple[type[float], tuple[float]]:
        # Pickles as a plain float.
        return float, (float(self),)

    def _unwrap(self) -> float:
        return float(self)
+
162
+
163
class DeprecatedAwaitableList(List[T]):
    """A list that may also (deprecatedly) be awaited, producing a plain list."""

    def __init__(
        self,
        iterable: Iterable[T] = (),
        *,
        func: Callable[..., DeprecatedAwaitableList[T]],
    ):
        super().__init__(iterable)
        self._name = ".".join((func.__module__, func.__qualname__))

    def __await__(self) -> Generator[None, None, list[T]]:
        _warn_deprecation(self)
        if False:  # turns this function into a (never-yielding) generator
            yield

        return list(self)

    def __reduce__(self) -> tuple[type[list[T]], tuple[list[T]]]:
        # Pickles as a plain list.
        return list, (list(self),)

    def _unwrap(self) -> list[T]:
        return list(self)
+
186
+
187
class DeprecatedAsyncContextManager(Generic[T], metaclass=ABCMeta):
    """Base class for context managers that used to be async-only.

    Subclasses implement the synchronous protocol; the async protocol is
    provided on top of it and emits a deprecation warning when used.
    """

    @abstractmethod
    def __enter__(self) -> T:
        pass

    @abstractmethod
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        pass

    async def __aenter__(self) -> T:
        warn(
            f"Using {self.__class__.__name__} as an async context manager has been deprecated. "
            f'Use "async with anyio.maybe_async_cm(yourcontextmanager) as foo:" if you have to '
            f'support both AnyIO 2.x and 3.x, or just remove the "async" from "async with" if '
            f"you are completely migrating to AnyIO 3+.",
            DeprecationWarning,
        )
        return self.__enter__()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        # The async exit simply defers to the synchronous one.
        return self.__exit__(exc_type, exc_val, exc_tb)
parrot/lib/python3.10/site-packages/anyio/_core/_eventloop.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ import sys
5
+ import threading
6
+ from contextlib import contextmanager
7
+ from importlib import import_module
8
+ from typing import (
9
+ Any,
10
+ Awaitable,
11
+ Callable,
12
+ Generator,
13
+ TypeVar,
14
+ )
15
+
16
+ import sniffio
17
+
18
+ # This must be updated when new backends are introduced
19
+ from ._compat import DeprecatedAwaitableFloat
20
+
21
+ BACKENDS = "asyncio", "trio"
22
+
23
+ T_Retval = TypeVar("T_Retval")
24
+ threadlocals = threading.local()
25
+
26
+
27
def run(
    func: Callable[..., Awaitable[T_Retval]],
    *args: object,
    backend: str = "asyncio",
    backend_options: dict[str, Any] | None = None,
) -> T_Retval:
    """
    Run the given coroutine function in an asynchronous event loop.

    The current thread must not be already running an event loop.

    :param func: a coroutine function
    :param args: positional arguments to ``func``
    :param backend: name of the asynchronous event loop implementation – currently either
        ``asyncio`` or ``trio``
    :param backend_options: keyword arguments to call the backend ``run()`` implementation with
        (documented :ref:`here <backend options>`)
    :return: the return value of the coroutine function
    :raises RuntimeError: if an asynchronous event loop is already running in this thread
    :raises LookupError: if the named backend is not found

    """
    # Refuse to nest: sniffio detecting a library means a loop is running here.
    try:
        asynclib_name = sniffio.current_async_library()
    except sniffio.AsyncLibraryNotFoundError:
        pass
    else:
        raise RuntimeError(f"Already running {asynclib_name} in this thread")

    try:
        asynclib = import_module(f"..._backends._{backend}", package=__name__)
    except ImportError as exc:
        raise LookupError(f"No such backend: {backend}") from exc

    token = None
    if sniffio.current_async_library_cvar.get(None) is None:
        # Since we're in control of the event loop, we can cache the name of
        # the async library for the duration of the run.
        token = sniffio.current_async_library_cvar.set(backend)

    try:
        return asynclib.run(func, *args, **(backend_options or {}))
    finally:
        if token:
            sniffio.current_async_library_cvar.reset(token)
+
73
+
74
async def sleep(delay: float) -> None:
    """
    Pause the current task for the specified duration.

    :param delay: the duration, in seconds

    """
    backend = get_asynclib()
    return await backend.sleep(delay)
+
83
+
84
async def sleep_forever() -> None:
    """
    Pause the current task until it's cancelled.

    Equivalent to ``sleep(math.inf)``.

    .. versionadded:: 3.1

    """
    await sleep(math.inf)
+
95
+
96
async def sleep_until(deadline: float) -> None:
    """
    Pause the current task until the given time.

    :param deadline: the absolute time to wake up at (according to the internal
        monotonic clock of the event loop)

    .. versionadded:: 3.1

    """
    # Clamp to zero so deadlines in the past return immediately.
    remaining = deadline - current_time()
    await sleep(max(remaining, 0))
+
109
+
110
def current_time() -> DeprecatedAwaitableFloat:
    """
    Return the current value of the event loop's internal clock.

    :return: the clock value (seconds)

    """
    value = get_asynclib().current_time()
    return DeprecatedAwaitableFloat(value, current_time)
+
119
+
120
def get_all_backends() -> tuple[str, ...]:
    """Return the names of all built-in event loop backends."""
    return BACKENDS
+
124
+
125
def get_cancelled_exc_class() -> type[BaseException]:
    """Return the current async library's cancellation exception class."""
    backend = get_asynclib()
    return backend.CancelledError
+
129
+
130
+ #
131
+ # Private API
132
+ #
133
+
134
+
135
@contextmanager
def claim_worker_thread(backend: str) -> Generator[Any, None, None]:
    """Mark the current worker thread as serving the given backend for the
    duration of the ``with`` block, exposing the backend module via
    ``threadlocals.current_async_module``."""
    threadlocals.current_async_module = sys.modules[f"anyio._backends._{backend}"]
    try:
        yield
    finally:
        del threadlocals.current_async_module
+
144
+
145
def get_asynclib(asynclib_name: str | None = None) -> Any:
    """Return the backend module for ``asynclib_name`` (detected via sniffio
    when not given), importing it on first use."""
    if asynclib_name is None:
        asynclib_name = sniffio.current_async_library()

    module_name = f"anyio._backends._{asynclib_name}"
    try:
        return sys.modules[module_name]
    except KeyError:
        return import_module(module_name)
parrot/lib/python3.10/site-packages/anyio/_core/_exceptions.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from traceback import format_exception
4
+
5
+
6
class BrokenResourceError(Exception):
    """
    Raised when a resource has become unusable due to external causes
    (e.g. a send stream whose peer has disconnected).
    """
+
12
+
13
class BrokenWorkerProcess(Exception):
    """
    Raised by :func:`run_sync_in_process` when the worker process terminates
    abruptly or otherwise misbehaves.
    """
+
19
+
20
class BusyResourceError(Exception):
    """Raised when two tasks try to read from or write to the same resource concurrently."""

    def __init__(self, action: str):
        # ``action`` is a present-participle phrase, e.g. "reading from".
        detail = f"Another task is already {action} this resource"
        super().__init__(detail)
+
26
+
27
class ClosedResourceError(Exception):
    """Raised when attempting to use a resource that has already been closed."""
+
30
+
31
class DelimiterNotFound(Exception):
    """
    Raised during :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until`
    when the maximum number of bytes has been read without finding the delimiter.
    """

    def __init__(self, max_bytes: int) -> None:
        detail = f"The delimiter was not found among the first {max_bytes} bytes"
        super().__init__(detail)
+
42
+
43
class EndOfStream(Exception):
    """Raised when reading from a stream that the other end has already closed."""
+
46
+
47
class ExceptionGroup(BaseException):
    """
    Raised when multiple exceptions have been raised in a task group.

    :var ~typing.Sequence[BaseException] exceptions: the sequence of exceptions raised together
    """

    SEPARATOR = "----------------------------\n"

    # Set by the task group machinery before raising.
    exceptions: list[BaseException]

    def __str__(self) -> str:
        # One fully formatted traceback per contained exception.
        formatted = [
            "".join(format_exception(type(item), item, item.__traceback__))
            for item in self.exceptions
        ]
        return (
            f"{len(self.exceptions)} exceptions were raised in the task group:\n"
            f"{self.SEPARATOR}{self.SEPARATOR.join(formatted)}"
        )

    def __repr__(self) -> str:
        body = ", ".join(map(repr, self.exceptions))
        return f"<{self.__class__.__name__}: {body}>"
71
+
72
+
73
class IncompleteRead(Exception):
    """
    Raised during :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly`
    or :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` when the
    connection closes before the requested number of bytes could be read.
    """

    def __init__(self) -> None:
        super().__init__(
            "The stream was closed before the read operation could be completed"
        )
+
85
+
86
class TypedAttributeLookupError(LookupError):
    """
    Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the requested typed
    attribute is not found and no default value was given.
    """
+ """
91
+
92
+
93
class WouldBlock(Exception):
    """Raised by ``X_nowait`` functions when the blocking ``X()`` call would block."""
parrot/lib/python3.10/site-packages/anyio/_core/_fileio.py ADDED
@@ -0,0 +1,603 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import pathlib
5
+ import sys
6
+ from dataclasses import dataclass
7
+ from functools import partial
8
+ from os import PathLike
9
+ from typing import (
10
+ IO,
11
+ TYPE_CHECKING,
12
+ Any,
13
+ AnyStr,
14
+ AsyncIterator,
15
+ Callable,
16
+ Generic,
17
+ Iterable,
18
+ Iterator,
19
+ Sequence,
20
+ cast,
21
+ overload,
22
+ )
23
+
24
+ from .. import to_thread
25
+ from ..abc import AsyncResource
26
+
27
+ if sys.version_info >= (3, 8):
28
+ from typing import Final
29
+ else:
30
+ from typing_extensions import Final
31
+
32
+ if TYPE_CHECKING:
33
+ from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
34
+ else:
35
+ ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object
36
+
37
+
38
class AsyncFile(AsyncResource, Generic[AnyStr]):
    """
    An asynchronous file object.

    This class wraps a standard file object and provides async friendly versions of the following
    blocking methods (where available on the original file object):

    * read
    * read1
    * readline
    * readlines
    * readinto
    * readinto1
    * write
    * writelines
    * truncate
    * seek
    * tell
    * flush

    All other methods are directly passed through.

    This class supports the asynchronous context manager protocol which closes the underlying file
    at the end of the context block.

    This class also supports asynchronous iteration::

        async with await open_file(...) as f:
            async for line in f:
                print(line)
    """

    def __init__(self, fp: IO[AnyStr]) -> None:
        self._fp: Any = fp

    def __getattr__(self, name: str) -> object:
        # Any attribute not explicitly wrapped here is passed straight through
        # to the underlying (synchronous) file object
        return getattr(self._fp, name)

    @property
    def wrapped(self) -> IO[AnyStr]:
        """The wrapped file object."""
        return self._fp

    async def __aiter__(self) -> AsyncIterator[AnyStr]:
        # Yield lines until readline() returns an empty value (EOF)
        while True:
            line = await self.readline()
            if line:
                yield line
            else:
                break

    async def aclose(self) -> None:
        return await to_thread.run_sync(self._fp.close)

    async def read(self, size: int = -1) -> AnyStr:
        return await to_thread.run_sync(self._fp.read, size)

    async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes:
        return await to_thread.run_sync(self._fp.read1, size)

    async def readline(self) -> AnyStr:
        return await to_thread.run_sync(self._fp.readline)

    async def readlines(self) -> list[AnyStr]:
        return await to_thread.run_sync(self._fp.readlines)

    async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
        # BUGFIX: the underlying readinto() returns the number of bytes read
        # (an int), not bytes; the previous "-> bytes" annotation was wrong
        return await to_thread.run_sync(self._fp.readinto, b)

    async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
        # BUGFIX: as with readinto(), the return value is a byte count (int)
        return await to_thread.run_sync(self._fp.readinto1, b)

    @overload
    async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int:
        ...

    @overload
    async def write(self: AsyncFile[str], b: str) -> int:
        ...

    async def write(self, b: ReadableBuffer | str) -> int:
        return await to_thread.run_sync(self._fp.write, b)

    @overload
    async def writelines(
        self: AsyncFile[bytes], lines: Iterable[ReadableBuffer]
    ) -> None:
        ...

    @overload
    async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None:
        ...

    async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None:
        return await to_thread.run_sync(self._fp.writelines, lines)

    async def truncate(self, size: int | None = None) -> int:
        return await to_thread.run_sync(self._fp.truncate, size)

    async def seek(self, offset: int, whence: int = os.SEEK_SET) -> int:
        # BUGFIX: "whence" is always an int; the previous "int | None"
        # annotation was wrong (None would make the raw seek() raise TypeError).
        # The default value (os.SEEK_SET) is unchanged.
        return await to_thread.run_sync(self._fp.seek, offset, whence)

    async def tell(self) -> int:
        return await to_thread.run_sync(self._fp.tell)

    async def flush(self) -> None:
        return await to_thread.run_sync(self._fp.flush)
145
+
146
+
147
@overload
async def open_file(
    file: str | PathLike[str] | int,
    mode: OpenBinaryMode,
    buffering: int = ...,
    encoding: str | None = ...,
    errors: str | None = ...,
    newline: str | None = ...,
    closefd: bool = ...,
    opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[bytes]:
    ...


@overload
async def open_file(
    file: str | PathLike[str] | int,
    mode: OpenTextMode = ...,
    buffering: int = ...,
    encoding: str | None = ...,
    errors: str | None = ...,
    newline: str | None = ...,
    closefd: bool = ...,
    opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[str]:
    ...


async def open_file(
    file: str | PathLike[str] | int,
    mode: str = "r",
    buffering: int = -1,
    encoding: str | None = None,
    errors: str | None = None,
    newline: str | None = None,
    closefd: bool = True,
    opener: Callable[[str, int], int] | None = None,
) -> AsyncFile[Any]:
    """
    Open a file asynchronously.

    The arguments are exactly the same as for the builtin :func:`open`.

    :return: an asynchronous file object

    """
    # open() can block (e.g. on network file systems), so run it in a worker thread
    fp = await to_thread.run_sync(
        open, file, mode, buffering, encoding, errors, newline, closefd, opener
    )
    return AsyncFile(fp)
197
+
198
+
199
def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]:
    """
    Wrap an existing file as an asynchronous file.

    Note that the file is not opened or closed here; the caller retains
    ownership of the underlying file object's lifecycle unless the returned
    wrapper is used as an async context manager (which closes it on exit).

    :param file: an existing file-like object
    :return: an asynchronous file object

    """
    return AsyncFile(file)
208
+
209
+
210
@dataclass(eq=False)
class _PathIterator(AsyncIterator["Path"]):
    """Adapts a synchronous path iterator by advancing it in a worker thread."""

    iterator: Iterator[PathLike[str]]

    async def __anext__(self) -> Path:
        # Call next() with a None default so that exhaustion is signalled by a
        # sentinel value instead of StopIteration escaping the worker thread
        item = await to_thread.run_sync(next, self.iterator, None, cancellable=True)
        if item is None:
            raise StopAsyncIteration from None

        return Path(cast("PathLike[str]", item))
220
+
221
+
222
class Path:
    """
    An asynchronous version of :class:`pathlib.Path`.

    This class cannot be substituted for :class:`pathlib.Path` or :class:`pathlib.PurePath`, but
    it is compatible with the :class:`os.PathLike` interface.

    It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for the
    deprecated :meth:`~pathlib.Path.link_to` method.

    Any methods that do disk I/O need to be awaited on. These methods are:

    * :meth:`~pathlib.Path.absolute`
    * :meth:`~pathlib.Path.chmod`
    * :meth:`~pathlib.Path.cwd`
    * :meth:`~pathlib.Path.exists`
    * :meth:`~pathlib.Path.expanduser`
    * :meth:`~pathlib.Path.group`
    * :meth:`~pathlib.Path.hardlink_to`
    * :meth:`~pathlib.Path.home`
    * :meth:`~pathlib.Path.is_block_device`
    * :meth:`~pathlib.Path.is_char_device`
    * :meth:`~pathlib.Path.is_dir`
    * :meth:`~pathlib.Path.is_fifo`
    * :meth:`~pathlib.Path.is_file`
    * :meth:`~pathlib.Path.is_mount`
    * :meth:`~pathlib.Path.lchmod`
    * :meth:`~pathlib.Path.lstat`
    * :meth:`~pathlib.Path.mkdir`
    * :meth:`~pathlib.Path.open`
    * :meth:`~pathlib.Path.owner`
    * :meth:`~pathlib.Path.read_bytes`
    * :meth:`~pathlib.Path.read_text`
    * :meth:`~pathlib.Path.readlink`
    * :meth:`~pathlib.Path.rename`
    * :meth:`~pathlib.Path.replace`
    * :meth:`~pathlib.Path.rmdir`
    * :meth:`~pathlib.Path.samefile`
    * :meth:`~pathlib.Path.stat`
    * :meth:`~pathlib.Path.touch`
    * :meth:`~pathlib.Path.unlink`
    * :meth:`~pathlib.Path.write_bytes`
    * :meth:`~pathlib.Path.write_text`

    Additionally, the following methods return an async iterator yielding :class:`~.Path` objects:

    * :meth:`~pathlib.Path.glob`
    * :meth:`~pathlib.Path.iterdir`
    * :meth:`~pathlib.Path.rglob`
    """

    __slots__ = "_path", "__weakref__"

    __weakref__: Any

    def __init__(self, *args: str | PathLike[str]) -> None:
        # All behavior is delegated to a wrapped pathlib.Path instance
        self._path: Final[pathlib.Path] = pathlib.Path(*args)

    def __fspath__(self) -> str:
        return self._path.__fspath__()

    def __str__(self) -> str:
        return self._path.__str__()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.as_posix()!r})"

    def __bytes__(self) -> bytes:
        return self._path.__bytes__()

    def __hash__(self) -> int:
        return self._path.__hash__()

    # Comparison operators unwrap other Path instances so comparisons are
    # performed between the underlying pathlib.Path objects
    def __eq__(self, other: object) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__eq__(target)

    def __lt__(self, other: Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__lt__(target)

    def __le__(self, other: Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__le__(target)

    def __gt__(self, other: Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__gt__(target)

    def __ge__(self, other: Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__ge__(target)

    def __truediv__(self, other: Any) -> Path:
        return Path(self._path / other)

    def __rtruediv__(self, other: Any) -> Path:
        return Path(other) / self

    @property
    def parts(self) -> tuple[str, ...]:
        return self._path.parts

    @property
    def drive(self) -> str:
        return self._path.drive

    @property
    def root(self) -> str:
        return self._path.root

    @property
    def anchor(self) -> str:
        return self._path.anchor

    @property
    def parents(self) -> Sequence[Path]:
        # Re-wrap each pathlib parent in this async Path type
        return tuple(Path(p) for p in self._path.parents)

    @property
    def parent(self) -> Path:
        return Path(self._path.parent)

    @property
    def name(self) -> str:
        return self._path.name

    @property
    def suffix(self) -> str:
        return self._path.suffix

    @property
    def suffixes(self) -> list[str]:
        return self._path.suffixes

    @property
    def stem(self) -> str:
        return self._path.stem

    async def absolute(self) -> Path:
        # absolute() may hit the file system (to get the cwd), hence the thread
        path = await to_thread.run_sync(self._path.absolute)
        return Path(path)

    def as_posix(self) -> str:
        return self._path.as_posix()

    def as_uri(self) -> str:
        return self._path.as_uri()

    def match(self, path_pattern: str) -> bool:
        return self._path.match(path_pattern)

    def is_relative_to(self, *other: str | PathLike[str]) -> bool:
        # Implemented in terms of relative_to() so it works on Python versions
        # where pathlib lacks is_relative_to() (added in 3.9)
        try:
            self.relative_to(*other)
            return True
        except ValueError:
            return False

    async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None:
        # os.chmod is used directly so the keyword-only follow_symlinks
        # argument can be forwarded
        func = partial(os.chmod, follow_symlinks=follow_symlinks)
        return await to_thread.run_sync(func, self._path, mode)

    @classmethod
    async def cwd(cls) -> Path:
        path = await to_thread.run_sync(pathlib.Path.cwd)
        return cls(path)

    async def exists(self) -> bool:
        # cancellable=True: allow task cancellation while the worker thread runs
        return await to_thread.run_sync(self._path.exists, cancellable=True)

    async def expanduser(self) -> Path:
        return Path(await to_thread.run_sync(self._path.expanduser, cancellable=True))

    def glob(self, pattern: str) -> AsyncIterator[Path]:
        # The generator itself is created eagerly; each step runs in a thread
        gen = self._path.glob(pattern)
        return _PathIterator(gen)

    async def group(self) -> str:
        return await to_thread.run_sync(self._path.group, cancellable=True)

    async def hardlink_to(self, target: str | pathlib.Path | Path) -> None:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(os.link, target, self)

    @classmethod
    async def home(cls) -> Path:
        home_path = await to_thread.run_sync(pathlib.Path.home)
        return cls(home_path)

    def is_absolute(self) -> bool:
        return self._path.is_absolute()

    async def is_block_device(self) -> bool:
        return await to_thread.run_sync(self._path.is_block_device, cancellable=True)

    async def is_char_device(self) -> bool:
        return await to_thread.run_sync(self._path.is_char_device, cancellable=True)

    async def is_dir(self) -> bool:
        return await to_thread.run_sync(self._path.is_dir, cancellable=True)

    async def is_fifo(self) -> bool:
        return await to_thread.run_sync(self._path.is_fifo, cancellable=True)

    async def is_file(self) -> bool:
        return await to_thread.run_sync(self._path.is_file, cancellable=True)

    async def is_mount(self) -> bool:
        # pathlib.Path.is_mount() is unavailable on some platforms/versions,
        # so delegate to os.path.ismount()
        return await to_thread.run_sync(os.path.ismount, self._path, cancellable=True)

    def is_reserved(self) -> bool:
        return self._path.is_reserved()

    async def is_socket(self) -> bool:
        return await to_thread.run_sync(self._path.is_socket, cancellable=True)

    async def is_symlink(self) -> bool:
        return await to_thread.run_sync(self._path.is_symlink, cancellable=True)

    def iterdir(self) -> AsyncIterator[Path]:
        gen = self._path.iterdir()
        return _PathIterator(gen)

    def joinpath(self, *args: str | PathLike[str]) -> Path:
        return Path(self._path.joinpath(*args))

    async def lchmod(self, mode: int) -> None:
        await to_thread.run_sync(self._path.lchmod, mode)

    async def lstat(self) -> os.stat_result:
        return await to_thread.run_sync(self._path.lstat, cancellable=True)

    async def mkdir(
        self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False
    ) -> None:
        await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok)

    @overload
    async def open(
        self,
        mode: OpenBinaryMode,
        buffering: int = ...,
        encoding: str | None = ...,
        errors: str | None = ...,
        newline: str | None = ...,
    ) -> AsyncFile[bytes]:
        ...

    @overload
    async def open(
        self,
        mode: OpenTextMode = ...,
        buffering: int = ...,
        encoding: str | None = ...,
        errors: str | None = ...,
        newline: str | None = ...,
    ) -> AsyncFile[str]:
        ...

    async def open(
        self,
        mode: str = "r",
        buffering: int = -1,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> AsyncFile[Any]:
        fp = await to_thread.run_sync(
            self._path.open, mode, buffering, encoding, errors, newline
        )
        return AsyncFile(fp)

    async def owner(self) -> str:
        return await to_thread.run_sync(self._path.owner, cancellable=True)

    async def read_bytes(self) -> bytes:
        return await to_thread.run_sync(self._path.read_bytes)

    async def read_text(
        self, encoding: str | None = None, errors: str | None = None
    ) -> str:
        return await to_thread.run_sync(self._path.read_text, encoding, errors)

    def relative_to(self, *other: str | PathLike[str]) -> Path:
        return Path(self._path.relative_to(*other))

    async def readlink(self) -> Path:
        # os.readlink() is used so this works on Python versions where
        # pathlib lacks Path.readlink() (added in 3.9)
        target = await to_thread.run_sync(os.readlink, self._path)
        return Path(cast(str, target))

    async def rename(self, target: str | pathlib.PurePath | Path) -> Path:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.rename, target)
        return Path(target)

    async def replace(self, target: str | pathlib.PurePath | Path) -> Path:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.replace, target)
        return Path(target)

    async def resolve(self, strict: bool = False) -> Path:
        # partial() is needed because "strict" is keyword-only in run_sync terms
        func = partial(self._path.resolve, strict=strict)
        return Path(await to_thread.run_sync(func, cancellable=True))

    def rglob(self, pattern: str) -> AsyncIterator[Path]:
        gen = self._path.rglob(pattern)
        return _PathIterator(gen)

    async def rmdir(self) -> None:
        await to_thread.run_sync(self._path.rmdir)

    async def samefile(
        self, other_path: str | bytes | int | pathlib.Path | Path
    ) -> bool:
        if isinstance(other_path, Path):
            other_path = other_path._path

        return await to_thread.run_sync(
            self._path.samefile, other_path, cancellable=True
        )

    async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result:
        # os.stat is used directly so follow_symlinks can be forwarded
        func = partial(os.stat, follow_symlinks=follow_symlinks)
        return await to_thread.run_sync(func, self._path, cancellable=True)

    async def symlink_to(
        self,
        target: str | pathlib.Path | Path,
        target_is_directory: bool = False,
    ) -> None:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.symlink_to, target, target_is_directory)

    async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:
        await to_thread.run_sync(self._path.touch, mode, exist_ok)

    async def unlink(self, missing_ok: bool = False) -> None:
        # missing_ok is emulated here so it also works on Python versions
        # where pathlib's unlink() lacks the parameter (added in 3.8)
        try:
            await to_thread.run_sync(self._path.unlink)
        except FileNotFoundError:
            if not missing_ok:
                raise

    def with_name(self, name: str) -> Path:
        return Path(self._path.with_name(name))

    def with_stem(self, stem: str) -> Path:
        # Emulated via with_name() since with_stem() was only added in 3.9
        return Path(self._path.with_name(stem + self._path.suffix))

    def with_suffix(self, suffix: str) -> Path:
        return Path(self._path.with_suffix(suffix))

    async def write_bytes(self, data: bytes) -> int:
        return await to_thread.run_sync(self._path.write_bytes, data)

    async def write_text(
        self,
        data: str,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> int:
        # Path.write_text() does not support the "newline" parameter before Python 3.10
        def sync_write_text() -> int:
            with self._path.open(
                "w", encoding=encoding, errors=errors, newline=newline
            ) as fp:
                return fp.write(data)

        return await to_thread.run_sync(sync_write_text)


# Register as a virtual subclass so isinstance(p, os.PathLike) is True
PathLike.register(Path)
parrot/lib/python3.10/site-packages/anyio/_core/_resources.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from ..abc import AsyncResource
4
+ from ._tasks import CancelScope
5
+
6
+
7
async def aclose_forcefully(resource: AsyncResource) -> None:
    """
    Close an asynchronous resource in a cancelled scope.

    Doing this closes the resource without waiting on anything.

    :param resource: the resource to close

    """
    # Cancelling the scope before awaiting aclose() means any blocking
    # operations inside aclose() are immediately cancelled, so the close
    # happens without waiting (e.g. skipping a graceful TLS shutdown)
    with CancelScope() as scope:
        scope.cancel()
        await resource.aclose()
parrot/lib/python3.10/site-packages/anyio/_core/_signals.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import AsyncIterator
4
+
5
+ from ._compat import DeprecatedAsyncContextManager
6
+ from ._eventloop import get_asynclib
7
+
8
+
9
def open_signal_receiver(
    *signals: int,
) -> DeprecatedAsyncContextManager[AsyncIterator[int]]:
    """
    Start receiving operating system signals.

    :param signals: signals to receive (e.g. ``signal.SIGINT``)
    :return: an asynchronous context manager for an asynchronous iterator which yields signal
        numbers

    .. warning:: Windows does not support signals natively so it is best to avoid relying on this
        in cross-platform applications.

    .. warning:: On asyncio, this permanently replaces any previous signal handler for the given
        signals, as set via :meth:`~asyncio.loop.add_signal_handler`.

    """
    # Delegate to the currently active async backend (asyncio or trio)
    return get_asynclib().open_signal_receiver(*signals)
parrot/lib/python3.10/site-packages/anyio/_core/_sockets.py ADDED
@@ -0,0 +1,607 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import socket
4
+ import ssl
5
+ import sys
6
+ from ipaddress import IPv6Address, ip_address
7
+ from os import PathLike, chmod
8
+ from pathlib import Path
9
+ from socket import AddressFamily, SocketKind
10
+ from typing import Awaitable, List, Tuple, cast, overload
11
+
12
+ from .. import to_thread
13
+ from ..abc import (
14
+ ConnectedUDPSocket,
15
+ IPAddressType,
16
+ IPSockAddrType,
17
+ SocketListener,
18
+ SocketStream,
19
+ UDPSocket,
20
+ UNIXSocketStream,
21
+ )
22
+ from ..streams.stapled import MultiListener
23
+ from ..streams.tls import TLSStream
24
+ from ._eventloop import get_asynclib
25
+ from ._resources import aclose_forcefully
26
+ from ._synchronization import Event
27
+ from ._tasks import create_task_group, move_on_after
28
+
29
+ if sys.version_info >= (3, 8):
30
+ from typing import Literal
31
+ else:
32
+ from typing_extensions import Literal
33
+
34
# Some platforms lack socket.IPPROTO_IPV6; fall back to the standard value 41
IPPROTO_IPV6 = getattr(socket, "IPPROTO_IPV6", 41)  # https://bugs.python.org/issue29515

# Shape of socket.getaddrinfo() results: (family, type, proto, canonname, sockaddr)
GetAddrInfoReturnType = List[
    Tuple[AddressFamily, SocketKind, int, str, Tuple[str, int]]
]
# Address families accepted where "unspecified" (dual-stack) is allowed
AnyIPAddressFamily = Literal[
    AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
]
# Concrete IP address families only
IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6]
43
+
44
+
45
# tls_hostname given
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str,
    happy_eyeballs_delay: float = ...,
) -> TLSStream:
    ...


# ssl_context given
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    ssl_context: ssl.SSLContext,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> TLSStream:
    ...


# tls=True
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    tls: Literal[True],
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> TLSStream:
    ...


# tls=False
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    tls: Literal[False],
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> SocketStream:
    ...


# No TLS arguments
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    happy_eyeballs_delay: float = ...,
) -> SocketStream:
    ...


async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = None,
    tls: bool = False,
    ssl_context: ssl.SSLContext | None = None,
    tls_standard_compatible: bool = True,
    tls_hostname: str | None = None,
    happy_eyeballs_delay: float = 0.25,
) -> SocketStream | TLSStream:
    """
    Connect to a host using the TCP protocol.

    This function implements the stateless version of the Happy Eyeballs algorithm (RFC
    6555). If ``remote_host`` is a host name that resolves to multiple IP addresses,
    each one is tried until one connection attempt succeeds. If the first attempt does
    not connected within 250 milliseconds, a second attempt is started using the next
    address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if
    available) is tried first.

    When the connection has been established, a TLS handshake will be done if either
    ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.

    :param remote_host: the IP address or host name to connect to
    :param remote_port: port on the target host to connect to
    :param local_host: the interface address or name to bind the socket to before connecting
    :param tls: ``True`` to do a TLS handshake with the connected stream and return a
        :class:`~anyio.streams.tls.TLSStream` instead
    :param ssl_context: the SSL context object to use (if omitted, a default context is created)
    :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake before closing
        the stream and requires that the server does this as well. Otherwise,
        :exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
        Some protocols, such as HTTP, require this option to be ``False``.
        See :meth:`~ssl.SSLContext.wrap_socket` for details.
    :param tls_hostname: host name to check the server certificate against (defaults to the value
        of ``remote_host``)
    :param happy_eyeballs_delay: delay (in seconds) before starting the next connection attempt
    :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
    :raises OSError: if the connection attempt fails

    """
    # Placed here due to https://github.com/python/mypy/issues/7057
    connected_stream: SocketStream | None = None

    async def try_connect(remote_host: str, event: Event) -> None:
        nonlocal connected_stream
        try:
            stream = await asynclib.connect_tcp(remote_host, remote_port, local_address)
        except OSError as exc:
            # Record the failure; a combined error is raised later if nothing succeeds
            oserrors.append(exc)
            return
        else:
            if connected_stream is None:
                # First attempt to succeed wins; cancel the remaining attempts
                connected_stream = stream
                tg.cancel_scope.cancel()
            else:
                # Another attempt already won the race; discard this connection
                await stream.aclose()
        finally:
            # Wake the launcher loop so it can start the next staggered attempt
            event.set()

    asynclib = get_asynclib()
    local_address: IPSockAddrType | None = None
    family = socket.AF_UNSPEC
    if local_host:
        # Resolve the local bind address and constrain the address family to match
        gai_res = await getaddrinfo(str(local_host), None)
        family, *_, local_address = gai_res[0]

    target_host = str(remote_host)
    try:
        addr_obj = ip_address(remote_host)
    except ValueError:
        # Not a literal IP address: resolve the host name
        # getaddrinfo() will raise an exception if name resolution fails
        gai_res = await getaddrinfo(
            target_host, remote_port, family=family, type=socket.SOCK_STREAM
        )

        # Organize the list so that the first address is an IPv6 address (if available) and the
        # second one is an IPv4 addresses. The rest can be in whatever order.
        v6_found = v4_found = False
        target_addrs: list[tuple[socket.AddressFamily, str]] = []
        for af, *rest, sa in gai_res:
            if af == socket.AF_INET6 and not v6_found:
                v6_found = True
                target_addrs.insert(0, (af, sa[0]))
            elif af == socket.AF_INET and not v4_found and v6_found:
                v4_found = True
                target_addrs.insert(1, (af, sa[0]))
            else:
                target_addrs.append((af, sa[0]))
    else:
        # A literal IP address was given; no resolution needed
        if isinstance(addr_obj, IPv6Address):
            target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
        else:
            target_addrs = [(socket.AF_INET, addr_obj.compressed)]

    oserrors: list[OSError] = []
    async with create_task_group() as tg:
        for i, (af, addr) in enumerate(target_addrs):
            event = Event()
            tg.start_soon(try_connect, addr, event)
            # Stagger the attempts (Happy Eyeballs): wait up to
            # happy_eyeballs_delay for this attempt before launching the next
            with move_on_after(happy_eyeballs_delay):
                await event.wait()

    if connected_stream is None:
        cause = oserrors[0] if len(oserrors) == 1 else asynclib.ExceptionGroup(oserrors)
        raise OSError("All connection attempts failed") from cause

    if tls or tls_hostname or ssl_context:
        try:
            return await TLSStream.wrap(
                connected_stream,
                server_side=False,
                hostname=tls_hostname or str(remote_host),
                ssl_context=ssl_context,
                standard_compatible=tls_standard_compatible,
            )
        except BaseException:
            # Don't leak the raw TCP connection if the TLS handshake fails
            await aclose_forcefully(connected_stream)
            raise

    return connected_stream
241
+
242
+
243
async def connect_unix(path: str | PathLike[str]) -> UNIXSocketStream:
    """
    Connect to the given UNIX socket.

    Not available on Windows.

    :param path: path to the socket
    :return: a socket stream object

    """
    # Normalize any path-like object to a plain string before handing it to
    # the backend implementation
    normalized_path = str(Path(path))
    return await get_asynclib().connect_unix(normalized_path)
255
+
256
+
257
async def create_tcp_listener(
    *,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC,
    backlog: int = 65536,
    reuse_port: bool = False,
) -> MultiListener[SocketStream]:
    """
    Create a TCP socket listener.

    :param local_port: port number to listen on
    :param local_host: IP address of the interface to listen on. If omitted, listen on
        all IPv4 and IPv6 interfaces. To listen on all interfaces on a specific address
        family, use ``0.0.0.0`` for IPv4 or ``::`` for IPv6.
    :param family: address family (used if ``local_host`` was omitted)
    :param backlog: maximum number of queued incoming connections (up to a maximum of
        2**16, or 65536)
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a list of listener objects

    """
    asynclib = get_asynclib()
    # Cap the backlog at the documented maximum
    backlog = min(backlog, 65536)
    local_host = str(local_host) if local_host is not None else None
    gai_res = await getaddrinfo(
        local_host,  # type: ignore[arg-type]
        local_port,
        family=family,
        type=socket.SocketKind.SOCK_STREAM if sys.platform == "win32" else 0,
        flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
    )
    listeners: list[SocketListener] = []
    try:
        # The set() is here to work around a glibc bug:
        # https://sourceware.org/bugzilla/show_bug.cgi?id=14969
        sockaddr: tuple[str, int] | tuple[str, int, int, int]
        for fam, kind, *_, sockaddr in sorted(set(gai_res)):
            # Workaround for an uvloop bug where we don't get the correct scope ID for
            # IPv6 link-local addresses when passing type=socket.SOCK_STREAM to
            # getaddrinfo(): https://github.com/MagicStack/uvloop/issues/539
            if sys.platform != "win32" and kind is not SocketKind.SOCK_STREAM:
                continue

            raw_socket = socket.socket(fam)
            raw_socket.setblocking(False)

            # For Windows, enable exclusive address use. For others, enable address reuse.
            if sys.platform == "win32":
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
            else:
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            if reuse_port:
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)

            # If only IPv6 was requested, disable dual stack operation
            if fam == socket.AF_INET6:
                raw_socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)

            # Workaround for #554
            if "%" in sockaddr[0]:
                addr, scope_id = sockaddr[0].split("%", 1)
                sockaddr = (addr, sockaddr[1], 0, int(scope_id))

            raw_socket.bind(sockaddr)
            raw_socket.listen(backlog)
            listener = asynclib.TCPSocketListener(raw_socket)
            listeners.append(listener)
    except BaseException:
        # Clean up any listeners already created before re-raising
        for listener in listeners:
            await listener.aclose()

        raise

    return MultiListener(listeners)
334
+
335
+
336
+ async def create_unix_listener(
337
+ path: str | PathLike[str],
338
+ *,
339
+ mode: int | None = None,
340
+ backlog: int = 65536,
341
+ ) -> SocketListener:
342
+ """
343
+ Create a UNIX socket listener.
344
+
345
+ Not available on Windows.
346
+
347
+ :param path: path of the socket
348
+ :param mode: permissions to set on the socket
349
+ :param backlog: maximum number of queued incoming connections (up to a maximum of 2**16, or
350
+ 65536)
351
+ :return: a listener object
352
+
353
+ .. versionchanged:: 3.0
354
+ If a socket already exists on the file system in the given path, it will be removed first.
355
+
356
+ """
357
+ path_str = str(path)
358
+ path = Path(path)
359
+ if path.is_socket():
360
+ path.unlink()
361
+
362
+ backlog = min(backlog, 65536)
363
+ raw_socket = socket.socket(socket.AF_UNIX)
364
+ raw_socket.setblocking(False)
365
+ try:
366
+ await to_thread.run_sync(raw_socket.bind, path_str, cancellable=True)
367
+ if mode is not None:
368
+ await to_thread.run_sync(chmod, path_str, mode, cancellable=True)
369
+
370
+ raw_socket.listen(backlog)
371
+ return get_asynclib().UNIXSocketListener(raw_socket)
372
+ except BaseException:
373
+ raw_socket.close()
374
+ raise
375
+
376
+
377
+ async def create_udp_socket(
378
+ family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
379
+ *,
380
+ local_host: IPAddressType | None = None,
381
+ local_port: int = 0,
382
+ reuse_port: bool = False,
383
+ ) -> UDPSocket:
384
+ """
385
+ Create a UDP socket.
386
+
387
+ If ``local_port`` has been given, the socket will be bound to this port on the local
388
+ machine, making this socket suitable for providing UDP based services.
389
+
390
+ :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically determined from
391
+ ``local_host`` if omitted
392
+ :param local_host: IP address or host name of the local interface to bind to
393
+ :param local_port: local port to bind to
394
+ :param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port
395
+ (not supported on Windows)
396
+ :return: a UDP socket
397
+
398
+ """
399
+ if family is AddressFamily.AF_UNSPEC and not local_host:
400
+ raise ValueError('Either "family" or "local_host" must be given')
401
+
402
+ if local_host:
403
+ gai_res = await getaddrinfo(
404
+ str(local_host),
405
+ local_port,
406
+ family=family,
407
+ type=socket.SOCK_DGRAM,
408
+ flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
409
+ )
410
+ family = cast(AnyIPAddressFamily, gai_res[0][0])
411
+ local_address = gai_res[0][-1]
412
+ elif family is AddressFamily.AF_INET6:
413
+ local_address = ("::", 0)
414
+ else:
415
+ local_address = ("0.0.0.0", 0)
416
+
417
+ return await get_asynclib().create_udp_socket(
418
+ family, local_address, None, reuse_port
419
+ )
420
+
421
+
422
+ async def create_connected_udp_socket(
423
+ remote_host: IPAddressType,
424
+ remote_port: int,
425
+ *,
426
+ family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
427
+ local_host: IPAddressType | None = None,
428
+ local_port: int = 0,
429
+ reuse_port: bool = False,
430
+ ) -> ConnectedUDPSocket:
431
+ """
432
+ Create a connected UDP socket.
433
+
434
+ Connected UDP sockets can only communicate with the specified remote host/port, and any packets
435
+ sent from other sources are dropped.
436
+
437
+ :param remote_host: remote host to set as the default target
438
+ :param remote_port: port on the remote host to set as the default target
439
+ :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically determined from
440
+ ``local_host`` or ``remote_host`` if omitted
441
+ :param local_host: IP address or host name of the local interface to bind to
442
+ :param local_port: local port to bind to
443
+ :param reuse_port: ``True`` to allow multiple sockets to bind to the same address/port
444
+ (not supported on Windows)
445
+ :return: a connected UDP socket
446
+
447
+ """
448
+ local_address = None
449
+ if local_host:
450
+ gai_res = await getaddrinfo(
451
+ str(local_host),
452
+ local_port,
453
+ family=family,
454
+ type=socket.SOCK_DGRAM,
455
+ flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
456
+ )
457
+ family = cast(AnyIPAddressFamily, gai_res[0][0])
458
+ local_address = gai_res[0][-1]
459
+
460
+ gai_res = await getaddrinfo(
461
+ str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM
462
+ )
463
+ family = cast(AnyIPAddressFamily, gai_res[0][0])
464
+ remote_address = gai_res[0][-1]
465
+
466
+ return await get_asynclib().create_udp_socket(
467
+ family, local_address, remote_address, reuse_port
468
+ )
469
+
470
+
471
+ async def getaddrinfo(
472
+ host: bytearray | bytes | str,
473
+ port: str | int | None,
474
+ *,
475
+ family: int | AddressFamily = 0,
476
+ type: int | SocketKind = 0,
477
+ proto: int = 0,
478
+ flags: int = 0,
479
+ ) -> GetAddrInfoReturnType:
480
+ """
481
+ Look up a numeric IP address given a host name.
482
+
483
+ Internationalized domain names are translated according to the (non-transitional) IDNA 2008
484
+ standard.
485
+
486
+ .. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of
487
+ (host, port), unlike what :func:`socket.getaddrinfo` does.
488
+
489
+ :param host: host name
490
+ :param port: port number
491
+ :param family: socket family (`'AF_INET``, ...)
492
+ :param type: socket type (``SOCK_STREAM``, ...)
493
+ :param proto: protocol number
494
+ :param flags: flags to pass to upstream ``getaddrinfo()``
495
+ :return: list of tuples containing (family, type, proto, canonname, sockaddr)
496
+
497
+ .. seealso:: :func:`socket.getaddrinfo`
498
+
499
+ """
500
+ # Handle unicode hostnames
501
+ if isinstance(host, str):
502
+ try:
503
+ encoded_host = host.encode("ascii")
504
+ except UnicodeEncodeError:
505
+ import idna
506
+
507
+ encoded_host = idna.encode(host, uts46=True)
508
+ else:
509
+ encoded_host = host
510
+
511
+ gai_res = await get_asynclib().getaddrinfo(
512
+ encoded_host, port, family=family, type=type, proto=proto, flags=flags
513
+ )
514
+ return [
515
+ (family, type, proto, canonname, convert_ipv6_sockaddr(sockaddr))
516
+ for family, type, proto, canonname, sockaddr in gai_res
517
+ ]
518
+
519
+
520
+ def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[tuple[str, str]]:
521
+ """
522
+ Look up the host name of an IP address.
523
+
524
+ :param sockaddr: socket address (e.g. (ipaddress, port) for IPv4)
525
+ :param flags: flags to pass to upstream ``getnameinfo()``
526
+ :return: a tuple of (host name, service name)
527
+
528
+ .. seealso:: :func:`socket.getnameinfo`
529
+
530
+ """
531
+ return get_asynclib().getnameinfo(sockaddr, flags)
532
+
533
+
534
+ def wait_socket_readable(sock: socket.socket) -> Awaitable[None]:
535
+ """
536
+ Wait until the given socket has data to be read.
537
+
538
+ This does **NOT** work on Windows when using the asyncio backend with a proactor event loop
539
+ (default on py3.8+).
540
+
541
+ .. warning:: Only use this on raw sockets that have not been wrapped by any higher level
542
+ constructs like socket streams!
543
+
544
+ :param sock: a socket object
545
+ :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
546
+ socket to become readable
547
+ :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
548
+ to become readable
549
+
550
+ """
551
+ return get_asynclib().wait_socket_readable(sock)
552
+
553
+
554
+ def wait_socket_writable(sock: socket.socket) -> Awaitable[None]:
555
+ """
556
+ Wait until the given socket can be written to.
557
+
558
+ This does **NOT** work on Windows when using the asyncio backend with a proactor event loop
559
+ (default on py3.8+).
560
+
561
+ .. warning:: Only use this on raw sockets that have not been wrapped by any higher level
562
+ constructs like socket streams!
563
+
564
+ :param sock: a socket object
565
+ :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
566
+ socket to become writable
567
+ :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
568
+ to become writable
569
+
570
+ """
571
+ return get_asynclib().wait_socket_writable(sock)
572
+
573
+
574
+ #
575
+ # Private API
576
+ #
577
+
578
+
579
+ def convert_ipv6_sockaddr(
580
+ sockaddr: tuple[str, int, int, int] | tuple[str, int]
581
+ ) -> tuple[str, int]:
582
+ """
583
+ Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format.
584
+
585
+ If the scope ID is nonzero, it is added to the address, separated with ``%``.
586
+ Otherwise the flow id and scope id are simply cut off from the tuple.
587
+ Any other kinds of socket addresses are returned as-is.
588
+
589
+ :param sockaddr: the result of :meth:`~socket.socket.getsockname`
590
+ :return: the converted socket address
591
+
592
+ """
593
+ # This is more complicated than it should be because of MyPy
594
+ if isinstance(sockaddr, tuple) and len(sockaddr) == 4:
595
+ host, port, flowinfo, scope_id = cast(Tuple[str, int, int, int], sockaddr)
596
+ if scope_id:
597
+ # PyPy (as of v7.3.11) leaves the interface name in the result, so
598
+ # we discard it and only get the scope ID from the end
599
+ # (https://foss.heptapod.net/pypy/pypy/-/issues/3938)
600
+ host = host.split("%")[0]
601
+
602
+ # Add scope_id to the address
603
+ return f"{host}%{scope_id}", port
604
+ else:
605
+ return host, port
606
+ else:
607
+ return cast(Tuple[str, int], sockaddr)
parrot/lib/python3.10/site-packages/anyio/_core/_streams.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ from typing import Any, TypeVar, overload
5
+
6
+ from ..streams.memory import (
7
+ MemoryObjectReceiveStream,
8
+ MemoryObjectSendStream,
9
+ MemoryObjectStreamState,
10
+ )
11
+
12
+ T_Item = TypeVar("T_Item")
13
+
14
+
15
+ @overload
16
+ def create_memory_object_stream(
17
+ max_buffer_size: float = ...,
18
+ ) -> tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]:
19
+ ...
20
+
21
+
22
+ @overload
23
+ def create_memory_object_stream(
24
+ max_buffer_size: float = ..., item_type: type[T_Item] = ...
25
+ ) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:
26
+ ...
27
+
28
+
29
+ def create_memory_object_stream(
30
+ max_buffer_size: float = 0, item_type: type[T_Item] | None = None
31
+ ) -> tuple[MemoryObjectSendStream[Any], MemoryObjectReceiveStream[Any]]:
32
+ """
33
+ Create a memory object stream.
34
+
35
+ :param max_buffer_size: number of items held in the buffer until ``send()`` starts blocking
36
+ :param item_type: type of item, for marking the streams with the right generic type for
37
+ static typing (not used at run time)
38
+ :return: a tuple of (send stream, receive stream)
39
+
40
+ """
41
+ if max_buffer_size != math.inf and not isinstance(max_buffer_size, int):
42
+ raise ValueError("max_buffer_size must be either an integer or math.inf")
43
+ if max_buffer_size < 0:
44
+ raise ValueError("max_buffer_size cannot be negative")
45
+
46
+ state: MemoryObjectStreamState = MemoryObjectStreamState(max_buffer_size)
47
+ return MemoryObjectSendStream(state), MemoryObjectReceiveStream(state)
parrot/lib/python3.10/site-packages/anyio/_core/_subprocesses.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from io import BytesIO
4
+ from os import PathLike
5
+ from subprocess import DEVNULL, PIPE, CalledProcessError, CompletedProcess
6
+ from typing import (
7
+ IO,
8
+ Any,
9
+ AsyncIterable,
10
+ Mapping,
11
+ Sequence,
12
+ cast,
13
+ )
14
+
15
+ from ..abc import Process
16
+ from ._eventloop import get_asynclib
17
+ from ._tasks import create_task_group
18
+
19
+
20
+ async def run_process(
21
+ command: str | bytes | Sequence[str | bytes],
22
+ *,
23
+ input: bytes | None = None,
24
+ stdout: int | IO[Any] | None = PIPE,
25
+ stderr: int | IO[Any] | None = PIPE,
26
+ check: bool = True,
27
+ cwd: str | bytes | PathLike[str] | None = None,
28
+ env: Mapping[str, str] | None = None,
29
+ start_new_session: bool = False,
30
+ ) -> CompletedProcess[bytes]:
31
+ """
32
+ Run an external command in a subprocess and wait until it completes.
33
+
34
+ .. seealso:: :func:`subprocess.run`
35
+
36
+ :param command: either a string to pass to the shell, or an iterable of strings containing the
37
+ executable name or path and its arguments
38
+ :param input: bytes passed to the standard input of the subprocess
39
+ :param stdout: either :data:`subprocess.PIPE` or :data:`subprocess.DEVNULL`
40
+ :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL` or
41
+ :data:`subprocess.STDOUT`
42
+ :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the process
43
+ terminates with a return code other than 0
44
+ :param cwd: If not ``None``, change the working directory to this before running the command
45
+ :param env: if not ``None``, this mapping replaces the inherited environment variables from the
46
+ parent process
47
+ :param start_new_session: if ``true`` the setsid() system call will be made in the child
48
+ process prior to the execution of the subprocess. (POSIX only)
49
+ :return: an object representing the completed process
50
+ :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process exits with a
51
+ nonzero return code
52
+
53
+ """
54
+
55
+ async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None:
56
+ buffer = BytesIO()
57
+ async for chunk in stream:
58
+ buffer.write(chunk)
59
+
60
+ stream_contents[index] = buffer.getvalue()
61
+
62
+ async with await open_process(
63
+ command,
64
+ stdin=PIPE if input else DEVNULL,
65
+ stdout=stdout,
66
+ stderr=stderr,
67
+ cwd=cwd,
68
+ env=env,
69
+ start_new_session=start_new_session,
70
+ ) as process:
71
+ stream_contents: list[bytes | None] = [None, None]
72
+ try:
73
+ async with create_task_group() as tg:
74
+ if process.stdout:
75
+ tg.start_soon(drain_stream, process.stdout, 0)
76
+ if process.stderr:
77
+ tg.start_soon(drain_stream, process.stderr, 1)
78
+ if process.stdin and input:
79
+ await process.stdin.send(input)
80
+ await process.stdin.aclose()
81
+
82
+ await process.wait()
83
+ except BaseException:
84
+ process.kill()
85
+ raise
86
+
87
+ output, errors = stream_contents
88
+ if check and process.returncode != 0:
89
+ raise CalledProcessError(cast(int, process.returncode), command, output, errors)
90
+
91
+ return CompletedProcess(command, cast(int, process.returncode), output, errors)
92
+
93
+
94
+ async def open_process(
95
+ command: str | bytes | Sequence[str | bytes],
96
+ *,
97
+ stdin: int | IO[Any] | None = PIPE,
98
+ stdout: int | IO[Any] | None = PIPE,
99
+ stderr: int | IO[Any] | None = PIPE,
100
+ cwd: str | bytes | PathLike[str] | None = None,
101
+ env: Mapping[str, str] | None = None,
102
+ start_new_session: bool = False,
103
+ ) -> Process:
104
+ """
105
+ Start an external command in a subprocess.
106
+
107
+ .. seealso:: :class:`subprocess.Popen`
108
+
109
+ :param command: either a string to pass to the shell, or an iterable of strings containing the
110
+ executable name or path and its arguments
111
+ :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a
112
+ file-like object, or ``None``
113
+ :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
114
+ a file-like object, or ``None``
115
+ :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
116
+ :data:`subprocess.STDOUT`, a file-like object, or ``None``
117
+ :param cwd: If not ``None``, the working directory is changed before executing
118
+ :param env: If env is not ``None``, it must be a mapping that defines the environment
119
+ variables for the new process
120
+ :param start_new_session: if ``true`` the setsid() system call will be made in the child
121
+ process prior to the execution of the subprocess. (POSIX only)
122
+ :return: an asynchronous process object
123
+
124
+ """
125
+ shell = isinstance(command, str)
126
+ return await get_asynclib().open_process(
127
+ command,
128
+ shell=shell,
129
+ stdin=stdin,
130
+ stdout=stdout,
131
+ stderr=stderr,
132
+ cwd=cwd,
133
+ env=env,
134
+ start_new_session=start_new_session,
135
+ )
parrot/lib/python3.10/site-packages/anyio/_core/_synchronization.py ADDED
@@ -0,0 +1,596 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections import deque
4
+ from dataclasses import dataclass
5
+ from types import TracebackType
6
+ from warnings import warn
7
+
8
+ from ..lowlevel import cancel_shielded_checkpoint, checkpoint, checkpoint_if_cancelled
9
+ from ._compat import DeprecatedAwaitable
10
+ from ._eventloop import get_asynclib
11
+ from ._exceptions import BusyResourceError, WouldBlock
12
+ from ._tasks import CancelScope
13
+ from ._testing import TaskInfo, get_current_task
14
+
15
+
16
+ @dataclass(frozen=True)
17
+ class EventStatistics:
18
+ """
19
+ :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
20
+ """
21
+
22
+ tasks_waiting: int
23
+
24
+
25
+ @dataclass(frozen=True)
26
+ class CapacityLimiterStatistics:
27
+ """
28
+ :ivar int borrowed_tokens: number of tokens currently borrowed by tasks
29
+ :ivar float total_tokens: total number of available tokens
30
+ :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from this
31
+ limiter
32
+ :ivar int tasks_waiting: number of tasks waiting on :meth:`~.CapacityLimiter.acquire` or
33
+ :meth:`~.CapacityLimiter.acquire_on_behalf_of`
34
+ """
35
+
36
+ borrowed_tokens: int
37
+ total_tokens: float
38
+ borrowers: tuple[object, ...]
39
+ tasks_waiting: int
40
+
41
+
42
+ @dataclass(frozen=True)
43
+ class LockStatistics:
44
+ """
45
+ :ivar bool locked: flag indicating if this lock is locked or not
46
+ :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the lock is not
47
+ held by any task)
48
+ :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
49
+ """
50
+
51
+ locked: bool
52
+ owner: TaskInfo | None
53
+ tasks_waiting: int
54
+
55
+
56
+ @dataclass(frozen=True)
57
+ class ConditionStatistics:
58
+ """
59
+ :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
60
+ :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying :class:`~.Lock`
61
+ """
62
+
63
+ tasks_waiting: int
64
+ lock_statistics: LockStatistics
65
+
66
+
67
+ @dataclass(frozen=True)
68
+ class SemaphoreStatistics:
69
+ """
70
+ :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`
71
+
72
+ """
73
+
74
+ tasks_waiting: int
75
+
76
+
77
+ class Event:
78
+ def __new__(cls) -> Event:
79
+ return get_asynclib().Event()
80
+
81
+ def set(self) -> DeprecatedAwaitable:
82
+ """Set the flag, notifying all listeners."""
83
+ raise NotImplementedError
84
+
85
+ def is_set(self) -> bool:
86
+ """Return ``True`` if the flag is set, ``False`` if not."""
87
+ raise NotImplementedError
88
+
89
+ async def wait(self) -> None:
90
+ """
91
+ Wait until the flag has been set.
92
+
93
+ If the flag has already been set when this method is called, it returns immediately.
94
+
95
+ """
96
+ raise NotImplementedError
97
+
98
+ def statistics(self) -> EventStatistics:
99
+ """Return statistics about the current state of this event."""
100
+ raise NotImplementedError
101
+
102
+
103
+ class Lock:
104
+ _owner_task: TaskInfo | None = None
105
+
106
+ def __init__(self) -> None:
107
+ self._waiters: deque[tuple[TaskInfo, Event]] = deque()
108
+
109
+ async def __aenter__(self) -> None:
110
+ await self.acquire()
111
+
112
+ async def __aexit__(
113
+ self,
114
+ exc_type: type[BaseException] | None,
115
+ exc_val: BaseException | None,
116
+ exc_tb: TracebackType | None,
117
+ ) -> None:
118
+ self.release()
119
+
120
+ async def acquire(self) -> None:
121
+ """Acquire the lock."""
122
+ await checkpoint_if_cancelled()
123
+ try:
124
+ self.acquire_nowait()
125
+ except WouldBlock:
126
+ task = get_current_task()
127
+ event = Event()
128
+ token = task, event
129
+ self._waiters.append(token)
130
+ try:
131
+ await event.wait()
132
+ except BaseException:
133
+ if not event.is_set():
134
+ self._waiters.remove(token)
135
+ elif self._owner_task == task:
136
+ self.release()
137
+
138
+ raise
139
+
140
+ assert self._owner_task == task
141
+ else:
142
+ try:
143
+ await cancel_shielded_checkpoint()
144
+ except BaseException:
145
+ self.release()
146
+ raise
147
+
148
+ def acquire_nowait(self) -> None:
149
+ """
150
+ Acquire the lock, without blocking.
151
+
152
+ :raises ~anyio.WouldBlock: if the operation would block
153
+
154
+ """
155
+ task = get_current_task()
156
+ if self._owner_task == task:
157
+ raise RuntimeError("Attempted to acquire an already held Lock")
158
+
159
+ if self._owner_task is not None:
160
+ raise WouldBlock
161
+
162
+ self._owner_task = task
163
+
164
+ def release(self) -> DeprecatedAwaitable:
165
+ """Release the lock."""
166
+ if self._owner_task != get_current_task():
167
+ raise RuntimeError("The current task is not holding this lock")
168
+
169
+ if self._waiters:
170
+ self._owner_task, event = self._waiters.popleft()
171
+ event.set()
172
+ else:
173
+ del self._owner_task
174
+
175
+ return DeprecatedAwaitable(self.release)
176
+
177
+ def locked(self) -> bool:
178
+ """Return True if the lock is currently held."""
179
+ return self._owner_task is not None
180
+
181
+ def statistics(self) -> LockStatistics:
182
+ """
183
+ Return statistics about the current state of this lock.
184
+
185
+ .. versionadded:: 3.0
186
+ """
187
+ return LockStatistics(self.locked(), self._owner_task, len(self._waiters))
188
+
189
+
190
+ class Condition:
191
+ _owner_task: TaskInfo | None = None
192
+
193
+ def __init__(self, lock: Lock | None = None):
194
+ self._lock = lock or Lock()
195
+ self._waiters: deque[Event] = deque()
196
+
197
+ async def __aenter__(self) -> None:
198
+ await self.acquire()
199
+
200
+ async def __aexit__(
201
+ self,
202
+ exc_type: type[BaseException] | None,
203
+ exc_val: BaseException | None,
204
+ exc_tb: TracebackType | None,
205
+ ) -> None:
206
+ self.release()
207
+
208
+ def _check_acquired(self) -> None:
209
+ if self._owner_task != get_current_task():
210
+ raise RuntimeError("The current task is not holding the underlying lock")
211
+
212
+ async def acquire(self) -> None:
213
+ """Acquire the underlying lock."""
214
+ await self._lock.acquire()
215
+ self._owner_task = get_current_task()
216
+
217
+ def acquire_nowait(self) -> None:
218
+ """
219
+ Acquire the underlying lock, without blocking.
220
+
221
+ :raises ~anyio.WouldBlock: if the operation would block
222
+
223
+ """
224
+ self._lock.acquire_nowait()
225
+ self._owner_task = get_current_task()
226
+
227
+ def release(self) -> DeprecatedAwaitable:
228
+ """Release the underlying lock."""
229
+ self._lock.release()
230
+ return DeprecatedAwaitable(self.release)
231
+
232
+ def locked(self) -> bool:
233
+ """Return True if the lock is set."""
234
+ return self._lock.locked()
235
+
236
+ def notify(self, n: int = 1) -> None:
237
+ """Notify exactly n listeners."""
238
+ self._check_acquired()
239
+ for _ in range(n):
240
+ try:
241
+ event = self._waiters.popleft()
242
+ except IndexError:
243
+ break
244
+
245
+ event.set()
246
+
247
+ def notify_all(self) -> None:
248
+ """Notify all the listeners."""
249
+ self._check_acquired()
250
+ for event in self._waiters:
251
+ event.set()
252
+
253
+ self._waiters.clear()
254
+
255
+ async def wait(self) -> None:
256
+ """Wait for a notification."""
257
+ await checkpoint()
258
+ event = Event()
259
+ self._waiters.append(event)
260
+ self.release()
261
+ try:
262
+ await event.wait()
263
+ except BaseException:
264
+ if not event.is_set():
265
+ self._waiters.remove(event)
266
+
267
+ raise
268
+ finally:
269
+ with CancelScope(shield=True):
270
+ await self.acquire()
271
+
272
+ def statistics(self) -> ConditionStatistics:
273
+ """
274
+ Return statistics about the current state of this condition.
275
+
276
+ .. versionadded:: 3.0
277
+ """
278
+ return ConditionStatistics(len(self._waiters), self._lock.statistics())
279
+
280
+
281
+ class Semaphore:
282
+ def __init__(self, initial_value: int, *, max_value: int | None = None):
283
+ if not isinstance(initial_value, int):
284
+ raise TypeError("initial_value must be an integer")
285
+ if initial_value < 0:
286
+ raise ValueError("initial_value must be >= 0")
287
+ if max_value is not None:
288
+ if not isinstance(max_value, int):
289
+ raise TypeError("max_value must be an integer or None")
290
+ if max_value < initial_value:
291
+ raise ValueError(
292
+ "max_value must be equal to or higher than initial_value"
293
+ )
294
+
295
+ self._value = initial_value
296
+ self._max_value = max_value
297
+ self._waiters: deque[Event] = deque()
298
+
299
+ async def __aenter__(self) -> Semaphore:
300
+ await self.acquire()
301
+ return self
302
+
303
+ async def __aexit__(
304
+ self,
305
+ exc_type: type[BaseException] | None,
306
+ exc_val: BaseException | None,
307
+ exc_tb: TracebackType | None,
308
+ ) -> None:
309
+ self.release()
310
+
311
+ async def acquire(self) -> None:
312
+ """Decrement the semaphore value, blocking if necessary."""
313
+ await checkpoint_if_cancelled()
314
+ try:
315
+ self.acquire_nowait()
316
+ except WouldBlock:
317
+ event = Event()
318
+ self._waiters.append(event)
319
+ try:
320
+ await event.wait()
321
+ except BaseException:
322
+ if not event.is_set():
323
+ self._waiters.remove(event)
324
+ else:
325
+ self.release()
326
+
327
+ raise
328
+ else:
329
+ try:
330
+ await cancel_shielded_checkpoint()
331
+ except BaseException:
332
+ self.release()
333
+ raise
334
+
335
+ def acquire_nowait(self) -> None:
336
+ """
337
+ Acquire the underlying lock, without blocking.
338
+
339
+ :raises ~anyio.WouldBlock: if the operation would block
340
+
341
+ """
342
+ if self._value == 0:
343
+ raise WouldBlock
344
+
345
+ self._value -= 1
346
+
347
+ def release(self) -> DeprecatedAwaitable:
348
+ """Increment the semaphore value."""
349
+ if self._max_value is not None and self._value == self._max_value:
350
+ raise ValueError("semaphore released too many times")
351
+
352
+ if self._waiters:
353
+ self._waiters.popleft().set()
354
+ else:
355
+ self._value += 1
356
+
357
+ return DeprecatedAwaitable(self.release)
358
+
359
+ @property
360
+ def value(self) -> int:
361
+ """The current value of the semaphore."""
362
+ return self._value
363
+
364
+ @property
365
+ def max_value(self) -> int | None:
366
+ """The maximum value of the semaphore."""
367
+ return self._max_value
368
+
369
+ def statistics(self) -> SemaphoreStatistics:
370
+ """
371
+ Return statistics about the current state of this semaphore.
372
+
373
+ .. versionadded:: 3.0
374
+ """
375
+ return SemaphoreStatistics(len(self._waiters))
376
+
377
+
378
+ class CapacityLimiter:
379
+ def __new__(cls, total_tokens: float) -> CapacityLimiter:
380
+ return get_asynclib().CapacityLimiter(total_tokens)
381
+
382
+ async def __aenter__(self) -> None:
383
+ raise NotImplementedError
384
+
385
+ async def __aexit__(
386
+ self,
387
+ exc_type: type[BaseException] | None,
388
+ exc_val: BaseException | None,
389
+ exc_tb: TracebackType | None,
390
+ ) -> bool | None:
391
+ raise NotImplementedError
392
+
393
+ @property
394
+ def total_tokens(self) -> float:
395
+ """
396
+ The total number of tokens available for borrowing.
397
+
398
+ This is a read-write property. If the total number of tokens is increased, the
399
+ proportionate number of tasks waiting on this limiter will be granted their tokens.
400
+
401
+ .. versionchanged:: 3.0
402
+ The property is now writable.
403
+
404
+ """
405
+ raise NotImplementedError
406
+
407
+ @total_tokens.setter
408
+ def total_tokens(self, value: float) -> None:
409
+ raise NotImplementedError
410
+
411
+ async def set_total_tokens(self, value: float) -> None:
412
+ warn(
413
+ "CapacityLimiter.set_total_tokens has been deprecated. Set the value of the"
414
+ '"total_tokens" attribute directly.',
415
+ DeprecationWarning,
416
+ )
417
+ self.total_tokens = value
418
+
419
+ @property
420
+ def borrowed_tokens(self) -> int:
421
+ """The number of tokens that have currently been borrowed."""
422
+ raise NotImplementedError
423
+
424
+ @property
425
+ def available_tokens(self) -> float:
426
+ """The number of tokens currently available to be borrowed"""
427
+ raise NotImplementedError
428
+
429
+ def acquire_nowait(self) -> DeprecatedAwaitable:
430
+ """
431
+ Acquire a token for the current task without waiting for one to become available.
432
+
433
+ :raises ~anyio.WouldBlock: if there are no tokens available for borrowing
434
+
435
+ """
436
+ raise NotImplementedError
437
+
438
+ def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable:
439
+ """
440
+ Acquire a token without waiting for one to become available.
441
+
442
+ :param borrower: the entity borrowing a token
443
+ :raises ~anyio.WouldBlock: if there are no tokens available for borrowing
444
+
445
+ """
446
+ raise NotImplementedError
447
+
448
+ async def acquire(self) -> None:
449
+ """
450
+ Acquire a token for the current task, waiting if necessary for one to become available.
451
+
452
+ """
453
+ raise NotImplementedError
454
+
455
+ async def acquire_on_behalf_of(self, borrower: object) -> None:
456
+ """
457
+ Acquire a token, waiting if necessary for one to become available.
458
+
459
+ :param borrower: the entity borrowing a token
460
+
461
+ """
462
+ raise NotImplementedError
463
+
464
+ def release(self) -> None:
465
+ """
466
+ Release the token held by the current task.
467
+ :raises RuntimeError: if the current task has not borrowed a token from this limiter.
468
+
469
+ """
470
+ raise NotImplementedError
471
+
472
+ def release_on_behalf_of(self, borrower: object) -> None:
473
+ """
474
+ Release the token held by the given borrower.
475
+
476
+ :raises RuntimeError: if the borrower has not borrowed a token from this limiter.
477
+
478
+ """
479
+ raise NotImplementedError
480
+
481
+ def statistics(self) -> CapacityLimiterStatistics:
482
+ """
483
+ Return statistics about the current state of this limiter.
484
+
485
+ .. versionadded:: 3.0
486
+
487
+ """
488
+ raise NotImplementedError
489
+
490
+
491
+ def create_lock() -> Lock:
492
+ """
493
+ Create an asynchronous lock.
494
+
495
+ :return: a lock object
496
+
497
+ .. deprecated:: 3.0
498
+ Use :class:`~Lock` directly.
499
+
500
+ """
501
+ warn("create_lock() is deprecated -- use Lock() directly", DeprecationWarning)
502
+ return Lock()
503
+
504
+
505
+ def create_condition(lock: Lock | None = None) -> Condition:
506
+ """
507
+ Create an asynchronous condition.
508
+
509
+ :param lock: the lock to base the condition object on
510
+ :return: a condition object
511
+
512
+ .. deprecated:: 3.0
513
+ Use :class:`~Condition` directly.
514
+
515
+ """
516
+ warn(
517
+ "create_condition() is deprecated -- use Condition() directly",
518
+ DeprecationWarning,
519
+ )
520
+ return Condition(lock=lock)
521
+
522
+
523
+ def create_event() -> Event:
524
+ """
525
+ Create an asynchronous event object.
526
+
527
+ :return: an event object
528
+
529
+ .. deprecated:: 3.0
530
+ Use :class:`~Event` directly.
531
+
532
+ """
533
+ warn("create_event() is deprecated -- use Event() directly", DeprecationWarning)
534
+ return get_asynclib().Event()
535
+
536
+
537
+ def create_semaphore(value: int, *, max_value: int | None = None) -> Semaphore:
538
+ """
539
+ Create an asynchronous semaphore.
540
+
541
+ :param value: the semaphore's initial value
542
+ :param max_value: if set, makes this a "bounded" semaphore that raises :exc:`ValueError` if the
543
+ semaphore's value would exceed this number
544
+ :return: a semaphore object
545
+
546
+ .. deprecated:: 3.0
547
+ Use :class:`~Semaphore` directly.
548
+
549
+ """
550
+ warn(
551
+ "create_semaphore() is deprecated -- use Semaphore() directly",
552
+ DeprecationWarning,
553
+ )
554
+ return Semaphore(value, max_value=max_value)
555
+
556
+
557
+ def create_capacity_limiter(total_tokens: float) -> CapacityLimiter:
558
+ """
559
+ Create a capacity limiter.
560
+
561
+ :param total_tokens: the total number of tokens available for borrowing (can be an integer or
562
+ :data:`math.inf`)
563
+ :return: a capacity limiter object
564
+
565
+ .. deprecated:: 3.0
566
+ Use :class:`~CapacityLimiter` directly.
567
+
568
+ """
569
+ warn(
570
+ "create_capacity_limiter() is deprecated -- use CapacityLimiter() directly",
571
+ DeprecationWarning,
572
+ )
573
+ return get_asynclib().CapacityLimiter(total_tokens)
574
+
575
+
576
+ class ResourceGuard:
577
+ __slots__ = "action", "_guarded"
578
+
579
+ def __init__(self, action: str):
580
+ self.action = action
581
+ self._guarded = False
582
+
583
+ def __enter__(self) -> None:
584
+ if self._guarded:
585
+ raise BusyResourceError(self.action)
586
+
587
+ self._guarded = True
588
+
589
+ def __exit__(
590
+ self,
591
+ exc_type: type[BaseException] | None,
592
+ exc_val: BaseException | None,
593
+ exc_tb: TracebackType | None,
594
+ ) -> bool | None:
595
+ self._guarded = False
596
+ return None
parrot/lib/python3.10/site-packages/anyio/_core/_tasks.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ from types import TracebackType
5
+ from warnings import warn
6
+
7
+ from ..abc._tasks import TaskGroup, TaskStatus
8
+ from ._compat import (
9
+ DeprecatedAsyncContextManager,
10
+ DeprecatedAwaitable,
11
+ DeprecatedAwaitableFloat,
12
+ )
13
+ from ._eventloop import get_asynclib
14
+
15
+
16
class _IgnoredTaskStatus(TaskStatus[object]):
    """Task status implementation that silently discards the started() signal."""

    def started(self, value: object = None) -> None:
        """Accept and ignore the readiness notification."""


TASK_STATUS_IGNORED = _IgnoredTaskStatus()
22
+
23
+
24
class CancelScope(DeprecatedAsyncContextManager["CancelScope"]):
    """
    Wraps a unit of work that can be made separately cancellable.

    :param deadline: The time (clock value) when this scope is cancelled automatically
    :param shield: ``True`` to shield the cancel scope from external cancellation
    """

    def __new__(
        cls, *, deadline: float = math.inf, shield: bool = False
    ) -> CancelScope:
        # __new__ delegates instantiation to the active backend's CancelScope,
        # so the methods below are interface stubs only and are never invoked.
        return get_asynclib().CancelScope(shield=shield, deadline=deadline)

    def cancel(self) -> DeprecatedAwaitable:
        """Cancel this scope immediately."""
        raise NotImplementedError

    @property
    def deadline(self) -> float:
        """
        The time (clock value) when this scope is cancelled automatically.

        Will be ``float('inf')`` if no timeout has been set.

        """
        raise NotImplementedError

    @deadline.setter
    def deadline(self, value: float) -> None:
        raise NotImplementedError

    @property
    def cancel_called(self) -> bool:
        """``True`` if :meth:`cancel` has been called."""
        raise NotImplementedError

    @property
    def shield(self) -> bool:
        """
        ``True`` if this scope is shielded from external cancellation.

        While a scope is shielded, it will not receive cancellations from outside.

        """
        raise NotImplementedError

    @shield.setter
    def shield(self, value: bool) -> None:
        raise NotImplementedError

    def __enter__(self) -> CancelScope:
        # Interface stub; the backend implementation provides the real context manager.
        raise NotImplementedError

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        raise NotImplementedError
84
+
85
+
86
def open_cancel_scope(*, shield: bool = False) -> CancelScope:
    """
    Open a cancel scope.

    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a cancel scope

    .. deprecated:: 3.0
       Use :class:`~CancelScope` directly.

    """
    # Warn first, then build the backend-specific scope.
    message = "open_cancel_scope() is deprecated -- use CancelScope() directly"
    warn(message, DeprecationWarning)
    return get_asynclib().CancelScope(shield=shield)
102
+
103
+
104
class FailAfterContextManager(DeprecatedAsyncContextManager[CancelScope]):
    """Wraps a cancel scope and converts its expiry into a :exc:`TimeoutError`."""

    def __init__(self, cancel_scope: CancelScope):
        self._cancel_scope = cancel_scope

    def __enter__(self) -> CancelScope:
        return self._cancel_scope.__enter__()

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        scope = self._cancel_scope
        retval = scope.__exit__(exc_type, exc_val, exc_tb)
        if scope.cancel_called:
            # The scope was cancelled (deadline reached); surface it as a timeout.
            raise TimeoutError

        return retval
122
+
123
+
124
def fail_after(delay: float | None, shield: bool = False) -> FailAfterContextManager:
    """
    Create a context manager which raises a :class:`TimeoutError` if does not finish in time.

    :param delay: maximum allowed time (in seconds) before raising the exception, or ``None`` to
        disable the timeout
    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a context manager that yields a cancel scope
    :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.CancelScope`\\]

    """
    asynclib = get_asynclib()
    if delay is None:
        # No timeout requested: the deadline never arrives.
        deadline = math.inf
    else:
        deadline = asynclib.current_time() + delay

    scope = asynclib.CancelScope(deadline=deadline, shield=shield)
    return FailAfterContextManager(scope)
140
+
141
+
142
def move_on_after(delay: float | None, shield: bool = False) -> CancelScope:
    """
    Create a cancel scope with a deadline that expires after the given delay.

    :param delay: maximum allowed time (in seconds) before exiting the context block, or ``None``
        to disable the timeout
    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a cancel scope

    """
    asynclib = get_asynclib()
    if delay is None:
        # No timeout requested: the deadline never arrives.
        deadline = math.inf
    else:
        deadline = asynclib.current_time() + delay

    return asynclib.CancelScope(deadline=deadline, shield=shield)
156
+
157
+
158
def current_effective_deadline() -> DeprecatedAwaitableFloat:
    """
    Return the nearest deadline among all the cancel scopes effective for the current task.

    :return: a clock value from the event loop's internal clock (or ``float('inf')`` if
        there is no deadline in effect, or ``float('-inf')`` if the current scope has
        been cancelled)
    :rtype: float

    """
    deadline = get_asynclib().current_effective_deadline()
    # Wrapped so that legacy "await current_effective_deadline()" still works.
    return DeprecatedAwaitableFloat(deadline, current_effective_deadline)
171
+
172
+
173
def create_task_group() -> TaskGroup:
    """
    Create a task group.

    :return: a task group

    """
    # Resolved at call time so the currently active backend is used.
    return get_asynclib().TaskGroup()
parrot/lib/python3.10/site-packages/anyio/_core/_testing.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Any, Awaitable, Generator
4
+
5
+ from ._compat import DeprecatedAwaitableList, _warn_deprecation
6
+ from ._eventloop import get_asynclib
7
+
8
+
9
class TaskInfo:
    """
    Represents an asynchronous task.

    :ivar int id: the unique identifier of the task
    :ivar parent_id: the identifier of the parent task, if any
    :vartype parent_id: Optional[int]
    :ivar str name: the description of the task (if any)
    :ivar ~collections.abc.Coroutine coro: the coroutine object of the task
    """

    __slots__ = "_name", "id", "parent_id", "name", "coro"

    def __init__(
        self,
        id: int,
        parent_id: int | None,
        name: str | None,
        coro: Generator[Any, Any, Any] | Awaitable[Any],
    ):
        # Qualified name of get_current_task(), used when emitting the
        # deprecation warning from __await__.
        func = get_current_task
        self._name = f"{func.__module__}.{func.__qualname__}"
        self.id: int = id
        self.parent_id: int | None = parent_id
        self.name: str | None = name
        self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro

    def __eq__(self, other: object) -> bool:
        # Two TaskInfo objects are considered equal when they refer to the same task id.
        if not isinstance(other, TaskInfo):
            return NotImplemented

        return self.id == other.id

    def __hash__(self) -> int:
        return hash(self.id)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})"

    def __await__(self) -> Generator[None, None, TaskInfo]:
        # Awaiting a TaskInfo is deprecated; warn, then resolve to self.
        _warn_deprecation(self)
        if False:
            yield  # pragma: no cover -- keeps this a generator function

        return self

    def _unwrap(self) -> TaskInfo:
        return self
57
+
58
+
59
def get_current_task() -> TaskInfo:
    """
    Return the current task.

    :return: a representation of the current task

    """
    # Resolved at call time so the currently active backend is used.
    return get_asynclib().get_current_task()
67
+
68
+
69
def get_running_tasks() -> DeprecatedAwaitableList[TaskInfo]:
    """
    Return a list of running tasks in the current event loop.

    :return: a list of task info objects

    """
    running = get_asynclib().get_running_tasks()
    # Wrapped so that legacy "await get_running_tasks()" still works.
    return DeprecatedAwaitableList(running, func=get_running_tasks)
78
+
79
+
80
async def wait_all_tasks_blocked() -> None:
    """Wait until all other tasks are waiting for something."""
    # Delegates to the backend; primarily useful in tests for deterministic scheduling.
    await get_asynclib().wait_all_tasks_blocked()
parrot/lib/python3.10/site-packages/anyio/_core/_typedattr.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ from typing import Any, Callable, Mapping, TypeVar, overload
5
+
6
+ from ._exceptions import TypedAttributeLookupError
7
+
8
+ if sys.version_info >= (3, 8):
9
+ from typing import final
10
+ else:
11
+ from typing_extensions import final
12
+
13
+ T_Attr = TypeVar("T_Attr")
14
+ T_Default = TypeVar("T_Default")
15
+ undefined = object()
16
+
17
+
18
def typed_attribute() -> Any:
    """Return a unique object, used to mark typed attributes."""
    # A bare object() compares equal only to itself, making it a safe sentinel key.
    marker = object()
    return marker
21
+
22
+
23
class TypedAttributeSet:
    """
    Superclass for typed attribute collections.

    Checks that every public attribute of every subclass has a type annotation.
    """

    def __init_subclass__(cls) -> None:
        hints: dict[str, Any] = getattr(cls, "__annotations__", {})
        # Every non-underscore attribute must appear in the annotations mapping.
        for name in dir(cls):
            if not name.startswith("_") and name not in hints:
                raise TypeError(
                    f"Attribute {name!r} is missing its type annotation"
                )

        super().__init_subclass__()
39
+
40
+
41
class TypedAttributeProvider:
    """Base class for classes that wish to provide typed extra attributes."""

    @property
    def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]:
        """
        A mapping of the extra attributes to callables that return the corresponding values.

        If the provider wraps another provider, the attributes from that wrapper should also be
        included in the returned mapping (but the wrapper may override the callables from the
        wrapped instance).

        """
        return {}

    @overload
    def extra(self, attribute: T_Attr) -> T_Attr:
        ...

    @overload
    def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default:
        ...

    @final
    def extra(self, attribute: Any, default: object = undefined) -> object:
        """
        extra(attribute, default=undefined)

        Return the value of the given typed extra attribute.

        :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to look for
        :param default: the value that should be returned if no value is found for the attribute
        :raises ~anyio.TypedAttributeLookupError: if the search failed and no default value was
            given

        """
        # NOTE: the call is kept inside the try block, so a KeyError raised by
        # the getter itself is also treated as "attribute not found".
        try:
            return self.extra_attributes[attribute]()
        except KeyError:
            if default is undefined:
                raise TypedAttributeLookupError("Attribute not found") from None

            return default
parrot/lib/python3.10/site-packages/anyio/abc/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.84 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/abc/__pycache__/_resources.cpython-310.pyc ADDED
Binary file (1.41 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/abc/__pycache__/_sockets.cpython-310.pyc ADDED
Binary file (6.19 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/abc/__pycache__/_streams.cpython-310.pyc ADDED
Binary file (7.44 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/abc/__pycache__/_subprocesses.cpython-310.pyc ADDED
Binary file (2.97 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/abc/__pycache__/_tasks.cpython-310.pyc ADDED
Binary file (4.3 kB). View file
 
parrot/lib/python3.10/site-packages/anyio/abc/_tasks.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ from abc import ABCMeta, abstractmethod
5
+ from types import TracebackType
6
+ from typing import TYPE_CHECKING, Any, Awaitable, Callable, TypeVar, overload
7
+ from warnings import warn
8
+
9
+ if sys.version_info >= (3, 8):
10
+ from typing import Protocol
11
+ else:
12
+ from typing_extensions import Protocol
13
+
14
+ if TYPE_CHECKING:
15
+ from anyio._core._tasks import CancelScope
16
+
17
+ T_Retval = TypeVar("T_Retval")
18
+ T_contra = TypeVar("T_contra", contravariant=True)
19
+
20
+
21
class TaskStatus(Protocol[T_contra]):
    # Protocol for the object passed to tasks launched via TaskGroup.start();
    # the overloads let a task started with no value call started() bare.
    @overload
    def started(self: TaskStatus[None]) -> None:
        ...

    @overload
    def started(self, value: T_contra) -> None:
        ...

    def started(self, value: T_contra | None = None) -> None:
        """
        Signal that the task has started.

        :param value: object passed back to the starter of the task
        """
37
+
38
class TaskGroup(metaclass=ABCMeta):
    """
    Groups several asynchronous tasks together.

    :ivar cancel_scope: the cancel scope inherited by all child tasks
    :vartype cancel_scope: CancelScope
    """

    cancel_scope: CancelScope

    async def spawn(
        self,
        func: Callable[..., Awaitable[Any]],
        *args: object,
        name: object = None,
    ) -> None:
        """
        Start a new task in this task group.

        :param func: a coroutine function
        :param args: positional arguments to call the function with
        :param name: name of the task, for the purposes of introspection and debugging

        .. deprecated:: 3.0
           Use :meth:`start_soon` instead. If your code needs AnyIO 2 compatibility, you
           can keep using this until AnyIO 4.

        """
        # Backward-compatibility shim: warn, then forward to start_soon().
        warn(
            'spawn() is deprecated -- use start_soon() (without the "await") instead',
            DeprecationWarning,
        )
        self.start_soon(func, *args, name=name)

    @abstractmethod
    def start_soon(
        self,
        func: Callable[..., Awaitable[Any]],
        *args: object,
        name: object = None,
    ) -> None:
        """
        Start a new task in this task group.

        :param func: a coroutine function
        :param args: positional arguments to call the function with
        :param name: name of the task, for the purposes of introspection and debugging

        .. versionadded:: 3.0
        """

    @abstractmethod
    async def start(
        self,
        func: Callable[..., Awaitable[Any]],
        *args: object,
        name: object = None,
    ) -> Any:
        """
        Start a new task and wait until it signals for readiness.

        :param func: a coroutine function
        :param args: positional arguments to call the function with
        :param name: name of the task, for the purposes of introspection and debugging
        :return: the value passed to ``task_status.started()``
        :raises RuntimeError: if the task finishes without calling ``task_status.started()``

        .. versionadded:: 3.0
        """

    @abstractmethod
    async def __aenter__(self) -> TaskGroup:
        """Enter the task group context and allow starting new tasks."""

    @abstractmethod
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        """Exit the task group context waiting for all tasks to finish."""
parrot/lib/python3.10/site-packages/anyio/abc/_testing.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import types
4
+ from abc import ABCMeta, abstractmethod
5
+ from collections.abc import AsyncGenerator, Iterable
6
+ from typing import Any, Callable, Coroutine, TypeVar
7
+
8
+ _T = TypeVar("_T")
9
+
10
+
11
+ class TestRunner(metaclass=ABCMeta):
12
+ """
13
+ Encapsulates a running event loop. Every call made through this object will use the same event
14
+ loop.
15
+ """
16
+
17
+ def __enter__(self) -> TestRunner:
18
+ return self
19
+
20
+ def __exit__(
21
+ self,
22
+ exc_type: type[BaseException] | None,
23
+ exc_val: BaseException | None,
24
+ exc_tb: types.TracebackType | None,
25
+ ) -> bool | None:
26
+ self.close()
27
+ return None
28
+
29
+ @abstractmethod
30
+ def close(self) -> None:
31
+ """Close the event loop."""
32
+
33
+ @abstractmethod
34
+ def run_asyncgen_fixture(
35
+ self,
36
+ fixture_func: Callable[..., AsyncGenerator[_T, Any]],
37
+ kwargs: dict[str, Any],
38
+ ) -> Iterable[_T]:
39
+ """
40
+ Run an async generator fixture.
41
+
42
+ :param fixture_func: the fixture function
43
+ :param kwargs: keyword arguments to call the fixture function with
44
+ :return: an iterator yielding the value yielded from the async generator
45
+ """
46
+
47
+ @abstractmethod
48
+ def run_fixture(
49
+ self,
50
+ fixture_func: Callable[..., Coroutine[Any, Any, _T]],
51
+ kwargs: dict[str, Any],
52
+ ) -> _T:
53
+ """
54
+ Run an async fixture.
55
+
56
+ :param fixture_func: the fixture function
57
+ :param kwargs: keyword arguments to call the fixture function with
58
+ :return: the return value of the fixture function
59
+ """
60
+
61
+ @abstractmethod
62
+ def run_test(
63
+ self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
64
+ ) -> None:
65
+ """
66
+ Run an async test function.
67
+
68
+ :param test_func: the test function
69
+ :param kwargs: keyword arguments to call the test function with
70
+ """