koichi12 commited on
Commit
9aa22f7
·
verified ·
1 Parent(s): 6ef22bb

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .venv/lib/python3.11/site-packages/annotated_types/__init__.py +432 -0
  2. .venv/lib/python3.11/site-packages/annotated_types/__pycache__/__init__.cpython-311.pyc +0 -0
  3. .venv/lib/python3.11/site-packages/annotated_types/__pycache__/test_cases.cpython-311.pyc +0 -0
  4. .venv/lib/python3.11/site-packages/anyio/__init__.py +77 -0
  5. .venv/lib/python3.11/site-packages/anyio/__pycache__/__init__.cpython-311.pyc +0 -0
  6. .venv/lib/python3.11/site-packages/anyio/__pycache__/from_thread.cpython-311.pyc +0 -0
  7. .venv/lib/python3.11/site-packages/anyio/__pycache__/lowlevel.cpython-311.pyc +0 -0
  8. .venv/lib/python3.11/site-packages/anyio/__pycache__/pytest_plugin.cpython-311.pyc +0 -0
  9. .venv/lib/python3.11/site-packages/anyio/__pycache__/to_interpreter.cpython-311.pyc +0 -0
  10. .venv/lib/python3.11/site-packages/anyio/__pycache__/to_process.cpython-311.pyc +0 -0
  11. .venv/lib/python3.11/site-packages/anyio/__pycache__/to_thread.cpython-311.pyc +0 -0
  12. .venv/lib/python3.11/site-packages/anyio/_backends/__init__.py +0 -0
  13. .venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/__init__.cpython-311.pyc +0 -0
  14. .venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/_trio.cpython-311.pyc +0 -0
  15. .venv/lib/python3.11/site-packages/anyio/_backends/_asyncio.py +2807 -0
  16. .venv/lib/python3.11/site-packages/anyio/_backends/_trio.py +1334 -0
  17. .venv/lib/python3.11/site-packages/anyio/_core/__init__.py +0 -0
  18. .venv/lib/python3.11/site-packages/anyio/_core/__pycache__/__init__.cpython-311.pyc +0 -0
  19. .venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_asyncio_selector_thread.cpython-311.pyc +0 -0
  20. .venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_exceptions.cpython-311.pyc +0 -0
  21. .venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_resources.cpython-311.pyc +0 -0
  22. .venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_signals.cpython-311.pyc +0 -0
  23. .venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_sockets.cpython-311.pyc +0 -0
  24. .venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_subprocesses.cpython-311.pyc +0 -0
  25. .venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_synchronization.cpython-311.pyc +0 -0
  26. .venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_tasks.cpython-311.pyc +0 -0
  27. .venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_typedattr.cpython-311.pyc +0 -0
  28. .venv/lib/python3.11/site-packages/anyio/_core/_asyncio_selector_thread.py +167 -0
  29. .venv/lib/python3.11/site-packages/anyio/_core/_eventloop.py +166 -0
  30. .venv/lib/python3.11/site-packages/anyio/_core/_exceptions.py +126 -0
  31. .venv/lib/python3.11/site-packages/anyio/_core/_fileio.py +729 -0
  32. .venv/lib/python3.11/site-packages/anyio/_core/_resources.py +18 -0
  33. .venv/lib/python3.11/site-packages/anyio/_core/_signals.py +27 -0
  34. .venv/lib/python3.11/site-packages/anyio/_core/_sockets.py +787 -0
  35. .venv/lib/python3.11/site-packages/anyio/_core/_streams.py +52 -0
  36. .venv/lib/python3.11/site-packages/anyio/_core/_subprocesses.py +196 -0
  37. .venv/lib/python3.11/site-packages/anyio/_core/_synchronization.py +732 -0
  38. .venv/lib/python3.11/site-packages/anyio/_core/_tasks.py +158 -0
  39. .venv/lib/python3.11/site-packages/anyio/_core/_testing.py +78 -0
  40. .venv/lib/python3.11/site-packages/anyio/_core/_typedattr.py +81 -0
  41. .venv/lib/python3.11/site-packages/anyio/abc/_eventloop.py +376 -0
  42. .venv/lib/python3.11/site-packages/anyio/abc/_subprocesses.py +79 -0
  43. .venv/lib/python3.11/site-packages/anyio/abc/_testing.py +65 -0
  44. .venv/lib/python3.11/site-packages/anyio/from_thread.py +527 -0
  45. .venv/lib/python3.11/site-packages/anyio/lowlevel.py +161 -0
  46. .venv/lib/python3.11/site-packages/anyio/py.typed +0 -0
  47. .venv/lib/python3.11/site-packages/anyio/pytest_plugin.py +191 -0
  48. .venv/lib/python3.11/site-packages/anyio/streams/__init__.py +0 -0
  49. .venv/lib/python3.11/site-packages/anyio/streams/__pycache__/__init__.cpython-311.pyc +0 -0
  50. .venv/lib/python3.11/site-packages/anyio/streams/__pycache__/buffered.cpython-311.pyc +0 -0
.venv/lib/python3.11/site-packages/annotated_types/__init__.py ADDED
@@ -0,0 +1,432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import sys
3
+ import types
4
+ from dataclasses import dataclass
5
+ from datetime import tzinfo
6
+ from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, SupportsFloat, SupportsIndex, TypeVar, Union
7
+
8
+ if sys.version_info < (3, 8):
9
+ from typing_extensions import Protocol, runtime_checkable
10
+ else:
11
+ from typing import Protocol, runtime_checkable
12
+
13
+ if sys.version_info < (3, 9):
14
+ from typing_extensions import Annotated, Literal
15
+ else:
16
+ from typing import Annotated, Literal
17
+
18
+ if sys.version_info < (3, 10):
19
+ EllipsisType = type(Ellipsis)
20
+ KW_ONLY = {}
21
+ SLOTS = {}
22
+ else:
23
+ from types import EllipsisType
24
+
25
+ KW_ONLY = {"kw_only": True}
26
+ SLOTS = {"slots": True}
27
+
28
+
29
+ __all__ = (
30
+ 'BaseMetadata',
31
+ 'GroupedMetadata',
32
+ 'Gt',
33
+ 'Ge',
34
+ 'Lt',
35
+ 'Le',
36
+ 'Interval',
37
+ 'MultipleOf',
38
+ 'MinLen',
39
+ 'MaxLen',
40
+ 'Len',
41
+ 'Timezone',
42
+ 'Predicate',
43
+ 'LowerCase',
44
+ 'UpperCase',
45
+ 'IsDigits',
46
+ 'IsFinite',
47
+ 'IsNotFinite',
48
+ 'IsNan',
49
+ 'IsNotNan',
50
+ 'IsInfinite',
51
+ 'IsNotInfinite',
52
+ 'doc',
53
+ 'DocInfo',
54
+ '__version__',
55
+ )
56
+
57
+ __version__ = '0.7.0'
58
+
59
+
60
+ T = TypeVar('T')
61
+
62
+
63
+ # arguments that start with __ are considered
64
+ # positional only
65
+ # see https://peps.python.org/pep-0484/#positional-only-arguments
66
+
67
+
68
+ class SupportsGt(Protocol):
69
+ def __gt__(self: T, __other: T) -> bool:
70
+ ...
71
+
72
+
73
+ class SupportsGe(Protocol):
74
+ def __ge__(self: T, __other: T) -> bool:
75
+ ...
76
+
77
+
78
+ class SupportsLt(Protocol):
79
+ def __lt__(self: T, __other: T) -> bool:
80
+ ...
81
+
82
+
83
+ class SupportsLe(Protocol):
84
+ def __le__(self: T, __other: T) -> bool:
85
+ ...
86
+
87
+
88
+ class SupportsMod(Protocol):
89
+ def __mod__(self: T, __other: T) -> T:
90
+ ...
91
+
92
+
93
+ class SupportsDiv(Protocol):
94
+ def __div__(self: T, __other: T) -> T:
95
+ ...
96
+
97
+
98
+ class BaseMetadata:
99
+ """Base class for all metadata.
100
+
101
+ This exists mainly so that implementers
102
+ can do `isinstance(..., BaseMetadata)` while traversing field annotations.
103
+ """
104
+
105
+ __slots__ = ()
106
+
107
+
108
+ @dataclass(frozen=True, **SLOTS)
109
+ class Gt(BaseMetadata):
110
+ """Gt(gt=x) implies that the value must be greater than x.
111
+
112
+ It can be used with any type that supports the ``>`` operator,
113
+ including numbers, dates and times, strings, sets, and so on.
114
+ """
115
+
116
+ gt: SupportsGt
117
+
118
+
119
+ @dataclass(frozen=True, **SLOTS)
120
+ class Ge(BaseMetadata):
121
+ """Ge(ge=x) implies that the value must be greater than or equal to x.
122
+
123
+ It can be used with any type that supports the ``>=`` operator,
124
+ including numbers, dates and times, strings, sets, and so on.
125
+ """
126
+
127
+ ge: SupportsGe
128
+
129
+
130
+ @dataclass(frozen=True, **SLOTS)
131
+ class Lt(BaseMetadata):
132
+ """Lt(lt=x) implies that the value must be less than x.
133
+
134
+ It can be used with any type that supports the ``<`` operator,
135
+ including numbers, dates and times, strings, sets, and so on.
136
+ """
137
+
138
+ lt: SupportsLt
139
+
140
+
141
+ @dataclass(frozen=True, **SLOTS)
142
+ class Le(BaseMetadata):
143
+ """Le(le=x) implies that the value must be less than or equal to x.
144
+
145
+ It can be used with any type that supports the ``<=`` operator,
146
+ including numbers, dates and times, strings, sets, and so on.
147
+ """
148
+
149
+ le: SupportsLe
150
+
151
+
152
+ @runtime_checkable
153
+ class GroupedMetadata(Protocol):
154
+ """A grouping of multiple objects, like typing.Unpack.
155
+
156
+ `GroupedMetadata` on its own is not metadata and has no meaning.
157
+ All of the constraints and metadata should be fully expressable
158
+ in terms of the `BaseMetadata`'s returned by `GroupedMetadata.__iter__()`.
159
+
160
+ Concrete implementations should override `GroupedMetadata.__iter__()`
161
+ to add their own metadata.
162
+ For example:
163
+
164
+ >>> @dataclass
165
+ >>> class Field(GroupedMetadata):
166
+ >>> gt: float | None = None
167
+ >>> description: str | None = None
168
+ ...
169
+ >>> def __iter__(self) -> Iterable[object]:
170
+ >>> if self.gt is not None:
171
+ >>> yield Gt(self.gt)
172
+ >>> if self.description is not None:
173
+ >>> yield Description(self.gt)
174
+
175
+ Also see the implementation of `Interval` below for an example.
176
+
177
+ Parsers should recognize this and unpack it so that it can be used
178
+ both with and without unpacking:
179
+
180
+ - `Annotated[int, Field(...)]` (parser must unpack Field)
181
+ - `Annotated[int, *Field(...)]` (PEP-646)
182
+ """ # noqa: trailing-whitespace
183
+
184
+ @property
185
+ def __is_annotated_types_grouped_metadata__(self) -> Literal[True]:
186
+ return True
187
+
188
+ def __iter__(self) -> Iterator[object]:
189
+ ...
190
+
191
+ if not TYPE_CHECKING:
192
+ __slots__ = () # allow subclasses to use slots
193
+
194
+ def __init_subclass__(cls, *args: Any, **kwargs: Any) -> None:
195
+ # Basic ABC like functionality without the complexity of an ABC
196
+ super().__init_subclass__(*args, **kwargs)
197
+ if cls.__iter__ is GroupedMetadata.__iter__:
198
+ raise TypeError("Can't subclass GroupedMetadata without implementing __iter__")
199
+
200
+ def __iter__(self) -> Iterator[object]: # noqa: F811
201
+ raise NotImplementedError # more helpful than "None has no attribute..." type errors
202
+
203
+
204
+ @dataclass(frozen=True, **KW_ONLY, **SLOTS)
205
+ class Interval(GroupedMetadata):
206
+ """Interval can express inclusive or exclusive bounds with a single object.
207
+
208
+ It accepts keyword arguments ``gt``, ``ge``, ``lt``, and/or ``le``, which
209
+ are interpreted the same way as the single-bound constraints.
210
+ """
211
+
212
+ gt: Union[SupportsGt, None] = None
213
+ ge: Union[SupportsGe, None] = None
214
+ lt: Union[SupportsLt, None] = None
215
+ le: Union[SupportsLe, None] = None
216
+
217
+ def __iter__(self) -> Iterator[BaseMetadata]:
218
+ """Unpack an Interval into zero or more single-bounds."""
219
+ if self.gt is not None:
220
+ yield Gt(self.gt)
221
+ if self.ge is not None:
222
+ yield Ge(self.ge)
223
+ if self.lt is not None:
224
+ yield Lt(self.lt)
225
+ if self.le is not None:
226
+ yield Le(self.le)
227
+
228
+
229
+ @dataclass(frozen=True, **SLOTS)
230
+ class MultipleOf(BaseMetadata):
231
+ """MultipleOf(multiple_of=x) might be interpreted in two ways:
232
+
233
+ 1. Python semantics, implying ``value % multiple_of == 0``, or
234
+ 2. JSONschema semantics, where ``int(value / multiple_of) == value / multiple_of``
235
+
236
+ We encourage users to be aware of these two common interpretations,
237
+ and libraries to carefully document which they implement.
238
+ """
239
+
240
+ multiple_of: Union[SupportsDiv, SupportsMod]
241
+
242
+
243
+ @dataclass(frozen=True, **SLOTS)
244
+ class MinLen(BaseMetadata):
245
+ """
246
+ MinLen() implies minimum inclusive length,
247
+ e.g. ``len(value) >= min_length``.
248
+ """
249
+
250
+ min_length: Annotated[int, Ge(0)]
251
+
252
+
253
+ @dataclass(frozen=True, **SLOTS)
254
+ class MaxLen(BaseMetadata):
255
+ """
256
+ MaxLen() implies maximum inclusive length,
257
+ e.g. ``len(value) <= max_length``.
258
+ """
259
+
260
+ max_length: Annotated[int, Ge(0)]
261
+
262
+
263
+ @dataclass(frozen=True, **SLOTS)
264
+ class Len(GroupedMetadata):
265
+ """
266
+ Len() implies that ``min_length <= len(value) <= max_length``.
267
+
268
+ Upper bound may be omitted or ``None`` to indicate no upper length bound.
269
+ """
270
+
271
+ min_length: Annotated[int, Ge(0)] = 0
272
+ max_length: Optional[Annotated[int, Ge(0)]] = None
273
+
274
+ def __iter__(self) -> Iterator[BaseMetadata]:
275
+ """Unpack a Len into zone or more single-bounds."""
276
+ if self.min_length > 0:
277
+ yield MinLen(self.min_length)
278
+ if self.max_length is not None:
279
+ yield MaxLen(self.max_length)
280
+
281
+
282
+ @dataclass(frozen=True, **SLOTS)
283
+ class Timezone(BaseMetadata):
284
+ """Timezone(tz=...) requires a datetime to be aware (or ``tz=None``, naive).
285
+
286
+ ``Annotated[datetime, Timezone(None)]`` must be a naive datetime.
287
+ ``Timezone[...]`` (the ellipsis literal) expresses that the datetime must be
288
+ tz-aware but any timezone is allowed.
289
+
290
+ You may also pass a specific timezone string or tzinfo object such as
291
+ ``Timezone(timezone.utc)`` or ``Timezone("Africa/Abidjan")`` to express that
292
+ you only allow a specific timezone, though we note that this is often
293
+ a symptom of poor design.
294
+ """
295
+
296
+ tz: Union[str, tzinfo, EllipsisType, None]
297
+
298
+
299
+ @dataclass(frozen=True, **SLOTS)
300
+ class Unit(BaseMetadata):
301
+ """Indicates that the value is a physical quantity with the specified unit.
302
+
303
+ It is intended for usage with numeric types, where the value represents the
304
+ magnitude of the quantity. For example, ``distance: Annotated[float, Unit('m')]``
305
+ or ``speed: Annotated[float, Unit('m/s')]``.
306
+
307
+ Interpretation of the unit string is left to the discretion of the consumer.
308
+ It is suggested to follow conventions established by python libraries that work
309
+ with physical quantities, such as
310
+
311
+ - ``pint`` : <https://pint.readthedocs.io/en/stable/>
312
+ - ``astropy.units``: <https://docs.astropy.org/en/stable/units/>
313
+
314
+ For indicating a quantity with a certain dimensionality but without a specific unit
315
+ it is recommended to use square brackets, e.g. `Annotated[float, Unit('[time]')]`.
316
+ Note, however, ``annotated_types`` itself makes no use of the unit string.
317
+ """
318
+
319
+ unit: str
320
+
321
+
322
+ @dataclass(frozen=True, **SLOTS)
323
+ class Predicate(BaseMetadata):
324
+ """``Predicate(func: Callable)`` implies `func(value)` is truthy for valid values.
325
+
326
+ Users should prefer statically inspectable metadata, but if you need the full
327
+ power and flexibility of arbitrary runtime predicates... here it is.
328
+
329
+ We provide a few predefined predicates for common string constraints:
330
+ ``IsLower = Predicate(str.islower)``, ``IsUpper = Predicate(str.isupper)``, and
331
+ ``IsDigits = Predicate(str.isdigit)``. Users are encouraged to use methods which
332
+ can be given special handling, and avoid indirection like ``lambda s: s.lower()``.
333
+
334
+ Some libraries might have special logic to handle certain predicates, e.g. by
335
+ checking for `str.isdigit` and using its presence to both call custom logic to
336
+ enforce digit-only strings, and customise some generated external schema.
337
+
338
+ We do not specify what behaviour should be expected for predicates that raise
339
+ an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently
340
+ skip invalid constraints, or statically raise an error; or it might try calling it
341
+ and then propagate or discard the resulting exception.
342
+ """
343
+
344
+ func: Callable[[Any], bool]
345
+
346
+ def __repr__(self) -> str:
347
+ if getattr(self.func, "__name__", "<lambda>") == "<lambda>":
348
+ return f"{self.__class__.__name__}({self.func!r})"
349
+ if isinstance(self.func, (types.MethodType, types.BuiltinMethodType)) and (
350
+ namespace := getattr(self.func.__self__, "__name__", None)
351
+ ):
352
+ return f"{self.__class__.__name__}({namespace}.{self.func.__name__})"
353
+ if isinstance(self.func, type(str.isascii)): # method descriptor
354
+ return f"{self.__class__.__name__}({self.func.__qualname__})"
355
+ return f"{self.__class__.__name__}({self.func.__name__})"
356
+
357
+
358
+ @dataclass
359
+ class Not:
360
+ func: Callable[[Any], bool]
361
+
362
+ def __call__(self, __v: Any) -> bool:
363
+ return not self.func(__v)
364
+
365
+
366
+ _StrType = TypeVar("_StrType", bound=str)
367
+
368
+ LowerCase = Annotated[_StrType, Predicate(str.islower)]
369
+ """
370
+ Return True if the string is a lowercase string, False otherwise.
371
+
372
+ A string is lowercase if all cased characters in the string are lowercase and there is at least one cased character in the string.
373
+ """ # noqa: E501
374
+ UpperCase = Annotated[_StrType, Predicate(str.isupper)]
375
+ """
376
+ Return True if the string is an uppercase string, False otherwise.
377
+
378
+ A string is uppercase if all cased characters in the string are uppercase and there is at least one cased character in the string.
379
+ """ # noqa: E501
380
+ IsDigit = Annotated[_StrType, Predicate(str.isdigit)]
381
+ IsDigits = IsDigit # type: ignore # plural for backwards compatibility, see #63
382
+ """
383
+ Return True if the string is a digit string, False otherwise.
384
+
385
+ A string is a digit string if all characters in the string are digits and there is at least one character in the string.
386
+ """ # noqa: E501
387
+ IsAscii = Annotated[_StrType, Predicate(str.isascii)]
388
+ """
389
+ Return True if all characters in the string are ASCII, False otherwise.
390
+
391
+ ASCII characters have code points in the range U+0000-U+007F. Empty string is ASCII too.
392
+ """
393
+
394
+ _NumericType = TypeVar('_NumericType', bound=Union[SupportsFloat, SupportsIndex])
395
+ IsFinite = Annotated[_NumericType, Predicate(math.isfinite)]
396
+ """Return True if x is neither an infinity nor a NaN, and False otherwise."""
397
+ IsNotFinite = Annotated[_NumericType, Predicate(Not(math.isfinite))]
398
+ """Return True if x is one of infinity or NaN, and False otherwise"""
399
+ IsNan = Annotated[_NumericType, Predicate(math.isnan)]
400
+ """Return True if x is a NaN (not a number), and False otherwise."""
401
+ IsNotNan = Annotated[_NumericType, Predicate(Not(math.isnan))]
402
+ """Return True if x is anything but NaN (not a number), and False otherwise."""
403
+ IsInfinite = Annotated[_NumericType, Predicate(math.isinf)]
404
+ """Return True if x is a positive or negative infinity, and False otherwise."""
405
+ IsNotInfinite = Annotated[_NumericType, Predicate(Not(math.isinf))]
406
+ """Return True if x is neither a positive or negative infinity, and False otherwise."""
407
+
408
+ try:
409
+ from typing_extensions import DocInfo, doc # type: ignore [attr-defined]
410
+ except ImportError:
411
+
412
+ @dataclass(frozen=True, **SLOTS)
413
+ class DocInfo: # type: ignore [no-redef]
414
+ """ "
415
+ The return value of doc(), mainly to be used by tools that want to extract the
416
+ Annotated documentation at runtime.
417
+ """
418
+
419
+ documentation: str
420
+ """The documentation string passed to doc()."""
421
+
422
+ def doc(
423
+ documentation: str,
424
+ ) -> DocInfo:
425
+ """
426
+ Add documentation to a type annotation inside of Annotated.
427
+
428
+ For example:
429
+
430
+ >>> def hi(name: Annotated[int, doc("The name of the user")]) -> None: ...
431
+ """
432
+ return DocInfo(documentation)
.venv/lib/python3.11/site-packages/annotated_types/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (20.6 kB). View file
 
.venv/lib/python3.11/site-packages/annotated_types/__pycache__/test_cases.cpython-311.pyc ADDED
Binary file (15.4 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/__init__.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from ._core._eventloop import current_time as current_time
4
+ from ._core._eventloop import get_all_backends as get_all_backends
5
+ from ._core._eventloop import get_cancelled_exc_class as get_cancelled_exc_class
6
+ from ._core._eventloop import run as run
7
+ from ._core._eventloop import sleep as sleep
8
+ from ._core._eventloop import sleep_forever as sleep_forever
9
+ from ._core._eventloop import sleep_until as sleep_until
10
+ from ._core._exceptions import BrokenResourceError as BrokenResourceError
11
+ from ._core._exceptions import BrokenWorkerIntepreter as BrokenWorkerIntepreter
12
+ from ._core._exceptions import BrokenWorkerProcess as BrokenWorkerProcess
13
+ from ._core._exceptions import BusyResourceError as BusyResourceError
14
+ from ._core._exceptions import ClosedResourceError as ClosedResourceError
15
+ from ._core._exceptions import DelimiterNotFound as DelimiterNotFound
16
+ from ._core._exceptions import EndOfStream as EndOfStream
17
+ from ._core._exceptions import IncompleteRead as IncompleteRead
18
+ from ._core._exceptions import TypedAttributeLookupError as TypedAttributeLookupError
19
+ from ._core._exceptions import WouldBlock as WouldBlock
20
+ from ._core._fileio import AsyncFile as AsyncFile
21
+ from ._core._fileio import Path as Path
22
+ from ._core._fileio import open_file as open_file
23
+ from ._core._fileio import wrap_file as wrap_file
24
+ from ._core._resources import aclose_forcefully as aclose_forcefully
25
+ from ._core._signals import open_signal_receiver as open_signal_receiver
26
+ from ._core._sockets import connect_tcp as connect_tcp
27
+ from ._core._sockets import connect_unix as connect_unix
28
+ from ._core._sockets import create_connected_udp_socket as create_connected_udp_socket
29
+ from ._core._sockets import (
30
+ create_connected_unix_datagram_socket as create_connected_unix_datagram_socket,
31
+ )
32
+ from ._core._sockets import create_tcp_listener as create_tcp_listener
33
+ from ._core._sockets import create_udp_socket as create_udp_socket
34
+ from ._core._sockets import create_unix_datagram_socket as create_unix_datagram_socket
35
+ from ._core._sockets import create_unix_listener as create_unix_listener
36
+ from ._core._sockets import getaddrinfo as getaddrinfo
37
+ from ._core._sockets import getnameinfo as getnameinfo
38
+ from ._core._sockets import wait_readable as wait_readable
39
+ from ._core._sockets import wait_socket_readable as wait_socket_readable
40
+ from ._core._sockets import wait_socket_writable as wait_socket_writable
41
+ from ._core._sockets import wait_writable as wait_writable
42
+ from ._core._streams import create_memory_object_stream as create_memory_object_stream
43
+ from ._core._subprocesses import open_process as open_process
44
+ from ._core._subprocesses import run_process as run_process
45
+ from ._core._synchronization import CapacityLimiter as CapacityLimiter
46
+ from ._core._synchronization import (
47
+ CapacityLimiterStatistics as CapacityLimiterStatistics,
48
+ )
49
+ from ._core._synchronization import Condition as Condition
50
+ from ._core._synchronization import ConditionStatistics as ConditionStatistics
51
+ from ._core._synchronization import Event as Event
52
+ from ._core._synchronization import EventStatistics as EventStatistics
53
+ from ._core._synchronization import Lock as Lock
54
+ from ._core._synchronization import LockStatistics as LockStatistics
55
+ from ._core._synchronization import ResourceGuard as ResourceGuard
56
+ from ._core._synchronization import Semaphore as Semaphore
57
+ from ._core._synchronization import SemaphoreStatistics as SemaphoreStatistics
58
+ from ._core._tasks import TASK_STATUS_IGNORED as TASK_STATUS_IGNORED
59
+ from ._core._tasks import CancelScope as CancelScope
60
+ from ._core._tasks import create_task_group as create_task_group
61
+ from ._core._tasks import current_effective_deadline as current_effective_deadline
62
+ from ._core._tasks import fail_after as fail_after
63
+ from ._core._tasks import move_on_after as move_on_after
64
+ from ._core._testing import TaskInfo as TaskInfo
65
+ from ._core._testing import get_current_task as get_current_task
66
+ from ._core._testing import get_running_tasks as get_running_tasks
67
+ from ._core._testing import wait_all_tasks_blocked as wait_all_tasks_blocked
68
+ from ._core._typedattr import TypedAttributeProvider as TypedAttributeProvider
69
+ from ._core._typedattr import TypedAttributeSet as TypedAttributeSet
70
+ from ._core._typedattr import typed_attribute as typed_attribute
71
+
72
+ # Re-export imports so they look like they live directly in this package
73
+ for __value in list(locals().values()):
74
+ if getattr(__value, "__module__", "").startswith("anyio."):
75
+ __value.__module__ = __name__
76
+
77
+ del __value
.venv/lib/python3.11/site-packages/anyio/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (4.31 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/__pycache__/from_thread.cpython-311.pyc ADDED
Binary file (26.2 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/__pycache__/lowlevel.cpython-311.pyc ADDED
Binary file (7.64 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/__pycache__/pytest_plugin.cpython-311.pyc ADDED
Binary file (10.9 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/__pycache__/to_interpreter.cpython-311.pyc ADDED
Binary file (10 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/__pycache__/to_process.cpython-311.pyc ADDED
Binary file (13.7 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/__pycache__/to_thread.cpython-311.pyc ADDED
Binary file (3.18 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/_backends/__init__.py ADDED
File without changes
.venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (188 Bytes). View file
 
.venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/_trio.cpython-311.pyc ADDED
Binary file (77.9 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/_backends/_asyncio.py ADDED
@@ -0,0 +1,2807 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import array
4
+ import asyncio
5
+ import concurrent.futures
6
+ import math
7
+ import os
8
+ import socket
9
+ import sys
10
+ import threading
11
+ import weakref
12
+ from asyncio import (
13
+ AbstractEventLoop,
14
+ CancelledError,
15
+ all_tasks,
16
+ create_task,
17
+ current_task,
18
+ get_running_loop,
19
+ sleep,
20
+ )
21
+ from asyncio.base_events import _run_until_complete_cb # type: ignore[attr-defined]
22
+ from collections import OrderedDict, deque
23
+ from collections.abc import (
24
+ AsyncGenerator,
25
+ AsyncIterator,
26
+ Awaitable,
27
+ Callable,
28
+ Collection,
29
+ Coroutine,
30
+ Iterable,
31
+ Sequence,
32
+ )
33
+ from concurrent.futures import Future
34
+ from contextlib import AbstractContextManager, suppress
35
+ from contextvars import Context, copy_context
36
+ from dataclasses import dataclass
37
+ from functools import partial, wraps
38
+ from inspect import (
39
+ CORO_RUNNING,
40
+ CORO_SUSPENDED,
41
+ getcoroutinestate,
42
+ iscoroutine,
43
+ )
44
+ from io import IOBase
45
+ from os import PathLike
46
+ from queue import Queue
47
+ from signal import Signals
48
+ from socket import AddressFamily, SocketKind
49
+ from threading import Thread
50
+ from types import CodeType, TracebackType
51
+ from typing import (
52
+ IO,
53
+ TYPE_CHECKING,
54
+ Any,
55
+ Optional,
56
+ TypeVar,
57
+ cast,
58
+ )
59
+ from weakref import WeakKeyDictionary
60
+
61
+ import sniffio
62
+
63
+ from .. import (
64
+ CapacityLimiterStatistics,
65
+ EventStatistics,
66
+ LockStatistics,
67
+ TaskInfo,
68
+ abc,
69
+ )
70
+ from .._core._eventloop import claim_worker_thread, threadlocals
71
+ from .._core._exceptions import (
72
+ BrokenResourceError,
73
+ BusyResourceError,
74
+ ClosedResourceError,
75
+ EndOfStream,
76
+ WouldBlock,
77
+ iterate_exceptions,
78
+ )
79
+ from .._core._sockets import convert_ipv6_sockaddr
80
+ from .._core._streams import create_memory_object_stream
81
+ from .._core._synchronization import (
82
+ CapacityLimiter as BaseCapacityLimiter,
83
+ )
84
+ from .._core._synchronization import Event as BaseEvent
85
+ from .._core._synchronization import Lock as BaseLock
86
+ from .._core._synchronization import (
87
+ ResourceGuard,
88
+ SemaphoreStatistics,
89
+ )
90
+ from .._core._synchronization import Semaphore as BaseSemaphore
91
+ from .._core._tasks import CancelScope as BaseCancelScope
92
+ from ..abc import (
93
+ AsyncBackend,
94
+ IPSockAddrType,
95
+ SocketListener,
96
+ UDPPacketType,
97
+ UNIXDatagramPacketType,
98
+ )
99
+ from ..abc._eventloop import StrOrBytesPath
100
+ from ..lowlevel import RunVar
101
+ from ..streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
102
+
103
+ if TYPE_CHECKING:
104
+ from _typeshed import FileDescriptorLike
105
+ else:
106
+ FileDescriptorLike = object
107
+
108
+ if sys.version_info >= (3, 10):
109
+ from typing import ParamSpec
110
+ else:
111
+ from typing_extensions import ParamSpec
112
+
113
+ if sys.version_info >= (3, 11):
114
+ from asyncio import Runner
115
+ from typing import TypeVarTuple, Unpack
116
+ else:
117
+ import contextvars
118
+ import enum
119
+ import signal
120
+ from asyncio import coroutines, events, exceptions, tasks
121
+
122
+ from exceptiongroup import BaseExceptionGroup
123
+ from typing_extensions import TypeVarTuple, Unpack
124
+
125
+ class _State(enum.Enum):
126
+ CREATED = "created"
127
+ INITIALIZED = "initialized"
128
+ CLOSED = "closed"
129
+
130
+ class Runner:
131
+ # Copied from CPython 3.11
132
+ def __init__(
133
+ self,
134
+ *,
135
+ debug: bool | None = None,
136
+ loop_factory: Callable[[], AbstractEventLoop] | None = None,
137
+ ):
138
+ self._state = _State.CREATED
139
+ self._debug = debug
140
+ self._loop_factory = loop_factory
141
+ self._loop: AbstractEventLoop | None = None
142
+ self._context = None
143
+ self._interrupt_count = 0
144
+ self._set_event_loop = False
145
+
146
+ def __enter__(self) -> Runner:
147
+ self._lazy_init()
148
+ return self
149
+
150
+ def __exit__(
151
+ self,
152
+ exc_type: type[BaseException],
153
+ exc_val: BaseException,
154
+ exc_tb: TracebackType,
155
+ ) -> None:
156
+ self.close()
157
+
158
+ def close(self) -> None:
159
+ """Shutdown and close event loop."""
160
+ if self._state is not _State.INITIALIZED:
161
+ return
162
+ try:
163
+ loop = self._loop
164
+ _cancel_all_tasks(loop)
165
+ loop.run_until_complete(loop.shutdown_asyncgens())
166
+ if hasattr(loop, "shutdown_default_executor"):
167
+ loop.run_until_complete(loop.shutdown_default_executor())
168
+ else:
169
+ loop.run_until_complete(_shutdown_default_executor(loop))
170
+ finally:
171
+ if self._set_event_loop:
172
+ events.set_event_loop(None)
173
+ loop.close()
174
+ self._loop = None
175
+ self._state = _State.CLOSED
176
+
177
+ def get_loop(self) -> AbstractEventLoop:
178
+ """Return embedded event loop."""
179
+ self._lazy_init()
180
+ return self._loop
181
+
182
+ def run(self, coro: Coroutine[T_Retval], *, context=None) -> T_Retval:
183
+ """Run a coroutine inside the embedded event loop."""
184
+ if not coroutines.iscoroutine(coro):
185
+ raise ValueError(f"a coroutine was expected, got {coro!r}")
186
+
187
+ if events._get_running_loop() is not None:
188
+ # fail fast with short traceback
189
+ raise RuntimeError(
190
+ "Runner.run() cannot be called from a running event loop"
191
+ )
192
+
193
+ self._lazy_init()
194
+
195
+ if context is None:
196
+ context = self._context
197
+ task = context.run(self._loop.create_task, coro)
198
+
199
+ if (
200
+ threading.current_thread() is threading.main_thread()
201
+ and signal.getsignal(signal.SIGINT) is signal.default_int_handler
202
+ ):
203
+ sigint_handler = partial(self._on_sigint, main_task=task)
204
+ try:
205
+ signal.signal(signal.SIGINT, sigint_handler)
206
+ except ValueError:
207
+ # `signal.signal` may throw if `threading.main_thread` does
208
+ # not support signals (e.g. embedded interpreter with signals
209
+ # not registered - see gh-91880)
210
+ sigint_handler = None
211
+ else:
212
+ sigint_handler = None
213
+
214
+ self._interrupt_count = 0
215
+ try:
216
+ return self._loop.run_until_complete(task)
217
+ except exceptions.CancelledError:
218
+ if self._interrupt_count > 0:
219
+ uncancel = getattr(task, "uncancel", None)
220
+ if uncancel is not None and uncancel() == 0:
221
+ raise KeyboardInterrupt()
222
+ raise # CancelledError
223
+ finally:
224
+ if (
225
+ sigint_handler is not None
226
+ and signal.getsignal(signal.SIGINT) is sigint_handler
227
+ ):
228
+ signal.signal(signal.SIGINT, signal.default_int_handler)
229
+
230
+ def _lazy_init(self) -> None:
231
+ if self._state is _State.CLOSED:
232
+ raise RuntimeError("Runner is closed")
233
+ if self._state is _State.INITIALIZED:
234
+ return
235
+ if self._loop_factory is None:
236
+ self._loop = events.new_event_loop()
237
+ if not self._set_event_loop:
238
+ # Call set_event_loop only once to avoid calling
239
+ # attach_loop multiple times on child watchers
240
+ events.set_event_loop(self._loop)
241
+ self._set_event_loop = True
242
+ else:
243
+ self._loop = self._loop_factory()
244
+ if self._debug is not None:
245
+ self._loop.set_debug(self._debug)
246
+ self._context = contextvars.copy_context()
247
+ self._state = _State.INITIALIZED
248
+
249
+ def _on_sigint(self, signum, frame, main_task: asyncio.Task) -> None:
250
+ self._interrupt_count += 1
251
+ if self._interrupt_count == 1 and not main_task.done():
252
+ main_task.cancel()
253
+ # wakeup loop if it is blocked by select() with long timeout
254
+ self._loop.call_soon_threadsafe(lambda: None)
255
+ return
256
+ raise KeyboardInterrupt()
257
+
258
+ def _cancel_all_tasks(loop: AbstractEventLoop) -> None:
259
+ to_cancel = tasks.all_tasks(loop)
260
+ if not to_cancel:
261
+ return
262
+
263
+ for task in to_cancel:
264
+ task.cancel()
265
+
266
+ loop.run_until_complete(tasks.gather(*to_cancel, return_exceptions=True))
267
+
268
+ for task in to_cancel:
269
+ if task.cancelled():
270
+ continue
271
+ if task.exception() is not None:
272
+ loop.call_exception_handler(
273
+ {
274
+ "message": "unhandled exception during asyncio.run() shutdown",
275
+ "exception": task.exception(),
276
+ "task": task,
277
+ }
278
+ )
279
+
280
async def _shutdown_default_executor(loop: AbstractEventLoop) -> None:
    """Schedule the shutdown of the default executor."""

    # Backport of AbstractEventLoop.shutdown_default_executor() for loops that
    # predate it; relies on private BaseEventLoop attributes
    # (_default_executor, _executor_shutdown_called).
    def _do_shutdown(future: asyncio.futures.Future) -> None:
        # Runs in a separate thread; reports completion back to the loop
        try:
            loop._default_executor.shutdown(wait=True)  # type: ignore[attr-defined]
            loop.call_soon_threadsafe(future.set_result, None)
        except Exception as ex:
            loop.call_soon_threadsafe(future.set_exception, ex)

    loop._executor_shutdown_called = True
    if loop._default_executor is None:
        # No executor was ever created; nothing to shut down
        return
    future = loop.create_future()
    thread = threading.Thread(target=_do_shutdown, args=(future,))
    thread.start()
    try:
        await future
    finally:
        # Always reap the helper thread, even if awaiting was cancelled
        thread.join()
300
+
301
+
302
# Generic return type of runnable callables
T_Retval = TypeVar("T_Retval")
# Value type accepted by TaskStatus.started()
T_contra = TypeVar("T_contra", contravariant=True)
# Positional argument types forwarded to spawned callables
PosArgsT = TypeVarTuple("PosArgsT")
P = ParamSpec("P")

# Caches the root task of the current event loop (see find_root_task())
_root_task: RunVar[asyncio.Task | None] = RunVar("_root_task")
308
+
309
+
310
def find_root_task() -> asyncio.Task:
    """
    Return the topmost task of the current event loop.

    Prefers the cached root task; otherwise scans for a task driven by
    run_until_complete() (inspecting private ``_callbacks``, including uvloop's
    equivalent), and finally falls back to the host task of the outermost
    AnyIO cancel scope, or the current task itself.
    """
    root_task = _root_task.get(None)
    if root_task is not None and not root_task.done():
        return root_task

    # Look for a task that has been started via run_until_complete()
    for task in all_tasks():
        if task._callbacks and not task.done():
            callbacks = [cb for cb, context in task._callbacks]
            for cb in callbacks:
                if (
                    cb is _run_until_complete_cb
                    or getattr(cb, "__module__", None) == "uvloop.loop"
                ):
                    # Cache the result so later calls skip the scan
                    _root_task.set(task)
                    return task

    # Look up the topmost task in the AnyIO task tree, if possible
    task = cast(asyncio.Task, current_task())
    state = _task_states.get(task)
    if state:
        cancel_scope = state.cancel_scope
        while cancel_scope and cancel_scope._parent_scope is not None:
            cancel_scope = cancel_scope._parent_scope

        if cancel_scope is not None:
            return cast(asyncio.Task, cancel_scope._host_task)

    return task
339
+
340
+
341
def get_callable_name(func: Callable) -> str:
    """Return a ``module.qualname`` style identifier for *func*.

    Missing parts are simply omitted, so the result may be just the qualified
    name, just the module, or an empty string.
    """
    parts = (getattr(func, "__module__", None), getattr(func, "__qualname__", None))
    return ".".join(part for part in parts if part)
345
+
346
+
347
#
# Event loop
#

# Per-event-loop storage backing RunVar lookups; weak keys let loops be GCed
_run_vars: WeakKeyDictionary[asyncio.AbstractEventLoop, Any] = WeakKeyDictionary()
352
+
353
+
354
+ def _task_started(task: asyncio.Task) -> bool:
355
+ """Return ``True`` if the task has been started and has not finished."""
356
+ # The task coro should never be None here, as we never add finished tasks to the
357
+ # task list
358
+ coro = task.get_coro()
359
+ assert coro is not None
360
+ try:
361
+ return getcoroutinestate(coro) in (CORO_RUNNING, CORO_SUSPENDED)
362
+ except AttributeError:
363
+ # task coro is async_genenerator_asend https://bugs.python.org/issue37771
364
+ raise Exception(f"Cannot determine if task {task} has started or not") from None
365
+
366
+
367
+ #
368
+ # Timeouts and cancellation
369
+ #
370
+
371
+
372
def is_anyio_cancellation(exc: CancelledError) -> bool:
    """Return ``True`` if *exc* was raised by an AnyIO cancel scope.

    Sometimes third party frameworks catch a CancelledError and raise a new
    one, so the ``__context__`` chain is walked as well, looking for the
    cancel scope's message on any CancelledError in it.
    """
    current: BaseException | None = exc
    while isinstance(current, CancelledError):
        args = current.args
        if (
            args
            and isinstance(args[0], str)
            and args[0].startswith("Cancelled by cancel scope ")
        ):
            return True

        current = current.__context__

    return False
389
+
390
+
391
class CancelScope(BaseCancelScope):
    """Asyncio implementation of the AnyIO cancel scope.

    Tracks the tasks and child scopes it contains and repeatedly delivers
    ``Task.cancel()`` calls to them until they unwind, pairing each delivered
    cancellation with an ``uncancel()`` on Python 3.11+.
    """

    def __new__(
        cls, *, deadline: float = math.inf, shield: bool = False
    ) -> CancelScope:
        return object.__new__(cls)

    def __init__(self, deadline: float = math.inf, shield: bool = False):
        self._deadline = deadline
        self._shield = shield
        self._parent_scope: CancelScope | None = None
        self._child_scopes: set[CancelScope] = set()
        self._cancel_called = False
        self._cancelled_caught = False
        self._active = False
        self._timeout_handle: asyncio.TimerHandle | None = None
        self._cancel_handle: asyncio.Handle | None = None
        self._tasks: set[asyncio.Task] = set()
        self._host_task: asyncio.Task | None = None
        if sys.version_info >= (3, 11):
            # Counts cancellations we delivered to the host task so they can be
            # balanced with uncancel() calls on exit
            self._pending_uncancellations: int | None = 0
        else:
            self._pending_uncancellations = None

    def __enter__(self) -> CancelScope:
        if self._active:
            raise RuntimeError(
                "Each CancelScope may only be used for a single 'with' block"
            )

        self._host_task = host_task = cast(asyncio.Task, current_task())
        self._tasks.add(host_task)
        try:
            task_state = _task_states[host_task]
        except KeyError:
            task_state = TaskState(None, self)
            _task_states[host_task] = task_state
        else:
            self._parent_scope = task_state.cancel_scope
            task_state.cancel_scope = self
            if self._parent_scope is not None:
                # If using an eager task factory, the parent scope may not even contain
                # the host task
                self._parent_scope._child_scopes.add(self)
                self._parent_scope._tasks.discard(host_task)

        self._timeout()
        self._active = True

        # Start cancelling the host task if the scope was cancelled before entering
        if self._cancel_called:
            self._deliver_cancellation(self)

        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool:
        del exc_tb

        if not self._active:
            raise RuntimeError("This cancel scope is not active")
        if current_task() is not self._host_task:
            raise RuntimeError(
                "Attempted to exit cancel scope in a different task than it was "
                "entered in"
            )

        assert self._host_task is not None
        host_task_state = _task_states.get(self._host_task)
        if host_task_state is None or host_task_state.cancel_scope is not self:
            raise RuntimeError(
                "Attempted to exit a cancel scope that isn't the current tasks's "
                "current cancel scope"
            )

        try:
            self._active = False
            if self._timeout_handle:
                self._timeout_handle.cancel()
                self._timeout_handle = None

            # Hand the host task back to the parent scope
            self._tasks.remove(self._host_task)
            if self._parent_scope is not None:
                self._parent_scope._child_scopes.remove(self)
                self._parent_scope._tasks.add(self._host_task)

            host_task_state.cancel_scope = self._parent_scope

            # Restart the cancellation effort in the closest visible, cancelled parent
            # scope if necessary
            self._restart_cancellation_in_parent()

            # We only swallow the exception iff it was an AnyIO CancelledError, either
            # directly as exc_val or inside an exception group and there are no cancelled
            # parent cancel scopes visible to us here
            if self._cancel_called and not self._parent_cancellation_is_visible_to_us:
                # For each level-cancel() call made on the host task, call uncancel()
                while self._pending_uncancellations:
                    self._host_task.uncancel()
                    self._pending_uncancellations -= 1

                # Update cancelled_caught and check for exceptions we must not swallow
                cannot_swallow_exc_val = False
                if exc_val is not None:
                    for exc in iterate_exceptions(exc_val):
                        if isinstance(exc, CancelledError) and is_anyio_cancellation(
                            exc
                        ):
                            self._cancelled_caught = True
                        else:
                            cannot_swallow_exc_val = True

                return self._cancelled_caught and not cannot_swallow_exc_val
            else:
                if self._pending_uncancellations:
                    # Not our cancellation to swallow; let the parent scope
                    # perform the matching uncancel() calls
                    assert self._parent_scope is not None
                    assert self._parent_scope._pending_uncancellations is not None
                    self._parent_scope._pending_uncancellations += (
                        self._pending_uncancellations
                    )
                    self._pending_uncancellations = 0

                return False
        finally:
            self._host_task = None
            del exc_val

    @property
    def _effectively_cancelled(self) -> bool:
        # True if this scope or any unshielded ancestor has been cancelled
        cancel_scope: CancelScope | None = self
        while cancel_scope is not None:
            if cancel_scope._cancel_called:
                return True

            if cancel_scope.shield:
                return False

            cancel_scope = cancel_scope._parent_scope

        return False

    @property
    def _parent_cancellation_is_visible_to_us(self) -> bool:
        return (
            self._parent_scope is not None
            and not self.shield
            and self._parent_scope._effectively_cancelled
        )

    def _timeout(self) -> None:
        # Arm (or fire) the deadline timer
        if self._deadline != math.inf:
            loop = get_running_loop()
            if loop.time() >= self._deadline:
                self.cancel()
            else:
                self._timeout_handle = loop.call_at(self._deadline, self._timeout)

    def _deliver_cancellation(self, origin: CancelScope) -> bool:
        """
        Deliver cancellation to directly contained tasks and nested cancel scopes.

        Schedule another run at the end if we still have tasks eligible for
        cancellation.

        :param origin: the cancel scope that originated the cancellation
        :return: ``True`` if the delivery needs to be retried on the next cycle

        """
        should_retry = False
        current = current_task()
        for task in self._tasks:
            should_retry = True
            if task._must_cancel:  # type: ignore[attr-defined]
                continue

            # The task is eligible for cancellation if it has started
            if task is not current and (task is self._host_task or _task_started(task)):
                waiter = task._fut_waiter  # type: ignore[attr-defined]
                if not isinstance(waiter, asyncio.Future) or not waiter.done():
                    task.cancel(f"Cancelled by cancel scope {id(origin):x}")
                    if (
                        task is origin._host_task
                        and origin._pending_uncancellations is not None
                    ):
                        origin._pending_uncancellations += 1

        # Deliver cancellation to child scopes that aren't shielded or running their own
        # cancellation callbacks
        for scope in self._child_scopes:
            if not scope._shield and not scope.cancel_called:
                should_retry = scope._deliver_cancellation(origin) or should_retry

        # Schedule another callback if there are still tasks left
        if origin is self:
            if should_retry:
                self._cancel_handle = get_running_loop().call_soon(
                    self._deliver_cancellation, origin
                )
            else:
                self._cancel_handle = None

        return should_retry

    def _restart_cancellation_in_parent(self) -> None:
        """
        Restart the cancellation effort in the closest directly cancelled parent scope.

        """
        scope = self._parent_scope
        while scope is not None:
            if scope._cancel_called:
                if scope._cancel_handle is None:
                    scope._deliver_cancellation(scope)

                break

            # No point in looking beyond any shielded scope
            if scope._shield:
                break

            scope = scope._parent_scope

    def cancel(self) -> None:
        # Idempotent; delivery only starts once the scope has been entered
        if not self._cancel_called:
            if self._timeout_handle:
                self._timeout_handle.cancel()
                self._timeout_handle = None

            self._cancel_called = True
            if self._host_task is not None:
                self._deliver_cancellation(self)

    @property
    def deadline(self) -> float:
        return self._deadline

    @deadline.setter
    def deadline(self, value: float) -> None:
        # Re-arm the timer against the new deadline
        self._deadline = float(value)
        if self._timeout_handle is not None:
            self._timeout_handle.cancel()
            self._timeout_handle = None

        if self._active and not self._cancel_called:
            self._timeout()

    @property
    def cancel_called(self) -> bool:
        return self._cancel_called

    @property
    def cancelled_caught(self) -> bool:
        return self._cancelled_caught

    @property
    def shield(self) -> bool:
        return self._shield

    @shield.setter
    def shield(self, value: bool) -> None:
        if self._shield != value:
            self._shield = value
            if not value:
                # Dropping the shield may expose us to a cancelled parent scope
                self._restart_cancellation_in_parent()
658
+
659
+
660
+ #
661
+ # Task states
662
+ #
663
+
664
+
665
class TaskState:
    """
    Encapsulates auxiliary task information that cannot be added to the Task instance
    itself because there are no guarantees about its implementation.
    """

    __slots__ = "parent_id", "cancel_scope", "__weakref__"

    def __init__(self, parent_id: int | None, cancel_scope: CancelScope | None):
        # id() of the task that spawned this one, if any
        self.parent_id = parent_id
        # The cancel scope the task currently resides in, if any
        self.cancel_scope = cancel_scope
676
+
677
+
678
# Maps tasks to their AnyIO-specific state; weak keys let finished tasks be GCed
_task_states: WeakKeyDictionary[asyncio.Task, TaskState] = WeakKeyDictionary()
679
+
680
+
681
+ #
682
+ # Task groups
683
+ #
684
+
685
+
686
class _AsyncioTaskStatus(abc.TaskStatus):
    """Reports a started task's initial value back to TaskGroup.start()."""

    def __init__(self, future: asyncio.Future, parent_id: int):
        self._future = future
        self._parent_id = parent_id

    def started(self, value: T_contra | None = None) -> None:
        try:
            self._future.set_result(value)
        except asyncio.InvalidStateError:
            # A cancelled future means the starter went away; only a second
            # started() call is an error
            if not self._future.cancelled():
                raise RuntimeError(
                    "called 'started' twice on the same task status"
                ) from None

        # Reparent the task to the caller of TaskGroup.start()
        task = cast(asyncio.Task, current_task())
        _task_states[task].parent_id = self._parent_id
702
+
703
+
704
# Code object of asyncio.eager_task_factory (3.12+), used to recognize loops
# configured with an eager task factory so their custom task constructor can be
# reused when spawning tasks
if sys.version_info >= (3, 12):
    _eager_task_factory_code: CodeType | None = asyncio.eager_task_factory.__code__
else:
    _eager_task_factory_code = None
708
+
709
+
710
class TaskGroup(abc.TaskGroup):
    """Asyncio implementation of the AnyIO task group.

    Child tasks share the group's cancel scope; on exit the group waits for
    all children and raises their errors as a BaseExceptionGroup.
    """

    def __init__(self) -> None:
        self.cancel_scope: CancelScope = CancelScope()
        self._active = False
        self._exceptions: list[BaseException] = []
        self._tasks: set[asyncio.Task] = set()
        # Completed by task_done() when the last child task finishes
        self._on_completed_fut: asyncio.Future[None] | None = None

    async def __aenter__(self) -> TaskGroup:
        self.cancel_scope.__enter__()
        self._active = True
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        try:
            # An exception in the body cancels all child tasks
            if exc_val is not None:
                self.cancel_scope.cancel()
                if not isinstance(exc_val, CancelledError):
                    self._exceptions.append(exc_val)

            loop = get_running_loop()
            try:
                if self._tasks:
                    with CancelScope() as wait_scope:
                        while self._tasks:
                            self._on_completed_fut = loop.create_future()

                            try:
                                await self._on_completed_fut
                            except CancelledError as exc:
                                # Shield the scope against further cancellation attempts,
                                # as they're not productive (#695)
                                wait_scope.shield = True
                                self.cancel_scope.cancel()

                                # Set exc_val from the cancellation exception if it was
                                # previously unset. However, we should not replace a native
                                # cancellation exception with one raise by a cancel scope.
                                if exc_val is None or (
                                    isinstance(exc_val, CancelledError)
                                    and not is_anyio_cancellation(exc)
                                ):
                                    exc_val = exc

                            self._on_completed_fut = None
                else:
                    # If there are no child tasks to wait on, run at least one checkpoint
                    # anyway
                    await AsyncIOBackend.cancel_shielded_checkpoint()

                self._active = False
                if self._exceptions:
                    raise BaseExceptionGroup(
                        "unhandled errors in a TaskGroup", self._exceptions
                    )
                elif exc_val:
                    raise exc_val
            except BaseException as exc:
                # Give the cancel scope a chance to swallow its own cancellation
                if self.cancel_scope.__exit__(type(exc), exc, exc.__traceback__):
                    return True

                raise

            return self.cancel_scope.__exit__(exc_type, exc_val, exc_tb)
        finally:
            # Break reference cycles with raised exceptions
            del exc_val, exc_tb, self._exceptions

    def _spawn(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
        args: tuple[Unpack[PosArgsT]],
        name: object,
        task_status_future: asyncio.Future | None = None,
    ) -> asyncio.Task:
        # Done-callback: detach the task, wake __aexit__, and route its outcome
        def task_done(_task: asyncio.Task) -> None:
            task_state = _task_states[_task]
            assert task_state.cancel_scope is not None
            assert _task in task_state.cancel_scope._tasks
            task_state.cancel_scope._tasks.remove(_task)
            self._tasks.remove(task)
            del _task_states[_task]

            if self._on_completed_fut is not None and not self._tasks:
                try:
                    self._on_completed_fut.set_result(None)
                except asyncio.InvalidStateError:
                    pass

            try:
                exc = _task.exception()
            except CancelledError as e:
                # Unwrap re-raised cancellations to find the original one
                while isinstance(e.__context__, CancelledError):
                    e = e.__context__

                exc = e

            if exc is not None:
                # The future can only be in the cancelled state if the host task was
                # cancelled, so return immediately instead of adding one more
                # CancelledError to the exceptions list
                if task_status_future is not None and task_status_future.cancelled():
                    return

                if task_status_future is None or task_status_future.done():
                    if not isinstance(exc, CancelledError):
                        self._exceptions.append(exc)

                    if not self.cancel_scope._effectively_cancelled:
                        self.cancel_scope.cancel()
                else:
                    task_status_future.set_exception(exc)
            elif task_status_future is not None and not task_status_future.done():
                task_status_future.set_exception(
                    RuntimeError("Child exited without calling task_status.started()")
                )

        if not self._active:
            raise RuntimeError(
                "This task group is not active; no new tasks can be started."
            )

        kwargs = {}
        if task_status_future:
            parent_id = id(current_task())
            kwargs["task_status"] = _AsyncioTaskStatus(
                task_status_future, id(self.cancel_scope._host_task)
            )
        else:
            parent_id = id(self.cancel_scope._host_task)

        coro = func(*args, **kwargs)
        if not iscoroutine(coro):
            prefix = f"{func.__module__}." if hasattr(func, "__module__") else ""
            raise TypeError(
                f"Expected {prefix}{func.__qualname__}() to return a coroutine, but "
                f"the return value ({coro!r}) is not a coroutine object"
            )

        name = get_callable_name(func) if name is None else str(name)
        loop = asyncio.get_running_loop()
        # Use the loop's custom task constructor if an eager task factory is set
        if (
            (factory := loop.get_task_factory())
            and getattr(factory, "__code__", None) is _eager_task_factory_code
            and (closure := getattr(factory, "__closure__", None))
        ):
            custom_task_constructor = closure[0].cell_contents
            task = custom_task_constructor(coro, loop=loop, name=name)
        else:
            task = create_task(coro, name=name)

        # Make the spawned task inherit the task group's cancel scope
        _task_states[task] = TaskState(
            parent_id=parent_id, cancel_scope=self.cancel_scope
        )
        self.cancel_scope._tasks.add(task)
        self._tasks.add(task)
        task.add_done_callback(task_done)
        return task

    def start_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> None:
        self._spawn(func, args, name)

    async def start(
        self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
    ) -> Any:
        future: asyncio.Future = asyncio.Future()
        task = self._spawn(func, args, name, future)

        # If the task raises an exception after sending a start value without a switch
        # point between, the task group is cancelled and this method never proceeds to
        # process the completed future. That's why we have to have a shielded cancel
        # scope here.
        try:
            return await future
        except CancelledError:
            # Cancel the task and wait for it to exit before returning
            task.cancel()
            with CancelScope(shield=True), suppress(CancelledError):
                await task

            raise
901
+
902
+
903
+ #
904
+ # Threads
905
+ #
906
+
907
# (result, exception) pair used to ship an outcome across a queue
_Retval_Queue_Type = tuple[Optional[T_Retval], Optional[BaseException]]
908
+
909
+
910
class WorkerThread(Thread):
    """Worker thread that runs synchronous jobs on behalf of an event loop.

    Jobs arrive over ``self.queue`` and results are posted back to the loop
    with ``call_soon_threadsafe()``.
    """

    # How long a worker may sit idle before being retired (seconds)
    MAX_IDLE_TIME = 10  # seconds

    def __init__(
        self,
        root_task: asyncio.Task,
        workers: set[WorkerThread],
        idle_workers: deque[WorkerThread],
    ):
        super().__init__(name="AnyIO worker thread")
        self.root_task = root_task
        self.workers = workers
        self.idle_workers = idle_workers
        self.loop = root_task._loop
        # Each queued item is (context, func, args, result future, cancel scope);
        # None is the shutdown sentinel
        self.queue: Queue[
            tuple[Context, Callable, tuple, asyncio.Future, CancelScope] | None
        ] = Queue(2)
        self.idle_since = AsyncIOBackend.current_time()
        self.stopping = False

    def _report_result(
        self, future: asyncio.Future, result: Any, exc: BaseException | None
    ) -> None:
        # Runs in the event loop thread; returns this worker to the idle pool
        self.idle_since = AsyncIOBackend.current_time()
        if not self.stopping:
            self.idle_workers.append(self)

        if not future.cancelled():
            if exc is not None:
                if isinstance(exc, StopIteration):
                    # StopIteration can't propagate through a Future/coroutine
                    new_exc = RuntimeError("coroutine raised StopIteration")
                    new_exc.__cause__ = exc
                    exc = new_exc

                future.set_exception(exc)
            else:
                future.set_result(result)

    def run(self) -> None:
        with claim_worker_thread(AsyncIOBackend, self.loop):
            while True:
                item = self.queue.get()
                if item is None:
                    # Shutdown command received
                    return

                context, func, args, future, cancel_scope = item
                if not future.cancelled():
                    result = None
                    exception: BaseException | None = None
                    threadlocals.current_cancel_scope = cancel_scope
                    try:
                        result = context.run(func, *args)
                    except BaseException as exc:
                        exception = exc
                    finally:
                        del threadlocals.current_cancel_scope

                    if not self.loop.is_closed():
                        self.loop.call_soon_threadsafe(
                            self._report_result, future, result, exception
                        )

                self.queue.task_done()

    def stop(self, f: asyncio.Task | None = None) -> None:
        # May be used directly or as a done callback (hence the unused arg)
        self.stopping = True
        self.queue.put_nowait(None)
        self.workers.discard(self)
        try:
            self.idle_workers.remove(self)
        except ValueError:
            pass
983
+
984
+
985
# Per-event-loop pools of worker threads used for running sync functions
_threadpool_idle_workers: RunVar[deque[WorkerThread]] = RunVar(
    "_threadpool_idle_workers"
)
_threadpool_workers: RunVar[set[WorkerThread]] = RunVar("_threadpool_workers")
989
+
990
+
991
class BlockingPortal(abc.BlockingPortal):
    """Asyncio implementation of the AnyIO blocking portal."""

    def __new__(cls) -> BlockingPortal:
        return object.__new__(cls)

    def __init__(self) -> None:
        super().__init__()
        # Must be created inside a running loop; tasks are spawned onto it
        self._loop = get_running_loop()

    def _spawn_task_from_thread(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        name: object,
        future: Future[T_Retval],
    ) -> None:
        # Hop from the calling thread into the portal's event loop to start
        # the task; the concurrent.futures.Future carries back the outcome
        AsyncIOBackend.run_sync_from_thread(
            partial(self._task_group.start_soon, name=name),
            (self._call_func, func, args, kwargs, future),
            self._loop,
        )
1012
+
1013
+
1014
+ #
1015
+ # Subprocesses
1016
+ #
1017
+
1018
+
1019
@dataclass(eq=False)
class StreamReaderWrapper(abc.ByteReceiveStream):
    """Adapts an :class:`asyncio.StreamReader` to the anyio byte stream API."""

    _stream: asyncio.StreamReader

    async def receive(self, max_bytes: int = 65536) -> bytes:
        chunk = await self._stream.read(max_bytes)
        if not chunk:
            # An empty read means the peer closed the connection
            raise EndOfStream

        return chunk

    async def aclose(self) -> None:
        # Make any pending/future reads fail with ClosedResourceError
        self._stream.set_exception(ClosedResourceError())
        await AsyncIOBackend.checkpoint()
1033
+
1034
+
1035
@dataclass(eq=False)
class StreamWriterWrapper(abc.ByteSendStream):
    """Adapts an :class:`asyncio.StreamWriter` to the anyio byte stream API."""

    _stream: asyncio.StreamWriter

    async def send(self, item: bytes) -> None:
        self._stream.write(item)
        # Respect flow control: wait until the transport buffer drains
        await self._stream.drain()

    async def aclose(self) -> None:
        self._stream.close()
        await AsyncIOBackend.checkpoint()
1046
+
1047
+
1048
@dataclass(eq=False)
class Process(abc.Process):
    """asyncio implementation of the anyio subprocess interface."""

    _process: asyncio.subprocess.Process
    _stdin: StreamWriterWrapper | None
    _stdout: StreamReaderWrapper | None
    _stderr: StreamReaderWrapper | None

    async def aclose(self) -> None:
        """Close the standard streams and wait for the process to exit.

        Stream closing is shielded from cancellation; if waiting for the
        process is then cancelled, the process is killed (shielded again)
        before the cancellation is re-raised.
        """
        with CancelScope(shield=True) as scope:
            if self._stdin:
                await self._stdin.aclose()
            if self._stdout:
                await self._stdout.aclose()
            if self._stderr:
                await self._stderr.aclose()

            # Allow cancellation while waiting for the process to finish
            scope.shield = False
            try:
                await self.wait()
            except BaseException:
                # Cancelled (or failed): forcibly terminate, reap, re-raise
                scope.shield = True
                self.kill()
                await self.wait()
                raise

    async def wait(self) -> int:
        """Wait for the process to exit and return its exit code."""
        return await self._process.wait()

    def terminate(self) -> None:
        self._process.terminate()

    def kill(self) -> None:
        self._process.kill()

    def send_signal(self, signal: int) -> None:
        self._process.send_signal(signal)

    @property
    def pid(self) -> int:
        return self._process.pid

    @property
    def returncode(self) -> int | None:
        # None while the process is still running
        return self._process.returncode

    @property
    def stdin(self) -> abc.ByteSendStream | None:
        return self._stdin

    @property
    def stdout(self) -> abc.ByteReceiveStream | None:
        return self._stdout

    @property
    def stderr(self) -> abc.ByteReceiveStream | None:
        return self._stderr
1104
+
1105
+
1106
def _forcibly_shutdown_process_pool_on_exit(
    workers: set[Process], _task: object
) -> None:
    """
    Forcibly shuts down worker processes belonging to this event loop."""
    # Child watchers only exist (and are needed) before Python 3.12
    child_watcher: asyncio.AbstractChildWatcher | None = None
    if sys.version_info < (3, 12):
        try:
            child_watcher = asyncio.get_event_loop_policy().get_child_watcher()
        except NotImplementedError:
            pass

    # Close as much as possible (w/o async/await) to avoid warnings
    for process in workers:
        if process.returncode is None:
            continue

        # Close the pipe transports directly, bypassing the async close paths
        process._stdin._stream._transport.close()  # type: ignore[union-attr]
        process._stdout._stream._transport.close()  # type: ignore[union-attr]
        process._stderr._stream._transport.close()  # type: ignore[union-attr]
        process.kill()
        if child_watcher:
            child_watcher.remove_child_handler(process.pid)
1129
+
1130
+
1131
async def _shutdown_process_pool_on_exit(workers: set[abc.Process]) -> None:
    """
    Shuts down worker processes belonging to this event loop.

    NOTE: this only works when the event loop was started using asyncio.run() or
    anyio.run().

    """
    process: abc.Process
    try:
        # Sleep forever; cancellation (at loop shutdown) triggers the cleanup
        await sleep(math.inf)
    except asyncio.CancelledError:
        for process in workers:
            if process.returncode is None:
                process.kill()

        for process in workers:
            await process.aclose()
1149
+
1150
+
1151
+ #
1152
+ # Sockets and networking
1153
+ #
1154
+
1155
+
1156
class StreamProtocol(asyncio.Protocol):
    """Protocol backing :class:`SocketStream`; buffers reads, tracks events."""

    # Chunks received but not yet consumed by SocketStream.receive()
    read_queue: deque[bytes]
    # Set whenever data/EOF/connection loss arrives; cleared by the consumer
    read_event: asyncio.Event
    # Set while writing is allowed (transport buffer below the high-water mark)
    write_event: asyncio.Event
    exception: Exception | None = None
    is_at_eof: bool = False

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        self.read_queue = deque()
        self.read_event = asyncio.Event()
        self.write_event = asyncio.Event()
        self.write_event.set()
        # Zero buffer limit so every write triggers pause/resume flow control
        cast(asyncio.Transport, transport).set_write_buffer_limits(0)

    def connection_lost(self, exc: Exception | None) -> None:
        if exc:
            self.exception = BrokenResourceError()
            self.exception.__cause__ = exc

        # Wake up any waiting readers/writers so they observe the failure
        self.read_event.set()
        self.write_event.set()

    def data_received(self, data: bytes) -> None:
        # ProactorEventloop sometimes sends bytearray instead of bytes
        self.read_queue.append(bytes(data))
        self.read_event.set()

    def eof_received(self) -> bool | None:
        self.is_at_eof = True
        self.read_event.set()
        # True keeps the transport open for writing after the peer's EOF
        return True

    def pause_writing(self) -> None:
        # A fresh (unset) event blocks subsequent senders until resumed
        self.write_event = asyncio.Event()

    def resume_writing(self) -> None:
        self.write_event.set()
1193
+
1194
+
1195
class DatagramProtocol(asyncio.DatagramProtocol):
    """Protocol backing the UDP socket classes; buffers received datagrams."""

    # (payload, sender address) pairs; bounded, so oldest packets are dropped
    read_queue: deque[tuple[bytes, IPSockAddrType]]
    read_event: asyncio.Event
    write_event: asyncio.Event
    exception: Exception | None = None

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        self.read_queue = deque(maxlen=100)  # arbitrary value
        self.read_event = asyncio.Event()
        self.write_event = asyncio.Event()
        self.write_event.set()

    def connection_lost(self, exc: Exception | None) -> None:
        # Wake any waiting receivers/senders so they notice the closure
        self.read_event.set()
        self.write_event.set()

    def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None:
        # Normalize 4-tuple IPv6 addresses to the anyio 2-tuple form
        addr = convert_ipv6_sockaddr(addr)
        self.read_queue.append((data, addr))
        self.read_event.set()

    def error_received(self, exc: Exception) -> None:
        self.exception = exc

    def pause_writing(self) -> None:
        self.write_event.clear()

    def resume_writing(self) -> None:
        self.write_event.set()
1224
+
1225
+
1226
class SocketStream(abc.SocketStream):
    """Transport/protocol based TCP stream implementation."""

    def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol):
        self._transport = transport
        self._protocol = protocol
        # Guards against concurrent use of the same operation by two tasks
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info("socket")

    async def receive(self, max_bytes: int = 65536) -> bytes:
        """Receive up to ``max_bytes`` from the peer.

        :raises ClosedResourceError: if the stream was closed locally
        :raises EndOfStream: if the peer closed the connection cleanly
        """
        with self._receive_guard:
            if (
                not self._protocol.read_event.is_set()
                and not self._transport.is_closing()
                and not self._protocol.is_at_eof
            ):
                # No buffered data: resume reading just long enough to get some
                self._transport.resume_reading()
                await self._protocol.read_event.wait()
                self._transport.pause_reading()
            else:
                # Data already buffered; still yield to the event loop once
                await AsyncIOBackend.checkpoint()

            try:
                chunk = self._protocol.read_queue.popleft()
            except IndexError:
                # Empty queue despite a set event: figure out why
                if self._closed:
                    raise ClosedResourceError from None
                elif self._protocol.exception:
                    raise self._protocol.exception from None
                else:
                    raise EndOfStream from None

            if len(chunk) > max_bytes:
                # Split the oversized chunk
                chunk, leftover = chunk[:max_bytes], chunk[max_bytes:]
                self._protocol.read_queue.appendleft(leftover)

            # If the read queue is empty, clear the flag so that the next call will
            # block until data is available
            if not self._protocol.read_queue:
                self._protocol.read_event.clear()

            return chunk

    async def send(self, item: bytes) -> None:
        """Send ``item`` to the peer, honoring transport flow control."""
        with self._send_guard:
            await AsyncIOBackend.checkpoint()

            if self._closed:
                raise ClosedResourceError
            elif self._protocol.exception is not None:
                raise self._protocol.exception

            try:
                self._transport.write(item)
            except RuntimeError as exc:
                # asyncio raises RuntimeError when writing to a closing transport
                if self._transport.is_closing():
                    raise BrokenResourceError from exc
                else:
                    raise

            # Wait until the transport has drained (buffer limit is zero)
            await self._protocol.write_event.wait()

    async def send_eof(self) -> None:
        """Half-close: signal EOF to the peer while keeping the read side open."""
        try:
            self._transport.write_eof()
        except OSError:
            pass

    async def aclose(self) -> None:
        """Close the stream, attempting a graceful shutdown before aborting."""
        if not self._transport.is_closing():
            self._closed = True
            try:
                self._transport.write_eof()
            except OSError:
                pass

            self._transport.close()
            # Give the transport one loop iteration to close gracefully,
            # then abort whatever is left
            await sleep(0)
            self._transport.abort()
1309
+
1310
+
1311
class _RawSocketMixin:
    """Shared plumbing for classes that drive a non-blocking raw socket
    directly with loop readers/writers (UNIX stream and datagram sockets)."""

    _receive_future: asyncio.Future | None = None
    _send_future: asyncio.Future | None = None
    _closing = False

    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket

    def _wait_until_readable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
        """Return a future that resolves when the socket becomes readable."""
        def callback(f: object) -> None:
            # Clean up the reader registration whether resolved or cancelled
            del self._receive_future
            loop.remove_reader(self.__raw_socket)

        f = self._receive_future = asyncio.Future()
        loop.add_reader(self.__raw_socket, f.set_result, None)
        f.add_done_callback(callback)
        return f

    def _wait_until_writable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
        """Return a future that resolves when the socket becomes writable."""
        def callback(f: object) -> None:
            del self._send_future
            loop.remove_writer(self.__raw_socket)

        f = self._send_future = asyncio.Future()
        loop.add_writer(self.__raw_socket, f.set_result, None)
        f.add_done_callback(callback)
        return f

    async def aclose(self) -> None:
        if not self._closing:
            self._closing = True
            if self.__raw_socket.fileno() != -1:
                self.__raw_socket.close()

            # Wake up any task blocked waiting for readability/writability;
            # it will then see _closing and raise ClosedResourceError
            if self._receive_future:
                self._receive_future.set_result(None)
            if self._send_future:
                self._send_future.set_result(None)
1355
+
1356
+
1357
class UNIXSocketStream(_RawSocketMixin, abc.UNIXSocketStream):
    """UNIX domain stream socket driven directly via loop readers/writers."""

    async def send_eof(self) -> None:
        with self._send_guard:
            # Half-close the write side; the peer sees EOF
            self._raw_socket.shutdown(socket.SHUT_WR)

    async def receive(self, max_bytes: int = 65536) -> bytes:
        loop = get_running_loop()
        await AsyncIOBackend.checkpoint()
        with self._receive_guard:
            while True:
                try:
                    data = self._raw_socket.recv(max_bytes)
                except BlockingIOError:
                    # No data yet; wait for the socket to become readable
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    if not data:
                        # Zero-byte read means the peer closed the connection
                        raise EndOfStream

                    return data

    async def send(self, item: bytes) -> None:
        loop = get_running_loop()
        await AsyncIOBackend.checkpoint()
        with self._send_guard:
            # Use a memoryview to avoid copying on each partial send
            view = memoryview(item)
            while view:
                try:
                    bytes_sent = self._raw_socket.send(view)
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    view = view[bytes_sent:]

    async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
        """Receive a message plus open file descriptors (SCM_RIGHTS).

        :param msglen: maximum number of message bytes to receive
        :param maxfds: maximum number of file descriptors to receive
        :raises ValueError: on invalid ``msglen``/``maxfds``
        """
        if not isinstance(msglen, int) or msglen < 0:
            raise ValueError("msglen must be a non-negative integer")
        if not isinstance(maxfds, int) or maxfds < 1:
            raise ValueError("maxfds must be a positive integer")

        loop = get_running_loop()
        fds = array.array("i")
        await AsyncIOBackend.checkpoint()
        with self._receive_guard:
            while True:
                try:
                    # Size the ancillary buffer for up to maxfds descriptors
                    message, ancdata, flags, addr = self._raw_socket.recvmsg(
                        msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
                    )
                except BlockingIOError:
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    if not message and not ancdata:
                        raise EndOfStream

                    break

            for cmsg_level, cmsg_type, cmsg_data in ancdata:
                # Only SCM_RIGHTS (fd passing) is expected here
                if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
                    raise RuntimeError(
                        f"Received unexpected ancillary data; message = {message!r}, "
                        f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
                    )

                # Truncate any partial trailing descriptor before decoding
                fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])

            return message, list(fds)

    async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
        """Send a message along with open file descriptors (SCM_RIGHTS).

        :param message: non-empty message payload (required by the protocol)
        :param fds: file descriptors, given as ints or file-like objects
        :raises ValueError: if ``message`` or ``fds`` is empty
        """
        if not message:
            raise ValueError("message must not be empty")
        if not fds:
            raise ValueError("fds must not be empty")

        loop = get_running_loop()
        filenos: list[int] = []
        for fd in fds:
            if isinstance(fd, int):
                filenos.append(fd)
            elif isinstance(fd, IOBase):
                filenos.append(fd.fileno())

        fdarray = array.array("i", filenos)
        await AsyncIOBackend.checkpoint()
        with self._send_guard:
            while True:
                try:
                    # The ignore can be removed after mypy picks up
                    # https://github.com/python/typeshed/pull/5545
                    self._raw_socket.sendmsg(
                        [message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)]
                    )
                    break
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
1471
+
1472
+
1473
class TCPSocketListener(abc.SocketListener):
    """Listener that accepts TCP connections and wraps them in SocketStreams."""

    _accept_scope: CancelScope | None = None
    _closed = False

    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._loop = cast(asyncio.BaseEventLoop, get_running_loop())
        self._accept_guard = ResourceGuard("accepting connections from")

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket

    async def accept(self) -> abc.SocketStream:
        """Accept one incoming connection and return it as a SocketStream.

        :raises ClosedResourceError: if the listener was closed
        """
        if self._closed:
            raise ClosedResourceError

        with self._accept_guard:
            await AsyncIOBackend.checkpoint()
            # Track the scope so aclose() can cancel a pending accept
            with CancelScope() as self._accept_scope:
                try:
                    client_sock, _addr = await self._loop.sock_accept(self._raw_socket)
                except asyncio.CancelledError:
                    # Workaround for https://bugs.python.org/issue41317
                    try:
                        self._loop.remove_reader(self._raw_socket)
                    except (ValueError, NotImplementedError):
                        pass

                    if self._closed:
                        raise ClosedResourceError from None

                    raise
                finally:
                    self._accept_scope = None

        # Disable Nagle's algorithm on the accepted connection
        client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        transport, protocol = await self._loop.connect_accepted_socket(
            StreamProtocol, client_sock
        )
        return SocketStream(transport, protocol)

    async def aclose(self) -> None:
        if self._closed:
            return

        self._closed = True
        if self._accept_scope:
            # Workaround for https://bugs.python.org/issue41317
            try:
                self._loop.remove_reader(self._raw_socket)
            except (ValueError, NotImplementedError):
                pass

            self._accept_scope.cancel()
            # Let the cancelled accept() task run before closing the socket
            await sleep(0)

        self._raw_socket.close()
1531
+
1532
+
1533
class UNIXSocketListener(abc.SocketListener):
    """Listener for UNIX domain stream sockets."""

    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._loop = get_running_loop()
        self._accept_guard = ResourceGuard("accepting connections from")
        self._closed = False

    async def accept(self) -> abc.SocketStream:
        """Accept one incoming connection as a UNIXSocketStream."""
        await AsyncIOBackend.checkpoint()
        with self._accept_guard:
            while True:
                try:
                    client_sock, _ = self.__raw_socket.accept()
                    client_sock.setblocking(False)
                    return UNIXSocketStream(client_sock)
                except BlockingIOError:
                    # Nothing pending: wait for the socket to become readable
                    f: asyncio.Future = asyncio.Future()
                    self._loop.add_reader(self.__raw_socket, f.set_result, None)
                    f.add_done_callback(
                        lambda _: self._loop.remove_reader(self.__raw_socket)
                    )
                    await f
                except OSError as exc:
                    if self._closed:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc

    async def aclose(self) -> None:
        self._closed = True
        self.__raw_socket.close()

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket
1568
+
1569
+
1570
class UDPSocket(abc.UDPSocket):
    """Unconnected UDP socket; each packet carries an explicit address."""

    def __init__(
        self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
    ):
        self._transport = transport
        self._protocol = protocol
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info("socket")

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            self._transport.close()

    async def receive(self) -> tuple[bytes, IPSockAddrType]:
        """Receive one datagram as a ``(payload, sender_address)`` tuple."""
        with self._receive_guard:
            await AsyncIOBackend.checkpoint()

            # If the buffer is empty, ask for more data
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                await self._protocol.read_event.wait()

            try:
                return self._protocol.read_queue.popleft()
            except IndexError:
                # Woken with an empty queue: the socket was closed
                if self._closed:
                    raise ClosedResourceError from None
                else:
                    raise BrokenResourceError from None

    async def send(self, item: UDPPacketType) -> None:
        """Send one ``(payload, destination_address)`` datagram."""
        with self._send_guard:
            await AsyncIOBackend.checkpoint()
            # Honor flow control before queuing more data
            await self._protocol.write_event.wait()
            if self._closed:
                raise ClosedResourceError
            elif self._transport.is_closing():
                raise BrokenResourceError
            else:
                self._transport.sendto(*item)
1616
+
1617
+
1618
class ConnectedUDPSocket(abc.ConnectedUDPSocket):
    """UDP socket connected to a fixed peer; payloads only, no addresses."""

    def __init__(
        self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
    ):
        self._transport = transport
        self._protocol = protocol
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info("socket")

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            self._transport.close()

    async def receive(self) -> bytes:
        """Receive one datagram payload from the connected peer."""
        with self._receive_guard:
            await AsyncIOBackend.checkpoint()

            # If the buffer is empty, ask for more data
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                await self._protocol.read_event.wait()

            try:
                packet = self._protocol.read_queue.popleft()
            except IndexError:
                if self._closed:
                    raise ClosedResourceError from None
                else:
                    raise BrokenResourceError from None

            # Drop the sender address; the socket is connected to one peer
            return packet[0]

    async def send(self, item: bytes) -> None:
        """Send one datagram payload to the connected peer."""
        with self._send_guard:
            await AsyncIOBackend.checkpoint()
            await self._protocol.write_event.wait()
            if self._closed:
                raise ClosedResourceError
            elif self._transport.is_closing():
                raise BrokenResourceError
            else:
                self._transport.sendto(item)
1666
+
1667
+
1668
class UNIXDatagramSocket(_RawSocketMixin, abc.UNIXDatagramSocket):
    """Unconnected UNIX domain datagram socket."""

    async def receive(self) -> UNIXDatagramPacketType:
        """Receive one ``(payload, sender_path)`` datagram."""
        loop = get_running_loop()
        await AsyncIOBackend.checkpoint()
        with self._receive_guard:
            while True:
                try:
                    data = self._raw_socket.recvfrom(65536)
                except BlockingIOError:
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    return data

    async def send(self, item: UNIXDatagramPacketType) -> None:
        """Send one ``(payload, destination_path)`` datagram."""
        loop = get_running_loop()
        await AsyncIOBackend.checkpoint()
        with self._send_guard:
            while True:
                try:
                    self._raw_socket.sendto(*item)
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    return
1702
+
1703
+
1704
class ConnectedUNIXDatagramSocket(_RawSocketMixin, abc.ConnectedUNIXDatagramSocket):
    """UNIX domain datagram socket connected to a fixed peer."""

    async def receive(self) -> bytes:
        """Receive one datagram payload from the connected peer."""
        loop = get_running_loop()
        await AsyncIOBackend.checkpoint()
        with self._receive_guard:
            while True:
                try:
                    data = self._raw_socket.recv(65536)
                except BlockingIOError:
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    return data

    async def send(self, item: bytes) -> None:
        """Send one datagram payload to the connected peer."""
        loop = get_running_loop()
        await AsyncIOBackend.checkpoint()
        with self._send_guard:
            while True:
                try:
                    self._raw_socket.send(item)
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    return
1738
+
1739
+
1740
# Per-event-loop maps of file descriptor -> event, used by wait_readable /
# wait_writable style APIs
_read_events: RunVar[dict[int, asyncio.Event]] = RunVar("read_events")
_write_events: RunVar[dict[int, asyncio.Event]] = RunVar("write_events")
1742
+
1743
+
1744
+ #
1745
+ # Synchronization
1746
+ #
1747
+
1748
+
1749
class Event(BaseEvent):
    """asyncio implementation of anyio's Event, delegating to asyncio.Event."""

    def __new__(cls) -> Event:
        # Bypass the backend-dispatching factory in BaseEvent.__new__
        return object.__new__(cls)

    def __init__(self) -> None:
        self._event = asyncio.Event()

    def set(self) -> None:
        self._event.set()

    def is_set(self) -> bool:
        return self._event.is_set()

    async def wait(self) -> None:
        if self.is_set():
            # Already set: still yield once to honor checkpoint semantics
            await AsyncIOBackend.checkpoint()
        else:
            await self._event.wait()

    def statistics(self) -> EventStatistics:
        # Relies on the private _waiters attribute of asyncio.Event
        return EventStatistics(len(self._event._waiters))
1770
+
1771
+
1772
class Lock(BaseLock):
    """asyncio implementation of anyio's Lock with FIFO waiter handoff."""

    def __new__(cls, *, fast_acquire: bool = False) -> Lock:
        # Bypass the backend-dispatching factory in BaseLock.__new__
        return object.__new__(cls)

    def __init__(self, *, fast_acquire: bool = False) -> None:
        # fast_acquire skips the scheduling checkpoint on the uncontended path
        self._fast_acquire = fast_acquire
        self._owner_task: asyncio.Task | None = None
        self._waiters: deque[tuple[asyncio.Task, asyncio.Future]] = deque()

    async def acquire(self) -> None:
        """Acquire the lock, waiting in FIFO order if it is held.

        :raises RuntimeError: if this task already holds the lock
        """
        task = cast(asyncio.Task, current_task())
        if self._owner_task is None and not self._waiters:
            # Uncontended: take ownership immediately
            await AsyncIOBackend.checkpoint_if_cancelled()
            self._owner_task = task

            # Unless on the "fast path", yield control of the event loop so that other
            # tasks can run too
            if not self._fast_acquire:
                try:
                    await AsyncIOBackend.cancel_shielded_checkpoint()
                except CancelledError:
                    # Cancelled after taking ownership: give the lock back
                    self.release()
                    raise

            return

        if self._owner_task == task:
            raise RuntimeError("Attempted to acquire an already held Lock")

        fut: asyncio.Future[None] = asyncio.Future()
        item = task, fut
        self._waiters.append(item)
        try:
            await fut
        except CancelledError:
            self._waiters.remove(item)
            # release() may have already handed ownership to us; pass it on
            if self._owner_task is task:
                self.release()

            raise

        self._waiters.remove(item)

    def acquire_nowait(self) -> None:
        """Acquire the lock without blocking.

        :raises WouldBlock: if the lock is held or other tasks are waiting
        :raises RuntimeError: if this task already holds the lock
        """
        task = cast(asyncio.Task, current_task())
        if self._owner_task is None and not self._waiters:
            self._owner_task = task
            return

        if self._owner_task is task:
            raise RuntimeError("Attempted to acquire an already held Lock")

        raise WouldBlock

    def locked(self) -> bool:
        return self._owner_task is not None

    def release(self) -> None:
        """Release the lock, handing ownership directly to the next waiter.

        :raises RuntimeError: if the calling task does not hold the lock
        """
        if self._owner_task != current_task():
            raise RuntimeError("The current task is not holding this lock")

        # Direct handoff: transfer ownership to the first uncancelled waiter
        for task, fut in self._waiters:
            if not fut.cancelled():
                self._owner_task = task
                fut.set_result(None)
                return

        self._owner_task = None

    def statistics(self) -> LockStatistics:
        task_info = AsyncIOTaskInfo(self._owner_task) if self._owner_task else None
        return LockStatistics(self.locked(), task_info, len(self._waiters))
1844
+
1845
+
1846
class Semaphore(BaseSemaphore):
    """asyncio implementation of anyio's Semaphore with FIFO waiter handoff."""

    def __new__(
        cls,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> Semaphore:
        # Bypass the backend-dispatching factory in BaseSemaphore.__new__
        return object.__new__(cls)

    def __init__(
        self,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ):
        super().__init__(initial_value, max_value=max_value)
        self._value = initial_value
        self._max_value = max_value
        # fast_acquire skips the scheduling checkpoint on the uncontended path
        self._fast_acquire = fast_acquire
        self._waiters: deque[asyncio.Future[None]] = deque()

    async def acquire(self) -> None:
        """Decrement the semaphore, waiting in FIFO order if it is zero."""
        if self._value > 0 and not self._waiters:
            # Uncontended: take a token immediately
            await AsyncIOBackend.checkpoint_if_cancelled()
            self._value -= 1

            # Unless on the "fast path", yield control of the event loop so that other
            # tasks can run too
            if not self._fast_acquire:
                try:
                    await AsyncIOBackend.cancel_shielded_checkpoint()
                except CancelledError:
                    # Cancelled after taking the token: give it back
                    self.release()
                    raise

            return

        fut: asyncio.Future[None] = asyncio.Future()
        self._waiters.append(fut)
        try:
            await fut
        except CancelledError:
            try:
                self._waiters.remove(fut)
            except ValueError:
                # release() already handed us a token; pass it on
                self.release()

            raise

    def acquire_nowait(self) -> None:
        """Decrement the semaphore without blocking.

        :raises WouldBlock: if the semaphore value is zero
        """
        if self._value == 0:
            raise WouldBlock

        self._value -= 1

    def release(self) -> None:
        """Increment the semaphore, or hand the token directly to a waiter.

        :raises ValueError: if this would exceed ``max_value``
        """
        if self._max_value is not None and self._value == self._max_value:
            raise ValueError("semaphore released too many times")

        # Direct handoff: wake the first uncancelled waiter without
        # incrementing the value
        for fut in self._waiters:
            if not fut.cancelled():
                fut.set_result(None)
                self._waiters.remove(fut)
                return

        self._value += 1

    @property
    def value(self) -> int:
        return self._value

    @property
    def max_value(self) -> int | None:
        return self._max_value

    def statistics(self) -> SemaphoreStatistics:
        return SemaphoreStatistics(len(self._waiters))
1925
+
1926
+
1927
class CapacityLimiter(BaseCapacityLimiter):
    """asyncio implementation of anyio's CapacityLimiter.

    Tracks which "borrowers" (usually tasks) currently hold tokens, and
    queues further borrowers in FIFO order when all tokens are taken.
    """

    _total_tokens: float = 0

    def __new__(cls, total_tokens: float) -> CapacityLimiter:
        # Bypass the backend-dispatching factory in BaseCapacityLimiter.__new__
        return object.__new__(cls)

    def __init__(self, total_tokens: float):
        self._borrowers: set[Any] = set()
        # Ordered so waiters are woken in FIFO order
        self._wait_queue: OrderedDict[Any, asyncio.Event] = OrderedDict()
        self.total_tokens = total_tokens

    async def __aenter__(self) -> None:
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    @property
    def total_tokens(self) -> float:
        return self._total_tokens

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        if not isinstance(value, int) and not math.isinf(value):
            raise TypeError("total_tokens must be an int or math.inf")
        if value < 1:
            raise ValueError("total_tokens must be >= 1")

        waiters_to_notify = max(value - self._total_tokens, 0)
        self._total_tokens = value

        # Notify waiting tasks that they have acquired the limiter
        while self._wait_queue and waiters_to_notify:
            event = self._wait_queue.popitem(last=False)[1]
            event.set()
            waiters_to_notify -= 1

    @property
    def borrowed_tokens(self) -> int:
        return len(self._borrowers)

    @property
    def available_tokens(self) -> float:
        return self._total_tokens - len(self._borrowers)

    def acquire_nowait(self) -> None:
        self.acquire_on_behalf_of_nowait(current_task())

    def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
        """Acquire a token for ``borrower`` without blocking.

        :raises WouldBlock: if no token is available or others are waiting
        :raises RuntimeError: if the borrower already holds a token
        """
        if borrower in self._borrowers:
            raise RuntimeError(
                "this borrower is already holding one of this CapacityLimiter's "
                "tokens"
            )

        # Respect FIFO order: don't jump ahead of queued waiters
        if self._wait_queue or len(self._borrowers) >= self._total_tokens:
            raise WouldBlock

        self._borrowers.add(borrower)

    async def acquire(self) -> None:
        return await self.acquire_on_behalf_of(current_task())

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        """Acquire a token for ``borrower``, waiting if none are available."""
        await AsyncIOBackend.checkpoint_if_cancelled()
        try:
            self.acquire_on_behalf_of_nowait(borrower)
        except WouldBlock:
            # Queue up and wait to be woken by release()/total_tokens setter
            event = asyncio.Event()
            self._wait_queue[borrower] = event
            try:
                await event.wait()
            except BaseException:
                self._wait_queue.pop(borrower, None)
                raise

            self._borrowers.add(borrower)
        else:
            # Got the token immediately; still yield once to the event loop
            try:
                await AsyncIOBackend.cancel_shielded_checkpoint()
            except BaseException:
                self.release()
                raise

    def release(self) -> None:
        self.release_on_behalf_of(current_task())

    def release_on_behalf_of(self, borrower: object) -> None:
        """Release the token held by ``borrower``.

        :raises RuntimeError: if the borrower holds no token
        """
        try:
            self._borrowers.remove(borrower)
        except KeyError:
            raise RuntimeError(
                "this borrower isn't holding any of this CapacityLimiter's tokens"
            ) from None

        # Notify the next task in line if this limiter has free capacity now
        if self._wait_queue and len(self._borrowers) < self._total_tokens:
            event = self._wait_queue.popitem(last=False)[1]
            event.set()

    def statistics(self) -> CapacityLimiterStatistics:
        return CapacityLimiterStatistics(
            self.borrowed_tokens,
            self.total_tokens,
            tuple(self._borrowers),
            len(self._wait_queue),
        )
2039
+
2040
+
2041
+ _default_thread_limiter: RunVar[CapacityLimiter] = RunVar("_default_thread_limiter")
2042
+
2043
+
2044
+ #
2045
+ # Operating system signals
2046
+ #
2047
+
2048
+
2049
+ class _SignalReceiver:
2050
+ def __init__(self, signals: tuple[Signals, ...]):
2051
+ self._signals = signals
2052
+ self._loop = get_running_loop()
2053
+ self._signal_queue: deque[Signals] = deque()
2054
+ self._future: asyncio.Future = asyncio.Future()
2055
+ self._handled_signals: set[Signals] = set()
2056
+
2057
+ def _deliver(self, signum: Signals) -> None:
2058
+ self._signal_queue.append(signum)
2059
+ if not self._future.done():
2060
+ self._future.set_result(None)
2061
+
2062
+ def __enter__(self) -> _SignalReceiver:
2063
+ for sig in set(self._signals):
2064
+ self._loop.add_signal_handler(sig, self._deliver, sig)
2065
+ self._handled_signals.add(sig)
2066
+
2067
+ return self
2068
+
2069
+ def __exit__(
2070
+ self,
2071
+ exc_type: type[BaseException] | None,
2072
+ exc_val: BaseException | None,
2073
+ exc_tb: TracebackType | None,
2074
+ ) -> None:
2075
+ for sig in self._handled_signals:
2076
+ self._loop.remove_signal_handler(sig)
2077
+
2078
+ def __aiter__(self) -> _SignalReceiver:
2079
+ return self
2080
+
2081
+ async def __anext__(self) -> Signals:
2082
+ await AsyncIOBackend.checkpoint()
2083
+ if not self._signal_queue:
2084
+ self._future = asyncio.Future()
2085
+ await self._future
2086
+
2087
+ return self._signal_queue.popleft()
2088
+
2089
+
2090
+ #
2091
+ # Testing and debugging
2092
+ #
2093
+
2094
+
2095
+ class AsyncIOTaskInfo(TaskInfo):
2096
+ def __init__(self, task: asyncio.Task):
2097
+ task_state = _task_states.get(task)
2098
+ if task_state is None:
2099
+ parent_id = None
2100
+ else:
2101
+ parent_id = task_state.parent_id
2102
+
2103
+ coro = task.get_coro()
2104
+ assert coro is not None, "created TaskInfo from a completed Task"
2105
+ super().__init__(id(task), parent_id, task.get_name(), coro)
2106
+ self._task = weakref.ref(task)
2107
+
2108
+ def has_pending_cancellation(self) -> bool:
2109
+ if not (task := self._task()):
2110
+ # If the task isn't around anymore, it won't have a pending cancellation
2111
+ return False
2112
+
2113
+ if task._must_cancel: # type: ignore[attr-defined]
2114
+ return True
2115
+ elif (
2116
+ isinstance(task._fut_waiter, asyncio.Future) # type: ignore[attr-defined]
2117
+ and task._fut_waiter.cancelled() # type: ignore[attr-defined]
2118
+ ):
2119
+ return True
2120
+
2121
+ if task_state := _task_states.get(task):
2122
+ if cancel_scope := task_state.cancel_scope:
2123
+ return cancel_scope._effectively_cancelled
2124
+
2125
+ return False
2126
+
2127
+
2128
+ class TestRunner(abc.TestRunner):
2129
+ _send_stream: MemoryObjectSendStream[tuple[Awaitable[Any], asyncio.Future[Any]]]
2130
+
2131
+ def __init__(
2132
+ self,
2133
+ *,
2134
+ debug: bool | None = None,
2135
+ use_uvloop: bool = False,
2136
+ loop_factory: Callable[[], AbstractEventLoop] | None = None,
2137
+ ) -> None:
2138
+ if use_uvloop and loop_factory is None:
2139
+ import uvloop
2140
+
2141
+ loop_factory = uvloop.new_event_loop
2142
+
2143
+ self._runner = Runner(debug=debug, loop_factory=loop_factory)
2144
+ self._exceptions: list[BaseException] = []
2145
+ self._runner_task: asyncio.Task | None = None
2146
+
2147
+ def __enter__(self) -> TestRunner:
2148
+ self._runner.__enter__()
2149
+ self.get_loop().set_exception_handler(self._exception_handler)
2150
+ return self
2151
+
2152
+ def __exit__(
2153
+ self,
2154
+ exc_type: type[BaseException] | None,
2155
+ exc_val: BaseException | None,
2156
+ exc_tb: TracebackType | None,
2157
+ ) -> None:
2158
+ self._runner.__exit__(exc_type, exc_val, exc_tb)
2159
+
2160
+ def get_loop(self) -> AbstractEventLoop:
2161
+ return self._runner.get_loop()
2162
+
2163
+ def _exception_handler(
2164
+ self, loop: asyncio.AbstractEventLoop, context: dict[str, Any]
2165
+ ) -> None:
2166
+ if isinstance(context.get("exception"), Exception):
2167
+ self._exceptions.append(context["exception"])
2168
+ else:
2169
+ loop.default_exception_handler(context)
2170
+
2171
+ def _raise_async_exceptions(self) -> None:
2172
+ # Re-raise any exceptions raised in asynchronous callbacks
2173
+ if self._exceptions:
2174
+ exceptions, self._exceptions = self._exceptions, []
2175
+ if len(exceptions) == 1:
2176
+ raise exceptions[0]
2177
+ elif exceptions:
2178
+ raise BaseExceptionGroup(
2179
+ "Multiple exceptions occurred in asynchronous callbacks", exceptions
2180
+ )
2181
+
2182
+ async def _run_tests_and_fixtures(
2183
+ self,
2184
+ receive_stream: MemoryObjectReceiveStream[
2185
+ tuple[Awaitable[T_Retval], asyncio.Future[T_Retval]]
2186
+ ],
2187
+ ) -> None:
2188
+ from _pytest.outcomes import OutcomeException
2189
+
2190
+ with receive_stream, self._send_stream:
2191
+ async for coro, future in receive_stream:
2192
+ try:
2193
+ retval = await coro
2194
+ except CancelledError as exc:
2195
+ if not future.cancelled():
2196
+ future.cancel(*exc.args)
2197
+
2198
+ raise
2199
+ except BaseException as exc:
2200
+ if not future.cancelled():
2201
+ future.set_exception(exc)
2202
+
2203
+ if not isinstance(exc, (Exception, OutcomeException)):
2204
+ raise
2205
+ else:
2206
+ if not future.cancelled():
2207
+ future.set_result(retval)
2208
+
2209
+ async def _call_in_runner_task(
2210
+ self,
2211
+ func: Callable[P, Awaitable[T_Retval]],
2212
+ *args: P.args,
2213
+ **kwargs: P.kwargs,
2214
+ ) -> T_Retval:
2215
+ if not self._runner_task:
2216
+ self._send_stream, receive_stream = create_memory_object_stream[
2217
+ tuple[Awaitable[Any], asyncio.Future]
2218
+ ](1)
2219
+ self._runner_task = self.get_loop().create_task(
2220
+ self._run_tests_and_fixtures(receive_stream)
2221
+ )
2222
+
2223
+ coro = func(*args, **kwargs)
2224
+ future: asyncio.Future[T_Retval] = self.get_loop().create_future()
2225
+ self._send_stream.send_nowait((coro, future))
2226
+ return await future
2227
+
2228
+ def run_asyncgen_fixture(
2229
+ self,
2230
+ fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
2231
+ kwargs: dict[str, Any],
2232
+ ) -> Iterable[T_Retval]:
2233
+ asyncgen = fixture_func(**kwargs)
2234
+ fixturevalue: T_Retval = self.get_loop().run_until_complete(
2235
+ self._call_in_runner_task(asyncgen.asend, None)
2236
+ )
2237
+ self._raise_async_exceptions()
2238
+
2239
+ yield fixturevalue
2240
+
2241
+ try:
2242
+ self.get_loop().run_until_complete(
2243
+ self._call_in_runner_task(asyncgen.asend, None)
2244
+ )
2245
+ except StopAsyncIteration:
2246
+ self._raise_async_exceptions()
2247
+ else:
2248
+ self.get_loop().run_until_complete(asyncgen.aclose())
2249
+ raise RuntimeError("Async generator fixture did not stop")
2250
+
2251
+ def run_fixture(
2252
+ self,
2253
+ fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
2254
+ kwargs: dict[str, Any],
2255
+ ) -> T_Retval:
2256
+ retval = self.get_loop().run_until_complete(
2257
+ self._call_in_runner_task(fixture_func, **kwargs)
2258
+ )
2259
+ self._raise_async_exceptions()
2260
+ return retval
2261
+
2262
+ def run_test(
2263
+ self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
2264
+ ) -> None:
2265
+ try:
2266
+ self.get_loop().run_until_complete(
2267
+ self._call_in_runner_task(test_func, **kwargs)
2268
+ )
2269
+ except Exception as exc:
2270
+ self._exceptions.append(exc)
2271
+
2272
+ self._raise_async_exceptions()
2273
+
2274
+
2275
+ class AsyncIOBackend(AsyncBackend):
2276
+ @classmethod
2277
+ def run(
2278
+ cls,
2279
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
2280
+ args: tuple[Unpack[PosArgsT]],
2281
+ kwargs: dict[str, Any],
2282
+ options: dict[str, Any],
2283
+ ) -> T_Retval:
2284
+ @wraps(func)
2285
+ async def wrapper() -> T_Retval:
2286
+ task = cast(asyncio.Task, current_task())
2287
+ task.set_name(get_callable_name(func))
2288
+ _task_states[task] = TaskState(None, None)
2289
+
2290
+ try:
2291
+ return await func(*args)
2292
+ finally:
2293
+ del _task_states[task]
2294
+
2295
+ debug = options.get("debug", None)
2296
+ loop_factory = options.get("loop_factory", None)
2297
+ if loop_factory is None and options.get("use_uvloop", False):
2298
+ import uvloop
2299
+
2300
+ loop_factory = uvloop.new_event_loop
2301
+
2302
+ with Runner(debug=debug, loop_factory=loop_factory) as runner:
2303
+ return runner.run(wrapper())
2304
+
2305
+ @classmethod
2306
+ def current_token(cls) -> object:
2307
+ return get_running_loop()
2308
+
2309
+ @classmethod
2310
+ def current_time(cls) -> float:
2311
+ return get_running_loop().time()
2312
+
2313
+ @classmethod
2314
+ def cancelled_exception_class(cls) -> type[BaseException]:
2315
+ return CancelledError
2316
+
2317
+ @classmethod
2318
+ async def checkpoint(cls) -> None:
2319
+ await sleep(0)
2320
+
2321
+ @classmethod
2322
+ async def checkpoint_if_cancelled(cls) -> None:
2323
+ task = current_task()
2324
+ if task is None:
2325
+ return
2326
+
2327
+ try:
2328
+ cancel_scope = _task_states[task].cancel_scope
2329
+ except KeyError:
2330
+ return
2331
+
2332
+ while cancel_scope:
2333
+ if cancel_scope.cancel_called:
2334
+ await sleep(0)
2335
+ elif cancel_scope.shield:
2336
+ break
2337
+ else:
2338
+ cancel_scope = cancel_scope._parent_scope
2339
+
2340
+ @classmethod
2341
+ async def cancel_shielded_checkpoint(cls) -> None:
2342
+ with CancelScope(shield=True):
2343
+ await sleep(0)
2344
+
2345
+ @classmethod
2346
+ async def sleep(cls, delay: float) -> None:
2347
+ await sleep(delay)
2348
+
2349
+ @classmethod
2350
+ def create_cancel_scope(
2351
+ cls, *, deadline: float = math.inf, shield: bool = False
2352
+ ) -> CancelScope:
2353
+ return CancelScope(deadline=deadline, shield=shield)
2354
+
2355
+ @classmethod
2356
+ def current_effective_deadline(cls) -> float:
2357
+ if (task := current_task()) is None:
2358
+ return math.inf
2359
+
2360
+ try:
2361
+ cancel_scope = _task_states[task].cancel_scope
2362
+ except KeyError:
2363
+ return math.inf
2364
+
2365
+ deadline = math.inf
2366
+ while cancel_scope:
2367
+ deadline = min(deadline, cancel_scope.deadline)
2368
+ if cancel_scope._cancel_called:
2369
+ deadline = -math.inf
2370
+ break
2371
+ elif cancel_scope.shield:
2372
+ break
2373
+ else:
2374
+ cancel_scope = cancel_scope._parent_scope
2375
+
2376
+ return deadline
2377
+
2378
+ @classmethod
2379
+ def create_task_group(cls) -> abc.TaskGroup:
2380
+ return TaskGroup()
2381
+
2382
+ @classmethod
2383
+ def create_event(cls) -> abc.Event:
2384
+ return Event()
2385
+
2386
+ @classmethod
2387
+ def create_lock(cls, *, fast_acquire: bool) -> abc.Lock:
2388
+ return Lock(fast_acquire=fast_acquire)
2389
+
2390
+ @classmethod
2391
+ def create_semaphore(
2392
+ cls,
2393
+ initial_value: int,
2394
+ *,
2395
+ max_value: int | None = None,
2396
+ fast_acquire: bool = False,
2397
+ ) -> abc.Semaphore:
2398
+ return Semaphore(initial_value, max_value=max_value, fast_acquire=fast_acquire)
2399
+
2400
+ @classmethod
2401
+ def create_capacity_limiter(cls, total_tokens: float) -> abc.CapacityLimiter:
2402
+ return CapacityLimiter(total_tokens)
2403
+
2404
+ @classmethod
2405
+ async def run_sync_in_worker_thread( # type: ignore[return]
2406
+ cls,
2407
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
2408
+ args: tuple[Unpack[PosArgsT]],
2409
+ abandon_on_cancel: bool = False,
2410
+ limiter: abc.CapacityLimiter | None = None,
2411
+ ) -> T_Retval:
2412
+ await cls.checkpoint()
2413
+
2414
+ # If this is the first run in this event loop thread, set up the necessary
2415
+ # variables
2416
+ try:
2417
+ idle_workers = _threadpool_idle_workers.get()
2418
+ workers = _threadpool_workers.get()
2419
+ except LookupError:
2420
+ idle_workers = deque()
2421
+ workers = set()
2422
+ _threadpool_idle_workers.set(idle_workers)
2423
+ _threadpool_workers.set(workers)
2424
+
2425
+ async with limiter or cls.current_default_thread_limiter():
2426
+ with CancelScope(shield=not abandon_on_cancel) as scope:
2427
+ future = asyncio.Future[T_Retval]()
2428
+ root_task = find_root_task()
2429
+ if not idle_workers:
2430
+ worker = WorkerThread(root_task, workers, idle_workers)
2431
+ worker.start()
2432
+ workers.add(worker)
2433
+ root_task.add_done_callback(worker.stop)
2434
+ else:
2435
+ worker = idle_workers.pop()
2436
+
2437
+ # Prune any other workers that have been idle for MAX_IDLE_TIME
2438
+ # seconds or longer
2439
+ now = cls.current_time()
2440
+ while idle_workers:
2441
+ if (
2442
+ now - idle_workers[0].idle_since
2443
+ < WorkerThread.MAX_IDLE_TIME
2444
+ ):
2445
+ break
2446
+
2447
+ expired_worker = idle_workers.popleft()
2448
+ expired_worker.root_task.remove_done_callback(
2449
+ expired_worker.stop
2450
+ )
2451
+ expired_worker.stop()
2452
+
2453
+ context = copy_context()
2454
+ context.run(sniffio.current_async_library_cvar.set, None)
2455
+ if abandon_on_cancel or scope._parent_scope is None:
2456
+ worker_scope = scope
2457
+ else:
2458
+ worker_scope = scope._parent_scope
2459
+
2460
+ worker.queue.put_nowait((context, func, args, future, worker_scope))
2461
+ return await future
2462
+
2463
+ @classmethod
2464
+ def check_cancelled(cls) -> None:
2465
+ scope: CancelScope | None = threadlocals.current_cancel_scope
2466
+ while scope is not None:
2467
+ if scope.cancel_called:
2468
+ raise CancelledError(f"Cancelled by cancel scope {id(scope):x}")
2469
+
2470
+ if scope.shield:
2471
+ return
2472
+
2473
+ scope = scope._parent_scope
2474
+
2475
+ @classmethod
2476
+ def run_async_from_thread(
2477
+ cls,
2478
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
2479
+ args: tuple[Unpack[PosArgsT]],
2480
+ token: object,
2481
+ ) -> T_Retval:
2482
+ async def task_wrapper(scope: CancelScope) -> T_Retval:
2483
+ __tracebackhide__ = True
2484
+ task = cast(asyncio.Task, current_task())
2485
+ _task_states[task] = TaskState(None, scope)
2486
+ scope._tasks.add(task)
2487
+ try:
2488
+ return await func(*args)
2489
+ except CancelledError as exc:
2490
+ raise concurrent.futures.CancelledError(str(exc)) from None
2491
+ finally:
2492
+ scope._tasks.discard(task)
2493
+
2494
+ loop = cast(AbstractEventLoop, token)
2495
+ context = copy_context()
2496
+ context.run(sniffio.current_async_library_cvar.set, "asyncio")
2497
+ wrapper = task_wrapper(threadlocals.current_cancel_scope)
2498
+ f: concurrent.futures.Future[T_Retval] = context.run(
2499
+ asyncio.run_coroutine_threadsafe, wrapper, loop
2500
+ )
2501
+ return f.result()
2502
+
2503
+ @classmethod
2504
+ def run_sync_from_thread(
2505
+ cls,
2506
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
2507
+ args: tuple[Unpack[PosArgsT]],
2508
+ token: object,
2509
+ ) -> T_Retval:
2510
+ @wraps(func)
2511
+ def wrapper() -> None:
2512
+ try:
2513
+ sniffio.current_async_library_cvar.set("asyncio")
2514
+ f.set_result(func(*args))
2515
+ except BaseException as exc:
2516
+ f.set_exception(exc)
2517
+ if not isinstance(exc, Exception):
2518
+ raise
2519
+
2520
+ f: concurrent.futures.Future[T_Retval] = Future()
2521
+ loop = cast(AbstractEventLoop, token)
2522
+ loop.call_soon_threadsafe(wrapper)
2523
+ return f.result()
2524
+
2525
+ @classmethod
2526
+ def create_blocking_portal(cls) -> abc.BlockingPortal:
2527
+ return BlockingPortal()
2528
+
2529
+ @classmethod
2530
+ async def open_process(
2531
+ cls,
2532
+ command: StrOrBytesPath | Sequence[StrOrBytesPath],
2533
+ *,
2534
+ stdin: int | IO[Any] | None,
2535
+ stdout: int | IO[Any] | None,
2536
+ stderr: int | IO[Any] | None,
2537
+ **kwargs: Any,
2538
+ ) -> Process:
2539
+ await cls.checkpoint()
2540
+ if isinstance(command, PathLike):
2541
+ command = os.fspath(command)
2542
+
2543
+ if isinstance(command, (str, bytes)):
2544
+ process = await asyncio.create_subprocess_shell(
2545
+ command,
2546
+ stdin=stdin,
2547
+ stdout=stdout,
2548
+ stderr=stderr,
2549
+ **kwargs,
2550
+ )
2551
+ else:
2552
+ process = await asyncio.create_subprocess_exec(
2553
+ *command,
2554
+ stdin=stdin,
2555
+ stdout=stdout,
2556
+ stderr=stderr,
2557
+ **kwargs,
2558
+ )
2559
+
2560
+ stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None
2561
+ stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None
2562
+ stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None
2563
+ return Process(process, stdin_stream, stdout_stream, stderr_stream)
2564
+
2565
+ @classmethod
2566
+ def setup_process_pool_exit_at_shutdown(cls, workers: set[abc.Process]) -> None:
2567
+ create_task(
2568
+ _shutdown_process_pool_on_exit(workers),
2569
+ name="AnyIO process pool shutdown task",
2570
+ )
2571
+ find_root_task().add_done_callback(
2572
+ partial(_forcibly_shutdown_process_pool_on_exit, workers) # type:ignore[arg-type]
2573
+ )
2574
+
2575
+ @classmethod
2576
+ async def connect_tcp(
2577
+ cls, host: str, port: int, local_address: IPSockAddrType | None = None
2578
+ ) -> abc.SocketStream:
2579
+ transport, protocol = cast(
2580
+ tuple[asyncio.Transport, StreamProtocol],
2581
+ await get_running_loop().create_connection(
2582
+ StreamProtocol, host, port, local_addr=local_address
2583
+ ),
2584
+ )
2585
+ transport.pause_reading()
2586
+ return SocketStream(transport, protocol)
2587
+
2588
+ @classmethod
2589
+ async def connect_unix(cls, path: str | bytes) -> abc.UNIXSocketStream:
2590
+ await cls.checkpoint()
2591
+ loop = get_running_loop()
2592
+ raw_socket = socket.socket(socket.AF_UNIX)
2593
+ raw_socket.setblocking(False)
2594
+ while True:
2595
+ try:
2596
+ raw_socket.connect(path)
2597
+ except BlockingIOError:
2598
+ f: asyncio.Future = asyncio.Future()
2599
+ loop.add_writer(raw_socket, f.set_result, None)
2600
+ f.add_done_callback(lambda _: loop.remove_writer(raw_socket))
2601
+ await f
2602
+ except BaseException:
2603
+ raw_socket.close()
2604
+ raise
2605
+ else:
2606
+ return UNIXSocketStream(raw_socket)
2607
+
2608
+ @classmethod
2609
+ def create_tcp_listener(cls, sock: socket.socket) -> SocketListener:
2610
+ return TCPSocketListener(sock)
2611
+
2612
+ @classmethod
2613
+ def create_unix_listener(cls, sock: socket.socket) -> SocketListener:
2614
+ return UNIXSocketListener(sock)
2615
+
2616
+ @classmethod
2617
+ async def create_udp_socket(
2618
+ cls,
2619
+ family: AddressFamily,
2620
+ local_address: IPSockAddrType | None,
2621
+ remote_address: IPSockAddrType | None,
2622
+ reuse_port: bool,
2623
+ ) -> UDPSocket | ConnectedUDPSocket:
2624
+ transport, protocol = await get_running_loop().create_datagram_endpoint(
2625
+ DatagramProtocol,
2626
+ local_addr=local_address,
2627
+ remote_addr=remote_address,
2628
+ family=family,
2629
+ reuse_port=reuse_port,
2630
+ )
2631
+ if protocol.exception:
2632
+ transport.close()
2633
+ raise protocol.exception
2634
+
2635
+ if not remote_address:
2636
+ return UDPSocket(transport, protocol)
2637
+ else:
2638
+ return ConnectedUDPSocket(transport, protocol)
2639
+
2640
+ @classmethod
2641
+ async def create_unix_datagram_socket( # type: ignore[override]
2642
+ cls, raw_socket: socket.socket, remote_path: str | bytes | None
2643
+ ) -> abc.UNIXDatagramSocket | abc.ConnectedUNIXDatagramSocket:
2644
+ await cls.checkpoint()
2645
+ loop = get_running_loop()
2646
+
2647
+ if remote_path:
2648
+ while True:
2649
+ try:
2650
+ raw_socket.connect(remote_path)
2651
+ except BlockingIOError:
2652
+ f: asyncio.Future = asyncio.Future()
2653
+ loop.add_writer(raw_socket, f.set_result, None)
2654
+ f.add_done_callback(lambda _: loop.remove_writer(raw_socket))
2655
+ await f
2656
+ except BaseException:
2657
+ raw_socket.close()
2658
+ raise
2659
+ else:
2660
+ return ConnectedUNIXDatagramSocket(raw_socket)
2661
+ else:
2662
+ return UNIXDatagramSocket(raw_socket)
2663
+
2664
+ @classmethod
2665
+ async def getaddrinfo(
2666
+ cls,
2667
+ host: bytes | str | None,
2668
+ port: str | int | None,
2669
+ *,
2670
+ family: int | AddressFamily = 0,
2671
+ type: int | SocketKind = 0,
2672
+ proto: int = 0,
2673
+ flags: int = 0,
2674
+ ) -> list[
2675
+ tuple[
2676
+ AddressFamily,
2677
+ SocketKind,
2678
+ int,
2679
+ str,
2680
+ tuple[str, int] | tuple[str, int, int, int],
2681
+ ]
2682
+ ]:
2683
+ return await get_running_loop().getaddrinfo(
2684
+ host, port, family=family, type=type, proto=proto, flags=flags
2685
+ )
2686
+
2687
+ @classmethod
2688
+ async def getnameinfo(
2689
+ cls, sockaddr: IPSockAddrType, flags: int = 0
2690
+ ) -> tuple[str, str]:
2691
+ return await get_running_loop().getnameinfo(sockaddr, flags)
2692
+
2693
+ @classmethod
2694
+ async def wait_readable(cls, obj: FileDescriptorLike) -> None:
2695
+ await cls.checkpoint()
2696
+ try:
2697
+ read_events = _read_events.get()
2698
+ except LookupError:
2699
+ read_events = {}
2700
+ _read_events.set(read_events)
2701
+
2702
+ if not isinstance(obj, int):
2703
+ obj = obj.fileno()
2704
+
2705
+ if read_events.get(obj):
2706
+ raise BusyResourceError("reading from")
2707
+
2708
+ loop = get_running_loop()
2709
+ event = asyncio.Event()
2710
+ try:
2711
+ loop.add_reader(obj, event.set)
2712
+ except NotImplementedError:
2713
+ from anyio._core._asyncio_selector_thread import get_selector
2714
+
2715
+ selector = get_selector()
2716
+ selector.add_reader(obj, event.set)
2717
+ remove_reader = selector.remove_reader
2718
+ else:
2719
+ remove_reader = loop.remove_reader
2720
+
2721
+ read_events[obj] = event
2722
+ try:
2723
+ await event.wait()
2724
+ finally:
2725
+ remove_reader(obj)
2726
+ del read_events[obj]
2727
+
2728
+ @classmethod
2729
+ async def wait_writable(cls, obj: FileDescriptorLike) -> None:
2730
+ await cls.checkpoint()
2731
+ try:
2732
+ write_events = _write_events.get()
2733
+ except LookupError:
2734
+ write_events = {}
2735
+ _write_events.set(write_events)
2736
+
2737
+ if not isinstance(obj, int):
2738
+ obj = obj.fileno()
2739
+
2740
+ if write_events.get(obj):
2741
+ raise BusyResourceError("writing to")
2742
+
2743
+ loop = get_running_loop()
2744
+ event = asyncio.Event()
2745
+ try:
2746
+ loop.add_writer(obj, event.set)
2747
+ except NotImplementedError:
2748
+ from anyio._core._asyncio_selector_thread import get_selector
2749
+
2750
+ selector = get_selector()
2751
+ selector.add_writer(obj, event.set)
2752
+ remove_writer = selector.remove_writer
2753
+ else:
2754
+ remove_writer = loop.remove_writer
2755
+
2756
+ write_events[obj] = event
2757
+ try:
2758
+ await event.wait()
2759
+ finally:
2760
+ del write_events[obj]
2761
+ remove_writer(obj)
2762
+
2763
+ @classmethod
2764
+ def current_default_thread_limiter(cls) -> CapacityLimiter:
2765
+ try:
2766
+ return _default_thread_limiter.get()
2767
+ except LookupError:
2768
+ limiter = CapacityLimiter(40)
2769
+ _default_thread_limiter.set(limiter)
2770
+ return limiter
2771
+
2772
+ @classmethod
2773
+ def open_signal_receiver(
2774
+ cls, *signals: Signals
2775
+ ) -> AbstractContextManager[AsyncIterator[Signals]]:
2776
+ return _SignalReceiver(signals)
2777
+
2778
+ @classmethod
2779
+ def get_current_task(cls) -> TaskInfo:
2780
+ return AsyncIOTaskInfo(current_task()) # type: ignore[arg-type]
2781
+
2782
+ @classmethod
2783
+ def get_running_tasks(cls) -> Sequence[TaskInfo]:
2784
+ return [AsyncIOTaskInfo(task) for task in all_tasks() if not task.done()]
2785
+
2786
+ @classmethod
2787
+ async def wait_all_tasks_blocked(cls) -> None:
2788
+ await cls.checkpoint()
2789
+ this_task = current_task()
2790
+ while True:
2791
+ for task in all_tasks():
2792
+ if task is this_task:
2793
+ continue
2794
+
2795
+ waiter = task._fut_waiter # type: ignore[attr-defined]
2796
+ if waiter is None or waiter.done():
2797
+ await sleep(0.1)
2798
+ break
2799
+ else:
2800
+ return
2801
+
2802
+ @classmethod
2803
+ def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
2804
+ return TestRunner(**options)
2805
+
2806
+
2807
+ backend_class = AsyncIOBackend
.venv/lib/python3.11/site-packages/anyio/_backends/_trio.py ADDED
@@ -0,0 +1,1334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import array
4
+ import math
5
+ import os
6
+ import socket
7
+ import sys
8
+ import types
9
+ import weakref
10
+ from collections.abc import (
11
+ AsyncGenerator,
12
+ AsyncIterator,
13
+ Awaitable,
14
+ Callable,
15
+ Collection,
16
+ Coroutine,
17
+ Iterable,
18
+ Sequence,
19
+ )
20
+ from concurrent.futures import Future
21
+ from contextlib import AbstractContextManager
22
+ from dataclasses import dataclass
23
+ from functools import partial
24
+ from io import IOBase
25
+ from os import PathLike
26
+ from signal import Signals
27
+ from socket import AddressFamily, SocketKind
28
+ from types import TracebackType
29
+ from typing import (
30
+ IO,
31
+ TYPE_CHECKING,
32
+ Any,
33
+ Generic,
34
+ NoReturn,
35
+ TypeVar,
36
+ cast,
37
+ overload,
38
+ )
39
+
40
+ import trio.from_thread
41
+ import trio.lowlevel
42
+ from outcome import Error, Outcome, Value
43
+ from trio.lowlevel import (
44
+ current_root_task,
45
+ current_task,
46
+ wait_readable,
47
+ wait_writable,
48
+ )
49
+ from trio.socket import SocketType as TrioSocketType
50
+ from trio.to_thread import run_sync
51
+
52
+ from .. import (
53
+ CapacityLimiterStatistics,
54
+ EventStatistics,
55
+ LockStatistics,
56
+ TaskInfo,
57
+ WouldBlock,
58
+ abc,
59
+ )
60
+ from .._core._eventloop import claim_worker_thread
61
+ from .._core._exceptions import (
62
+ BrokenResourceError,
63
+ BusyResourceError,
64
+ ClosedResourceError,
65
+ EndOfStream,
66
+ )
67
+ from .._core._sockets import convert_ipv6_sockaddr
68
+ from .._core._streams import create_memory_object_stream
69
+ from .._core._synchronization import (
70
+ CapacityLimiter as BaseCapacityLimiter,
71
+ )
72
+ from .._core._synchronization import Event as BaseEvent
73
+ from .._core._synchronization import Lock as BaseLock
74
+ from .._core._synchronization import (
75
+ ResourceGuard,
76
+ SemaphoreStatistics,
77
+ )
78
+ from .._core._synchronization import Semaphore as BaseSemaphore
79
+ from .._core._tasks import CancelScope as BaseCancelScope
80
+ from ..abc import IPSockAddrType, UDPPacketType, UNIXDatagramPacketType
81
+ from ..abc._eventloop import AsyncBackend, StrOrBytesPath
82
+ from ..streams.memory import MemoryObjectSendStream
83
+
84
+ if TYPE_CHECKING:
85
+ from _typeshed import HasFileno
86
+
87
+ if sys.version_info >= (3, 10):
88
+ from typing import ParamSpec
89
+ else:
90
+ from typing_extensions import ParamSpec
91
+
92
+ if sys.version_info >= (3, 11):
93
+ from typing import TypeVarTuple, Unpack
94
+ else:
95
+ from exceptiongroup import BaseExceptionGroup
96
+ from typing_extensions import TypeVarTuple, Unpack
97
+
98
+ T = TypeVar("T")
99
+ T_Retval = TypeVar("T_Retval")
100
+ T_SockAddr = TypeVar("T_SockAddr", str, IPSockAddrType)
101
+ PosArgsT = TypeVarTuple("PosArgsT")
102
+ P = ParamSpec("P")
103
+
104
+
105
+ #
106
+ # Event loop
107
+ #
108
+
109
+ RunVar = trio.lowlevel.RunVar
110
+
111
+
112
+ #
113
+ # Timeouts and cancellation
114
+ #
115
+
116
+
117
+ class CancelScope(BaseCancelScope):
118
+ def __new__(
119
+ cls, original: trio.CancelScope | None = None, **kwargs: object
120
+ ) -> CancelScope:
121
+ return object.__new__(cls)
122
+
123
+ def __init__(self, original: trio.CancelScope | None = None, **kwargs: Any) -> None:
124
+ self.__original = original or trio.CancelScope(**kwargs)
125
+
126
+ def __enter__(self) -> CancelScope:
127
+ self.__original.__enter__()
128
+ return self
129
+
130
+ def __exit__(
131
+ self,
132
+ exc_type: type[BaseException] | None,
133
+ exc_val: BaseException | None,
134
+ exc_tb: TracebackType | None,
135
+ ) -> bool:
136
+ return self.__original.__exit__(exc_type, exc_val, exc_tb)
137
+
138
+ def cancel(self) -> None:
139
+ self.__original.cancel()
140
+
141
+ @property
142
+ def deadline(self) -> float:
143
+ return self.__original.deadline
144
+
145
+ @deadline.setter
146
+ def deadline(self, value: float) -> None:
147
+ self.__original.deadline = value
148
+
149
+ @property
150
+ def cancel_called(self) -> bool:
151
+ return self.__original.cancel_called
152
+
153
+ @property
154
+ def cancelled_caught(self) -> bool:
155
+ return self.__original.cancelled_caught
156
+
157
+ @property
158
+ def shield(self) -> bool:
159
+ return self.__original.shield
160
+
161
+ @shield.setter
162
+ def shield(self, value: bool) -> None:
163
+ self.__original.shield = value
164
+
165
+
166
+ #
167
+ # Task groups
168
+ #
169
+
170
+
171
+ class TaskGroup(abc.TaskGroup):
172
+ def __init__(self) -> None:
173
+ self._active = False
174
+ self._nursery_manager = trio.open_nursery(strict_exception_groups=True)
175
+ self.cancel_scope = None # type: ignore[assignment]
176
+
177
+ async def __aenter__(self) -> TaskGroup:
178
+ self._active = True
179
+ self._nursery = await self._nursery_manager.__aenter__()
180
+ self.cancel_scope = CancelScope(self._nursery.cancel_scope)
181
+ return self
182
+
183
+ async def __aexit__(
184
+ self,
185
+ exc_type: type[BaseException] | None,
186
+ exc_val: BaseException | None,
187
+ exc_tb: TracebackType | None,
188
+ ) -> bool:
189
+ try:
190
+ # trio.Nursery.__exit__ returns bool; .open_nursery has wrong type
191
+ return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb) # type: ignore[return-value]
192
+ except BaseExceptionGroup as exc:
193
+ if not exc.split(trio.Cancelled)[1]:
194
+ raise trio.Cancelled._create() from exc
195
+
196
+ raise
197
+ finally:
198
+ del exc_val, exc_tb
199
+ self._active = False
200
+
201
+ def start_soon(
202
+ self,
203
+ func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
204
+ *args: Unpack[PosArgsT],
205
+ name: object = None,
206
+ ) -> None:
207
+ if not self._active:
208
+ raise RuntimeError(
209
+ "This task group is not active; no new tasks can be started."
210
+ )
211
+
212
+ self._nursery.start_soon(func, *args, name=name)
213
+
214
+ async def start(
215
+ self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
216
+ ) -> Any:
217
+ if not self._active:
218
+ raise RuntimeError(
219
+ "This task group is not active; no new tasks can be started."
220
+ )
221
+
222
+ return await self._nursery.start(func, *args, name=name)
223
+
224
+
225
+ #
226
+ # Threads
227
+ #
228
+
229
+
230
+ class BlockingPortal(abc.BlockingPortal):
231
+ def __new__(cls) -> BlockingPortal:
232
+ return object.__new__(cls)
233
+
234
+ def __init__(self) -> None:
235
+ super().__init__()
236
+ self._token = trio.lowlevel.current_trio_token()
237
+
238
+ def _spawn_task_from_thread(
239
+ self,
240
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
241
+ args: tuple[Unpack[PosArgsT]],
242
+ kwargs: dict[str, Any],
243
+ name: object,
244
+ future: Future[T_Retval],
245
+ ) -> None:
246
+ trio.from_thread.run_sync(
247
+ partial(self._task_group.start_soon, name=name),
248
+ self._call_func,
249
+ func,
250
+ args,
251
+ kwargs,
252
+ future,
253
+ trio_token=self._token,
254
+ )
255
+
256
+
257
+ #
258
+ # Subprocesses
259
+ #
260
+
261
+
262
+ @dataclass(eq=False)
263
+ class ReceiveStreamWrapper(abc.ByteReceiveStream):
264
+ _stream: trio.abc.ReceiveStream
265
+
266
+ async def receive(self, max_bytes: int | None = None) -> bytes:
267
+ try:
268
+ data = await self._stream.receive_some(max_bytes)
269
+ except trio.ClosedResourceError as exc:
270
+ raise ClosedResourceError from exc.__cause__
271
+ except trio.BrokenResourceError as exc:
272
+ raise BrokenResourceError from exc.__cause__
273
+
274
+ if data:
275
+ return data
276
+ else:
277
+ raise EndOfStream
278
+
279
+ async def aclose(self) -> None:
280
+ await self._stream.aclose()
281
+
282
+
283
+ @dataclass(eq=False)
284
+ class SendStreamWrapper(abc.ByteSendStream):
285
+ _stream: trio.abc.SendStream
286
+
287
+ async def send(self, item: bytes) -> None:
288
+ try:
289
+ await self._stream.send_all(item)
290
+ except trio.ClosedResourceError as exc:
291
+ raise ClosedResourceError from exc.__cause__
292
+ except trio.BrokenResourceError as exc:
293
+ raise BrokenResourceError from exc.__cause__
294
+
295
+ async def aclose(self) -> None:
296
+ await self._stream.aclose()
297
+
298
+
299
+ @dataclass(eq=False)
300
+ class Process(abc.Process):
301
+ _process: trio.Process
302
+ _stdin: abc.ByteSendStream | None
303
+ _stdout: abc.ByteReceiveStream | None
304
+ _stderr: abc.ByteReceiveStream | None
305
+
306
+ async def aclose(self) -> None:
307
+ with CancelScope(shield=True):
308
+ if self._stdin:
309
+ await self._stdin.aclose()
310
+ if self._stdout:
311
+ await self._stdout.aclose()
312
+ if self._stderr:
313
+ await self._stderr.aclose()
314
+
315
+ try:
316
+ await self.wait()
317
+ except BaseException:
318
+ self.kill()
319
+ with CancelScope(shield=True):
320
+ await self.wait()
321
+ raise
322
+
323
+ async def wait(self) -> int:
324
+ return await self._process.wait()
325
+
326
+ def terminate(self) -> None:
327
+ self._process.terminate()
328
+
329
+ def kill(self) -> None:
330
+ self._process.kill()
331
+
332
+ def send_signal(self, signal: Signals) -> None:
333
+ self._process.send_signal(signal)
334
+
335
+ @property
336
+ def pid(self) -> int:
337
+ return self._process.pid
338
+
339
+ @property
340
+ def returncode(self) -> int | None:
341
+ return self._process.returncode
342
+
343
+ @property
344
+ def stdin(self) -> abc.ByteSendStream | None:
345
+ return self._stdin
346
+
347
+ @property
348
+ def stdout(self) -> abc.ByteReceiveStream | None:
349
+ return self._stdout
350
+
351
+ @property
352
+ def stderr(self) -> abc.ByteReceiveStream | None:
353
+ return self._stderr
354
+
355
+
356
+ class _ProcessPoolShutdownInstrument(trio.abc.Instrument):
357
+ def after_run(self) -> None:
358
+ super().after_run()
359
+
360
+
361
+ current_default_worker_process_limiter: trio.lowlevel.RunVar = RunVar(
362
+ "current_default_worker_process_limiter"
363
+ )
364
+
365
+
366
+ async def _shutdown_process_pool(workers: set[abc.Process]) -> None:
367
+ try:
368
+ await trio.sleep(math.inf)
369
+ except trio.Cancelled:
370
+ for process in workers:
371
+ if process.returncode is None:
372
+ process.kill()
373
+
374
+ with CancelScope(shield=True):
375
+ for process in workers:
376
+ await process.aclose()
377
+
378
+
379
+ #
380
+ # Sockets and networking
381
+ #
382
+
383
+
384
+ class _TrioSocketMixin(Generic[T_SockAddr]):
385
+ def __init__(self, trio_socket: TrioSocketType) -> None:
386
+ self._trio_socket = trio_socket
387
+ self._closed = False
388
+
389
+ def _check_closed(self) -> None:
390
+ if self._closed:
391
+ raise ClosedResourceError
392
+ if self._trio_socket.fileno() < 0:
393
+ raise BrokenResourceError
394
+
395
+ @property
396
+ def _raw_socket(self) -> socket.socket:
397
+ return self._trio_socket._sock # type: ignore[attr-defined]
398
+
399
+ async def aclose(self) -> None:
400
+ if self._trio_socket.fileno() >= 0:
401
+ self._closed = True
402
+ self._trio_socket.close()
403
+
404
+ def _convert_socket_error(self, exc: BaseException) -> NoReturn:
405
+ if isinstance(exc, trio.ClosedResourceError):
406
+ raise ClosedResourceError from exc
407
+ elif self._trio_socket.fileno() < 0 and self._closed:
408
+ raise ClosedResourceError from None
409
+ elif isinstance(exc, OSError):
410
+ raise BrokenResourceError from exc
411
+ else:
412
+ raise exc
413
+
414
+
415
+ class SocketStream(_TrioSocketMixin, abc.SocketStream):
416
+ def __init__(self, trio_socket: TrioSocketType) -> None:
417
+ super().__init__(trio_socket)
418
+ self._receive_guard = ResourceGuard("reading from")
419
+ self._send_guard = ResourceGuard("writing to")
420
+
421
+ async def receive(self, max_bytes: int = 65536) -> bytes:
422
+ with self._receive_guard:
423
+ try:
424
+ data = await self._trio_socket.recv(max_bytes)
425
+ except BaseException as exc:
426
+ self._convert_socket_error(exc)
427
+
428
+ if data:
429
+ return data
430
+ else:
431
+ raise EndOfStream
432
+
433
+ async def send(self, item: bytes) -> None:
434
+ with self._send_guard:
435
+ view = memoryview(item)
436
+ while view:
437
+ try:
438
+ bytes_sent = await self._trio_socket.send(view)
439
+ except BaseException as exc:
440
+ self._convert_socket_error(exc)
441
+
442
+ view = view[bytes_sent:]
443
+
444
+ async def send_eof(self) -> None:
445
+ self._trio_socket.shutdown(socket.SHUT_WR)
446
+
447
+
448
+ class UNIXSocketStream(SocketStream, abc.UNIXSocketStream):
449
+ async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
450
+ if not isinstance(msglen, int) or msglen < 0:
451
+ raise ValueError("msglen must be a non-negative integer")
452
+ if not isinstance(maxfds, int) or maxfds < 1:
453
+ raise ValueError("maxfds must be a positive integer")
454
+
455
+ fds = array.array("i")
456
+ await trio.lowlevel.checkpoint()
457
+ with self._receive_guard:
458
+ while True:
459
+ try:
460
+ message, ancdata, flags, addr = await self._trio_socket.recvmsg(
461
+ msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
462
+ )
463
+ except BaseException as exc:
464
+ self._convert_socket_error(exc)
465
+ else:
466
+ if not message and not ancdata:
467
+ raise EndOfStream
468
+
469
+ break
470
+
471
+ for cmsg_level, cmsg_type, cmsg_data in ancdata:
472
+ if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
473
+ raise RuntimeError(
474
+ f"Received unexpected ancillary data; message = {message!r}, "
475
+ f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
476
+ )
477
+
478
+ fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
479
+
480
+ return message, list(fds)
481
+
482
+ async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
483
+ if not message:
484
+ raise ValueError("message must not be empty")
485
+ if not fds:
486
+ raise ValueError("fds must not be empty")
487
+
488
+ filenos: list[int] = []
489
+ for fd in fds:
490
+ if isinstance(fd, int):
491
+ filenos.append(fd)
492
+ elif isinstance(fd, IOBase):
493
+ filenos.append(fd.fileno())
494
+
495
+ fdarray = array.array("i", filenos)
496
+ await trio.lowlevel.checkpoint()
497
+ with self._send_guard:
498
+ while True:
499
+ try:
500
+ await self._trio_socket.sendmsg(
501
+ [message],
502
+ [
503
+ (
504
+ socket.SOL_SOCKET,
505
+ socket.SCM_RIGHTS,
506
+ fdarray,
507
+ )
508
+ ],
509
+ )
510
+ break
511
+ except BaseException as exc:
512
+ self._convert_socket_error(exc)
513
+
514
+
515
+ class TCPSocketListener(_TrioSocketMixin, abc.SocketListener):
516
+ def __init__(self, raw_socket: socket.socket):
517
+ super().__init__(trio.socket.from_stdlib_socket(raw_socket))
518
+ self._accept_guard = ResourceGuard("accepting connections from")
519
+
520
+ async def accept(self) -> SocketStream:
521
+ with self._accept_guard:
522
+ try:
523
+ trio_socket, _addr = await self._trio_socket.accept()
524
+ except BaseException as exc:
525
+ self._convert_socket_error(exc)
526
+
527
+ trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
528
+ return SocketStream(trio_socket)
529
+
530
+
531
+ class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener):
532
+ def __init__(self, raw_socket: socket.socket):
533
+ super().__init__(trio.socket.from_stdlib_socket(raw_socket))
534
+ self._accept_guard = ResourceGuard("accepting connections from")
535
+
536
+ async def accept(self) -> UNIXSocketStream:
537
+ with self._accept_guard:
538
+ try:
539
+ trio_socket, _addr = await self._trio_socket.accept()
540
+ except BaseException as exc:
541
+ self._convert_socket_error(exc)
542
+
543
+ return UNIXSocketStream(trio_socket)
544
+
545
+
546
+ class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket):
547
+ def __init__(self, trio_socket: TrioSocketType) -> None:
548
+ super().__init__(trio_socket)
549
+ self._receive_guard = ResourceGuard("reading from")
550
+ self._send_guard = ResourceGuard("writing to")
551
+
552
+ async def receive(self) -> tuple[bytes, IPSockAddrType]:
553
+ with self._receive_guard:
554
+ try:
555
+ data, addr = await self._trio_socket.recvfrom(65536)
556
+ return data, convert_ipv6_sockaddr(addr)
557
+ except BaseException as exc:
558
+ self._convert_socket_error(exc)
559
+
560
+ async def send(self, item: UDPPacketType) -> None:
561
+ with self._send_guard:
562
+ try:
563
+ await self._trio_socket.sendto(*item)
564
+ except BaseException as exc:
565
+ self._convert_socket_error(exc)
566
+
567
+
568
+ class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket):
569
+ def __init__(self, trio_socket: TrioSocketType) -> None:
570
+ super().__init__(trio_socket)
571
+ self._receive_guard = ResourceGuard("reading from")
572
+ self._send_guard = ResourceGuard("writing to")
573
+
574
+ async def receive(self) -> bytes:
575
+ with self._receive_guard:
576
+ try:
577
+ return await self._trio_socket.recv(65536)
578
+ except BaseException as exc:
579
+ self._convert_socket_error(exc)
580
+
581
+ async def send(self, item: bytes) -> None:
582
+ with self._send_guard:
583
+ try:
584
+ await self._trio_socket.send(item)
585
+ except BaseException as exc:
586
+ self._convert_socket_error(exc)
587
+
588
+
589
+ class UNIXDatagramSocket(_TrioSocketMixin[str], abc.UNIXDatagramSocket):
590
+ def __init__(self, trio_socket: TrioSocketType) -> None:
591
+ super().__init__(trio_socket)
592
+ self._receive_guard = ResourceGuard("reading from")
593
+ self._send_guard = ResourceGuard("writing to")
594
+
595
+ async def receive(self) -> UNIXDatagramPacketType:
596
+ with self._receive_guard:
597
+ try:
598
+ data, addr = await self._trio_socket.recvfrom(65536)
599
+ return data, addr
600
+ except BaseException as exc:
601
+ self._convert_socket_error(exc)
602
+
603
+ async def send(self, item: UNIXDatagramPacketType) -> None:
604
+ with self._send_guard:
605
+ try:
606
+ await self._trio_socket.sendto(*item)
607
+ except BaseException as exc:
608
+ self._convert_socket_error(exc)
609
+
610
+
611
+ class ConnectedUNIXDatagramSocket(
612
+ _TrioSocketMixin[str], abc.ConnectedUNIXDatagramSocket
613
+ ):
614
+ def __init__(self, trio_socket: TrioSocketType) -> None:
615
+ super().__init__(trio_socket)
616
+ self._receive_guard = ResourceGuard("reading from")
617
+ self._send_guard = ResourceGuard("writing to")
618
+
619
+ async def receive(self) -> bytes:
620
+ with self._receive_guard:
621
+ try:
622
+ return await self._trio_socket.recv(65536)
623
+ except BaseException as exc:
624
+ self._convert_socket_error(exc)
625
+
626
+ async def send(self, item: bytes) -> None:
627
+ with self._send_guard:
628
+ try:
629
+ await self._trio_socket.send(item)
630
+ except BaseException as exc:
631
+ self._convert_socket_error(exc)
632
+
633
+
634
+ #
635
+ # Synchronization
636
+ #
637
+
638
+
639
+ class Event(BaseEvent):
640
+ def __new__(cls) -> Event:
641
+ return object.__new__(cls)
642
+
643
+ def __init__(self) -> None:
644
+ self.__original = trio.Event()
645
+
646
+ def is_set(self) -> bool:
647
+ return self.__original.is_set()
648
+
649
+ async def wait(self) -> None:
650
+ return await self.__original.wait()
651
+
652
+ def statistics(self) -> EventStatistics:
653
+ orig_statistics = self.__original.statistics()
654
+ return EventStatistics(tasks_waiting=orig_statistics.tasks_waiting)
655
+
656
+ def set(self) -> None:
657
+ self.__original.set()
658
+
659
+
660
+ class Lock(BaseLock):
661
+ def __new__(cls, *, fast_acquire: bool = False) -> Lock:
662
+ return object.__new__(cls)
663
+
664
+ def __init__(self, *, fast_acquire: bool = False) -> None:
665
+ self._fast_acquire = fast_acquire
666
+ self.__original = trio.Lock()
667
+
668
+ @staticmethod
669
+ def _convert_runtime_error_msg(exc: RuntimeError) -> None:
670
+ if exc.args == ("attempt to re-acquire an already held Lock",):
671
+ exc.args = ("Attempted to acquire an already held Lock",)
672
+
673
+ async def acquire(self) -> None:
674
+ if not self._fast_acquire:
675
+ try:
676
+ await self.__original.acquire()
677
+ except RuntimeError as exc:
678
+ self._convert_runtime_error_msg(exc)
679
+ raise
680
+
681
+ return
682
+
683
+ # This is the "fast path" where we don't let other tasks run
684
+ await trio.lowlevel.checkpoint_if_cancelled()
685
+ try:
686
+ self.__original.acquire_nowait()
687
+ except trio.WouldBlock:
688
+ await self.__original._lot.park()
689
+ except RuntimeError as exc:
690
+ self._convert_runtime_error_msg(exc)
691
+ raise
692
+
693
+ def acquire_nowait(self) -> None:
694
+ try:
695
+ self.__original.acquire_nowait()
696
+ except trio.WouldBlock:
697
+ raise WouldBlock from None
698
+ except RuntimeError as exc:
699
+ self._convert_runtime_error_msg(exc)
700
+ raise
701
+
702
+ def locked(self) -> bool:
703
+ return self.__original.locked()
704
+
705
+ def release(self) -> None:
706
+ self.__original.release()
707
+
708
+ def statistics(self) -> LockStatistics:
709
+ orig_statistics = self.__original.statistics()
710
+ owner = TrioTaskInfo(orig_statistics.owner) if orig_statistics.owner else None
711
+ return LockStatistics(
712
+ orig_statistics.locked, owner, orig_statistics.tasks_waiting
713
+ )
714
+
715
+
716
+ class Semaphore(BaseSemaphore):
717
+ def __new__(
718
+ cls,
719
+ initial_value: int,
720
+ *,
721
+ max_value: int | None = None,
722
+ fast_acquire: bool = False,
723
+ ) -> Semaphore:
724
+ return object.__new__(cls)
725
+
726
+ def __init__(
727
+ self,
728
+ initial_value: int,
729
+ *,
730
+ max_value: int | None = None,
731
+ fast_acquire: bool = False,
732
+ ) -> None:
733
+ super().__init__(initial_value, max_value=max_value, fast_acquire=fast_acquire)
734
+ self.__original = trio.Semaphore(initial_value, max_value=max_value)
735
+
736
+ async def acquire(self) -> None:
737
+ if not self._fast_acquire:
738
+ await self.__original.acquire()
739
+ return
740
+
741
+ # This is the "fast path" where we don't let other tasks run
742
+ await trio.lowlevel.checkpoint_if_cancelled()
743
+ try:
744
+ self.__original.acquire_nowait()
745
+ except trio.WouldBlock:
746
+ await self.__original._lot.park()
747
+
748
+ def acquire_nowait(self) -> None:
749
+ try:
750
+ self.__original.acquire_nowait()
751
+ except trio.WouldBlock:
752
+ raise WouldBlock from None
753
+
754
+ @property
755
+ def max_value(self) -> int | None:
756
+ return self.__original.max_value
757
+
758
+ @property
759
+ def value(self) -> int:
760
+ return self.__original.value
761
+
762
+ def release(self) -> None:
763
+ self.__original.release()
764
+
765
+ def statistics(self) -> SemaphoreStatistics:
766
+ orig_statistics = self.__original.statistics()
767
+ return SemaphoreStatistics(orig_statistics.tasks_waiting)
768
+
769
+
770
+ class CapacityLimiter(BaseCapacityLimiter):
771
+ def __new__(
772
+ cls,
773
+ total_tokens: float | None = None,
774
+ *,
775
+ original: trio.CapacityLimiter | None = None,
776
+ ) -> CapacityLimiter:
777
+ return object.__new__(cls)
778
+
779
+ def __init__(
780
+ self,
781
+ total_tokens: float | None = None,
782
+ *,
783
+ original: trio.CapacityLimiter | None = None,
784
+ ) -> None:
785
+ if original is not None:
786
+ self.__original = original
787
+ else:
788
+ assert total_tokens is not None
789
+ self.__original = trio.CapacityLimiter(total_tokens)
790
+
791
+ async def __aenter__(self) -> None:
792
+ return await self.__original.__aenter__()
793
+
794
+ async def __aexit__(
795
+ self,
796
+ exc_type: type[BaseException] | None,
797
+ exc_val: BaseException | None,
798
+ exc_tb: TracebackType | None,
799
+ ) -> None:
800
+ await self.__original.__aexit__(exc_type, exc_val, exc_tb)
801
+
802
+ @property
803
+ def total_tokens(self) -> float:
804
+ return self.__original.total_tokens
805
+
806
+ @total_tokens.setter
807
+ def total_tokens(self, value: float) -> None:
808
+ self.__original.total_tokens = value
809
+
810
+ @property
811
+ def borrowed_tokens(self) -> int:
812
+ return self.__original.borrowed_tokens
813
+
814
+ @property
815
+ def available_tokens(self) -> float:
816
+ return self.__original.available_tokens
817
+
818
+ def acquire_nowait(self) -> None:
819
+ self.__original.acquire_nowait()
820
+
821
+ def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
822
+ self.__original.acquire_on_behalf_of_nowait(borrower)
823
+
824
+ async def acquire(self) -> None:
825
+ await self.__original.acquire()
826
+
827
+ async def acquire_on_behalf_of(self, borrower: object) -> None:
828
+ await self.__original.acquire_on_behalf_of(borrower)
829
+
830
+ def release(self) -> None:
831
+ return self.__original.release()
832
+
833
+ def release_on_behalf_of(self, borrower: object) -> None:
834
+ return self.__original.release_on_behalf_of(borrower)
835
+
836
+ def statistics(self) -> CapacityLimiterStatistics:
837
+ orig = self.__original.statistics()
838
+ return CapacityLimiterStatistics(
839
+ borrowed_tokens=orig.borrowed_tokens,
840
+ total_tokens=orig.total_tokens,
841
+ borrowers=tuple(orig.borrowers),
842
+ tasks_waiting=orig.tasks_waiting,
843
+ )
844
+
845
+
846
+ _capacity_limiter_wrapper: trio.lowlevel.RunVar = RunVar("_capacity_limiter_wrapper")
847
+
848
+
849
+ #
850
+ # Signal handling
851
+ #
852
+
853
+
854
+ class _SignalReceiver:
855
+ _iterator: AsyncIterator[int]
856
+
857
+ def __init__(self, signals: tuple[Signals, ...]):
858
+ self._signals = signals
859
+
860
+ def __enter__(self) -> _SignalReceiver:
861
+ self._cm = trio.open_signal_receiver(*self._signals)
862
+ self._iterator = self._cm.__enter__()
863
+ return self
864
+
865
+ def __exit__(
866
+ self,
867
+ exc_type: type[BaseException] | None,
868
+ exc_val: BaseException | None,
869
+ exc_tb: TracebackType | None,
870
+ ) -> bool | None:
871
+ return self._cm.__exit__(exc_type, exc_val, exc_tb)
872
+
873
+ def __aiter__(self) -> _SignalReceiver:
874
+ return self
875
+
876
+ async def __anext__(self) -> Signals:
877
+ signum = await self._iterator.__anext__()
878
+ return Signals(signum)
879
+
880
+
881
+ #
882
+ # Testing and debugging
883
+ #
884
+
885
+
886
+ class TestRunner(abc.TestRunner):
887
+ def __init__(self, **options: Any) -> None:
888
+ from queue import Queue
889
+
890
+ self._call_queue: Queue[Callable[[], object]] = Queue()
891
+ self._send_stream: MemoryObjectSendStream | None = None
892
+ self._options = options
893
+
894
+ def __exit__(
895
+ self,
896
+ exc_type: type[BaseException] | None,
897
+ exc_val: BaseException | None,
898
+ exc_tb: types.TracebackType | None,
899
+ ) -> None:
900
+ if self._send_stream:
901
+ self._send_stream.close()
902
+ while self._send_stream is not None:
903
+ self._call_queue.get()()
904
+
905
+ async def _run_tests_and_fixtures(self) -> None:
906
+ self._send_stream, receive_stream = create_memory_object_stream(1)
907
+ with receive_stream:
908
+ async for coro, outcome_holder in receive_stream:
909
+ try:
910
+ retval = await coro
911
+ except BaseException as exc:
912
+ outcome_holder.append(Error(exc))
913
+ else:
914
+ outcome_holder.append(Value(retval))
915
+
916
+ def _main_task_finished(self, outcome: object) -> None:
917
+ self._send_stream = None
918
+
919
+ def _call_in_runner_task(
920
+ self,
921
+ func: Callable[P, Awaitable[T_Retval]],
922
+ *args: P.args,
923
+ **kwargs: P.kwargs,
924
+ ) -> T_Retval:
925
+ if self._send_stream is None:
926
+ trio.lowlevel.start_guest_run(
927
+ self._run_tests_and_fixtures,
928
+ run_sync_soon_threadsafe=self._call_queue.put,
929
+ done_callback=self._main_task_finished,
930
+ **self._options,
931
+ )
932
+ while self._send_stream is None:
933
+ self._call_queue.get()()
934
+
935
+ outcome_holder: list[Outcome] = []
936
+ self._send_stream.send_nowait((func(*args, **kwargs), outcome_holder))
937
+ while not outcome_holder:
938
+ self._call_queue.get()()
939
+
940
+ return outcome_holder[0].unwrap()
941
+
942
+ def run_asyncgen_fixture(
943
+ self,
944
+ fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
945
+ kwargs: dict[str, Any],
946
+ ) -> Iterable[T_Retval]:
947
+ asyncgen = fixture_func(**kwargs)
948
+ fixturevalue: T_Retval = self._call_in_runner_task(asyncgen.asend, None)
949
+
950
+ yield fixturevalue
951
+
952
+ try:
953
+ self._call_in_runner_task(asyncgen.asend, None)
954
+ except StopAsyncIteration:
955
+ pass
956
+ else:
957
+ self._call_in_runner_task(asyncgen.aclose)
958
+ raise RuntimeError("Async generator fixture did not stop")
959
+
960
+ def run_fixture(
961
+ self,
962
+ fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
963
+ kwargs: dict[str, Any],
964
+ ) -> T_Retval:
965
+ return self._call_in_runner_task(fixture_func, **kwargs)
966
+
967
+ def run_test(
968
+ self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
969
+ ) -> None:
970
+ self._call_in_runner_task(test_func, **kwargs)
971
+
972
+
973
+ class TrioTaskInfo(TaskInfo):
974
+ def __init__(self, task: trio.lowlevel.Task):
975
+ parent_id = None
976
+ if task.parent_nursery and task.parent_nursery.parent_task:
977
+ parent_id = id(task.parent_nursery.parent_task)
978
+
979
+ super().__init__(id(task), parent_id, task.name, task.coro)
980
+ self._task = weakref.proxy(task)
981
+
982
+ def has_pending_cancellation(self) -> bool:
983
+ try:
984
+ return self._task._cancel_status.effectively_cancelled
985
+ except ReferenceError:
986
+ # If the task is no longer around, it surely doesn't have a cancellation
987
+ # pending
988
+ return False
989
+
990
+
991
+ class TrioBackend(AsyncBackend):
992
+ @classmethod
993
+ def run(
994
+ cls,
995
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
996
+ args: tuple[Unpack[PosArgsT]],
997
+ kwargs: dict[str, Any],
998
+ options: dict[str, Any],
999
+ ) -> T_Retval:
1000
+ return trio.run(func, *args)
1001
+
1002
+ @classmethod
1003
+ def current_token(cls) -> object:
1004
+ return trio.lowlevel.current_trio_token()
1005
+
1006
+ @classmethod
1007
+ def current_time(cls) -> float:
1008
+ return trio.current_time()
1009
+
1010
+ @classmethod
1011
+ def cancelled_exception_class(cls) -> type[BaseException]:
1012
+ return trio.Cancelled
1013
+
1014
+ @classmethod
1015
+ async def checkpoint(cls) -> None:
1016
+ await trio.lowlevel.checkpoint()
1017
+
1018
+ @classmethod
1019
+ async def checkpoint_if_cancelled(cls) -> None:
1020
+ await trio.lowlevel.checkpoint_if_cancelled()
1021
+
1022
+ @classmethod
1023
+ async def cancel_shielded_checkpoint(cls) -> None:
1024
+ await trio.lowlevel.cancel_shielded_checkpoint()
1025
+
1026
+ @classmethod
1027
+ async def sleep(cls, delay: float) -> None:
1028
+ await trio.sleep(delay)
1029
+
1030
+ @classmethod
1031
+ def create_cancel_scope(
1032
+ cls, *, deadline: float = math.inf, shield: bool = False
1033
+ ) -> abc.CancelScope:
1034
+ return CancelScope(deadline=deadline, shield=shield)
1035
+
1036
+ @classmethod
1037
+ def current_effective_deadline(cls) -> float:
1038
+ return trio.current_effective_deadline()
1039
+
1040
+ @classmethod
1041
+ def create_task_group(cls) -> abc.TaskGroup:
1042
+ return TaskGroup()
1043
+
1044
+ @classmethod
1045
+ def create_event(cls) -> abc.Event:
1046
+ return Event()
1047
+
1048
+ @classmethod
1049
+ def create_lock(cls, *, fast_acquire: bool) -> Lock:
1050
+ return Lock(fast_acquire=fast_acquire)
1051
+
1052
+ @classmethod
1053
+ def create_semaphore(
1054
+ cls,
1055
+ initial_value: int,
1056
+ *,
1057
+ max_value: int | None = None,
1058
+ fast_acquire: bool = False,
1059
+ ) -> abc.Semaphore:
1060
+ return Semaphore(initial_value, max_value=max_value, fast_acquire=fast_acquire)
1061
+
1062
+ @classmethod
1063
+ def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
1064
+ return CapacityLimiter(total_tokens)
1065
+
1066
+ @classmethod
1067
+ async def run_sync_in_worker_thread(
1068
+ cls,
1069
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
1070
+ args: tuple[Unpack[PosArgsT]],
1071
+ abandon_on_cancel: bool = False,
1072
+ limiter: abc.CapacityLimiter | None = None,
1073
+ ) -> T_Retval:
1074
+ def wrapper() -> T_Retval:
1075
+ with claim_worker_thread(TrioBackend, token):
1076
+ return func(*args)
1077
+
1078
+ token = TrioBackend.current_token()
1079
+ return await run_sync(
1080
+ wrapper,
1081
+ abandon_on_cancel=abandon_on_cancel,
1082
+ limiter=cast(trio.CapacityLimiter, limiter),
1083
+ )
1084
+
1085
+ @classmethod
1086
+ def check_cancelled(cls) -> None:
1087
+ trio.from_thread.check_cancelled()
1088
+
1089
+ @classmethod
1090
+ def run_async_from_thread(
1091
+ cls,
1092
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
1093
+ args: tuple[Unpack[PosArgsT]],
1094
+ token: object,
1095
+ ) -> T_Retval:
1096
+ return trio.from_thread.run(func, *args)
1097
+
1098
+ @classmethod
1099
+ def run_sync_from_thread(
1100
+ cls,
1101
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
1102
+ args: tuple[Unpack[PosArgsT]],
1103
+ token: object,
1104
+ ) -> T_Retval:
1105
+ return trio.from_thread.run_sync(func, *args)
1106
+
1107
+ @classmethod
1108
+ def create_blocking_portal(cls) -> abc.BlockingPortal:
1109
+ return BlockingPortal()
1110
+
1111
+ @classmethod
1112
+ async def open_process(
1113
+ cls,
1114
+ command: StrOrBytesPath | Sequence[StrOrBytesPath],
1115
+ *,
1116
+ stdin: int | IO[Any] | None,
1117
+ stdout: int | IO[Any] | None,
1118
+ stderr: int | IO[Any] | None,
1119
+ **kwargs: Any,
1120
+ ) -> Process:
1121
+ def convert_item(item: StrOrBytesPath) -> str:
1122
+ str_or_bytes = os.fspath(item)
1123
+ if isinstance(str_or_bytes, str):
1124
+ return str_or_bytes
1125
+ else:
1126
+ return os.fsdecode(str_or_bytes)
1127
+
1128
+ if isinstance(command, (str, bytes, PathLike)):
1129
+ process = await trio.lowlevel.open_process(
1130
+ convert_item(command),
1131
+ stdin=stdin,
1132
+ stdout=stdout,
1133
+ stderr=stderr,
1134
+ shell=True,
1135
+ **kwargs,
1136
+ )
1137
+ else:
1138
+ process = await trio.lowlevel.open_process(
1139
+ [convert_item(item) for item in command],
1140
+ stdin=stdin,
1141
+ stdout=stdout,
1142
+ stderr=stderr,
1143
+ shell=False,
1144
+ **kwargs,
1145
+ )
1146
+
1147
+ stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None
1148
+ stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None
1149
+ stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None
1150
+ return Process(process, stdin_stream, stdout_stream, stderr_stream)
1151
+
1152
+ @classmethod
1153
+ def setup_process_pool_exit_at_shutdown(cls, workers: set[abc.Process]) -> None:
1154
+ trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers)
1155
+
1156
+ @classmethod
1157
+ async def connect_tcp(
1158
+ cls, host: str, port: int, local_address: IPSockAddrType | None = None
1159
+ ) -> SocketStream:
1160
+ family = socket.AF_INET6 if ":" in host else socket.AF_INET
1161
+ trio_socket = trio.socket.socket(family)
1162
+ trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
1163
+ if local_address:
1164
+ await trio_socket.bind(local_address)
1165
+
1166
+ try:
1167
+ await trio_socket.connect((host, port))
1168
+ except BaseException:
1169
+ trio_socket.close()
1170
+ raise
1171
+
1172
+ return SocketStream(trio_socket)
1173
+
1174
+ @classmethod
1175
+ async def connect_unix(cls, path: str | bytes) -> abc.UNIXSocketStream:
1176
+ trio_socket = trio.socket.socket(socket.AF_UNIX)
1177
+ try:
1178
+ await trio_socket.connect(path)
1179
+ except BaseException:
1180
+ trio_socket.close()
1181
+ raise
1182
+
1183
+ return UNIXSocketStream(trio_socket)
1184
+
1185
+ @classmethod
1186
+ def create_tcp_listener(cls, sock: socket.socket) -> abc.SocketListener:
1187
+ return TCPSocketListener(sock)
1188
+
1189
+ @classmethod
1190
+ def create_unix_listener(cls, sock: socket.socket) -> abc.SocketListener:
1191
+ return UNIXSocketListener(sock)
1192
+
1193
+ @classmethod
1194
+ async def create_udp_socket(
1195
+ cls,
1196
+ family: socket.AddressFamily,
1197
+ local_address: IPSockAddrType | None,
1198
+ remote_address: IPSockAddrType | None,
1199
+ reuse_port: bool,
1200
+ ) -> UDPSocket | ConnectedUDPSocket:
1201
+ trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM)
1202
+
1203
+ if reuse_port:
1204
+ trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
1205
+
1206
+ if local_address:
1207
+ await trio_socket.bind(local_address)
1208
+
1209
+ if remote_address:
1210
+ await trio_socket.connect(remote_address)
1211
+ return ConnectedUDPSocket(trio_socket)
1212
+ else:
1213
+ return UDPSocket(trio_socket)
1214
+
1215
+ @classmethod
1216
+ @overload
1217
+ async def create_unix_datagram_socket(
1218
+ cls, raw_socket: socket.socket, remote_path: None
1219
+ ) -> abc.UNIXDatagramSocket: ...
1220
+
1221
+ @classmethod
1222
+ @overload
1223
+ async def create_unix_datagram_socket(
1224
+ cls, raw_socket: socket.socket, remote_path: str | bytes
1225
+ ) -> abc.ConnectedUNIXDatagramSocket: ...
1226
+
1227
+ @classmethod
1228
+ async def create_unix_datagram_socket(
1229
+ cls, raw_socket: socket.socket, remote_path: str | bytes | None
1230
+ ) -> abc.UNIXDatagramSocket | abc.ConnectedUNIXDatagramSocket:
1231
+ trio_socket = trio.socket.from_stdlib_socket(raw_socket)
1232
+
1233
+ if remote_path:
1234
+ await trio_socket.connect(remote_path)
1235
+ return ConnectedUNIXDatagramSocket(trio_socket)
1236
+ else:
1237
+ return UNIXDatagramSocket(trio_socket)
1238
+
1239
+ @classmethod
1240
+ async def getaddrinfo(
1241
+ cls,
1242
+ host: bytes | str | None,
1243
+ port: str | int | None,
1244
+ *,
1245
+ family: int | AddressFamily = 0,
1246
+ type: int | SocketKind = 0,
1247
+ proto: int = 0,
1248
+ flags: int = 0,
1249
+ ) -> list[
1250
+ tuple[
1251
+ AddressFamily,
1252
+ SocketKind,
1253
+ int,
1254
+ str,
1255
+ tuple[str, int] | tuple[str, int, int, int],
1256
+ ]
1257
+ ]:
1258
+ return await trio.socket.getaddrinfo(host, port, family, type, proto, flags)
1259
+
1260
+ @classmethod
1261
+ async def getnameinfo(
1262
+ cls, sockaddr: IPSockAddrType, flags: int = 0
1263
+ ) -> tuple[str, str]:
1264
+ return await trio.socket.getnameinfo(sockaddr, flags)
1265
+
1266
+ @classmethod
1267
+ async def wait_readable(cls, obj: HasFileno | int) -> None:
1268
+ try:
1269
+ await wait_readable(obj)
1270
+ except trio.ClosedResourceError as exc:
1271
+ raise ClosedResourceError().with_traceback(exc.__traceback__) from None
1272
+ except trio.BusyResourceError:
1273
+ raise BusyResourceError("reading from") from None
1274
+
1275
+ @classmethod
1276
+ async def wait_writable(cls, obj: HasFileno | int) -> None:
1277
+ try:
1278
+ await wait_writable(obj)
1279
+ except trio.ClosedResourceError as exc:
1280
+ raise ClosedResourceError().with_traceback(exc.__traceback__) from None
1281
+ except trio.BusyResourceError:
1282
+ raise BusyResourceError("writing to") from None
1283
+
1284
+ @classmethod
1285
+ def current_default_thread_limiter(cls) -> CapacityLimiter:
1286
+ try:
1287
+ return _capacity_limiter_wrapper.get()
1288
+ except LookupError:
1289
+ limiter = CapacityLimiter(
1290
+ original=trio.to_thread.current_default_thread_limiter()
1291
+ )
1292
+ _capacity_limiter_wrapper.set(limiter)
1293
+ return limiter
1294
+
1295
+ @classmethod
1296
+ def open_signal_receiver(
1297
+ cls, *signals: Signals
1298
+ ) -> AbstractContextManager[AsyncIterator[Signals]]:
1299
+ return _SignalReceiver(signals)
1300
+
1301
+ @classmethod
1302
+ def get_current_task(cls) -> TaskInfo:
1303
+ task = current_task()
1304
+ return TrioTaskInfo(task)
1305
+
1306
+ @classmethod
1307
+ def get_running_tasks(cls) -> Sequence[TaskInfo]:
1308
+ root_task = current_root_task()
1309
+ assert root_task
1310
+ task_infos = [TrioTaskInfo(root_task)]
1311
+ nurseries = root_task.child_nurseries
1312
+ while nurseries:
1313
+ new_nurseries: list[trio.Nursery] = []
1314
+ for nursery in nurseries:
1315
+ for task in nursery.child_tasks:
1316
+ task_infos.append(TrioTaskInfo(task))
1317
+ new_nurseries.extend(task.child_nurseries)
1318
+
1319
+ nurseries = new_nurseries
1320
+
1321
+ return task_infos
1322
+
1323
+ @classmethod
1324
+ async def wait_all_tasks_blocked(cls) -> None:
1325
+ from trio.testing import wait_all_tasks_blocked
1326
+
1327
+ await wait_all_tasks_blocked()
1328
+
1329
+ @classmethod
1330
+ def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
1331
+ return TestRunner(**options)
1332
+
1333
+
1334
+ backend_class = TrioBackend
.venv/lib/python3.11/site-packages/anyio/_core/__init__.py ADDED
File without changes
.venv/lib/python3.11/site-packages/anyio/_core/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (184 Bytes). View file
 
.venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_asyncio_selector_thread.cpython-311.pyc ADDED
Binary file (9.09 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_exceptions.cpython-311.pyc ADDED
Binary file (7.19 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_resources.cpython-311.pyc ADDED
Binary file (1.11 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_signals.cpython-311.pyc ADDED
Binary file (1.35 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_sockets.cpython-311.pyc ADDED
Binary file (32.4 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_subprocesses.cpython-311.pyc ADDED
Binary file (9.57 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_synchronization.cpython-311.pyc ADDED
Binary file (35.7 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_tasks.cpython-311.pyc ADDED
Binary file (7.69 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_typedattr.cpython-311.pyc ADDED
Binary file (4.41 kB). View file
 
.venv/lib/python3.11/site-packages/anyio/_core/_asyncio_selector_thread.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import socket
5
+ import threading
6
+ from collections.abc import Callable
7
+ from selectors import EVENT_READ, EVENT_WRITE, DefaultSelector
8
+ from typing import TYPE_CHECKING, Any
9
+
10
+ if TYPE_CHECKING:
11
+ from _typeshed import FileDescriptorLike
12
+
13
+ _selector_lock = threading.Lock()
14
+ _selector: Selector | None = None
15
+
16
+
17
+ class Selector:
18
+ def __init__(self) -> None:
19
+ self._thread = threading.Thread(target=self.run, name="AnyIO socket selector")
20
+ self._selector = DefaultSelector()
21
+ self._send, self._receive = socket.socketpair()
22
+ self._send.setblocking(False)
23
+ self._receive.setblocking(False)
24
+ # This somewhat reduces the amount of memory wasted queueing up data
25
+ # for wakeups. With these settings, maximum number of 1-byte sends
26
+ # before getting BlockingIOError:
27
+ # Linux 4.8: 6
28
+ # macOS (darwin 15.5): 1
29
+ # Windows 10: 525347
30
+ # Windows you're weird. (And on Windows setting SNDBUF to 0 makes send
31
+ # blocking, even on non-blocking sockets, so don't do that.)
32
+ self._receive.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)
33
+ self._send.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
34
+ # On Windows this is a TCP socket so this might matter. On other
35
+ # platforms this fails b/c AF_UNIX sockets aren't actually TCP.
36
+ try:
37
+ self._send.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
38
+ except OSError:
39
+ pass
40
+
41
+ self._selector.register(self._receive, EVENT_READ)
42
+ self._closed = False
43
+
44
+ def start(self) -> None:
45
+ self._thread.start()
46
+ threading._register_atexit(self._stop) # type: ignore[attr-defined]
47
+
48
+ def _stop(self) -> None:
49
+ global _selector
50
+ self._closed = True
51
+ self._notify_self()
52
+ self._send.close()
53
+ self._thread.join()
54
+ self._selector.unregister(self._receive)
55
+ self._receive.close()
56
+ self._selector.close()
57
+ _selector = None
58
+ assert (
59
+ not self._selector.get_map()
60
+ ), "selector still has registered file descriptors after shutdown"
61
+
62
+ def _notify_self(self) -> None:
63
+ try:
64
+ self._send.send(b"\x00")
65
+ except BlockingIOError:
66
+ pass
67
+
68
+ def add_reader(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:
69
+ loop = asyncio.get_running_loop()
70
+ try:
71
+ key = self._selector.get_key(fd)
72
+ except KeyError:
73
+ self._selector.register(fd, EVENT_READ, {EVENT_READ: (loop, callback)})
74
+ else:
75
+ if EVENT_READ in key.data:
76
+ raise ValueError(
77
+ "this file descriptor is already registered for reading"
78
+ )
79
+
80
+ key.data[EVENT_READ] = loop, callback
81
+ self._selector.modify(fd, key.events | EVENT_READ, key.data)
82
+
83
+ self._notify_self()
84
+
85
+ def add_writer(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:
86
+ loop = asyncio.get_running_loop()
87
+ try:
88
+ key = self._selector.get_key(fd)
89
+ except KeyError:
90
+ self._selector.register(fd, EVENT_WRITE, {EVENT_WRITE: (loop, callback)})
91
+ else:
92
+ if EVENT_WRITE in key.data:
93
+ raise ValueError(
94
+ "this file descriptor is already registered for writing"
95
+ )
96
+
97
+ key.data[EVENT_WRITE] = loop, callback
98
+ self._selector.modify(fd, key.events | EVENT_WRITE, key.data)
99
+
100
+ self._notify_self()
101
+
102
+ def remove_reader(self, fd: FileDescriptorLike) -> bool:
103
+ try:
104
+ key = self._selector.get_key(fd)
105
+ except KeyError:
106
+ return False
107
+
108
+ if new_events := key.events ^ EVENT_READ:
109
+ del key.data[EVENT_READ]
110
+ self._selector.modify(fd, new_events, key.data)
111
+ else:
112
+ self._selector.unregister(fd)
113
+
114
+ return True
115
+
116
+ def remove_writer(self, fd: FileDescriptorLike) -> bool:
117
+ try:
118
+ key = self._selector.get_key(fd)
119
+ except KeyError:
120
+ return False
121
+
122
+ if new_events := key.events ^ EVENT_WRITE:
123
+ del key.data[EVENT_WRITE]
124
+ self._selector.modify(fd, new_events, key.data)
125
+ else:
126
+ self._selector.unregister(fd)
127
+
128
+ return True
129
+
130
+ def run(self) -> None:
131
+ while not self._closed:
132
+ for key, events in self._selector.select():
133
+ if key.fileobj is self._receive:
134
+ try:
135
+ while self._receive.recv(4096):
136
+ pass
137
+ except BlockingIOError:
138
+ pass
139
+
140
+ continue
141
+
142
+ if events & EVENT_READ:
143
+ loop, callback = key.data[EVENT_READ]
144
+ self.remove_reader(key.fd)
145
+ try:
146
+ loop.call_soon_threadsafe(callback)
147
+ except RuntimeError:
148
+ pass # the loop was already closed
149
+
150
+ if events & EVENT_WRITE:
151
+ loop, callback = key.data[EVENT_WRITE]
152
+ self.remove_writer(key.fd)
153
+ try:
154
+ loop.call_soon_threadsafe(callback)
155
+ except RuntimeError:
156
+ pass # the loop was already closed
157
+
158
+
159
+ def get_selector() -> Selector:
160
+ global _selector
161
+
162
+ with _selector_lock:
163
+ if _selector is None:
164
+ _selector = Selector()
165
+ _selector.start()
166
+
167
+ return _selector
.venv/lib/python3.11/site-packages/anyio/_core/_eventloop.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ import sys
5
+ import threading
6
+ from collections.abc import Awaitable, Callable, Generator
7
+ from contextlib import contextmanager
8
+ from importlib import import_module
9
+ from typing import TYPE_CHECKING, Any, TypeVar
10
+
11
+ import sniffio
12
+
13
+ if sys.version_info >= (3, 11):
14
+ from typing import TypeVarTuple, Unpack
15
+ else:
16
+ from typing_extensions import TypeVarTuple, Unpack
17
+
18
+ if TYPE_CHECKING:
19
+ from ..abc import AsyncBackend
20
+
21
+ # This must be updated when new backends are introduced
22
+ BACKENDS = "asyncio", "trio"
23
+
24
+ T_Retval = TypeVar("T_Retval")
25
+ PosArgsT = TypeVarTuple("PosArgsT")
26
+
27
+ threadlocals = threading.local()
28
+ loaded_backends: dict[str, type[AsyncBackend]] = {}
29
+
30
+
31
+ def run(
32
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
33
+ *args: Unpack[PosArgsT],
34
+ backend: str = "asyncio",
35
+ backend_options: dict[str, Any] | None = None,
36
+ ) -> T_Retval:
37
+ """
38
+ Run the given coroutine function in an asynchronous event loop.
39
+
40
+ The current thread must not be already running an event loop.
41
+
42
+ :param func: a coroutine function
43
+ :param args: positional arguments to ``func``
44
+ :param backend: name of the asynchronous event loop implementation – currently
45
+ either ``asyncio`` or ``trio``
46
+ :param backend_options: keyword arguments to call the backend ``run()``
47
+ implementation with (documented :ref:`here <backend options>`)
48
+ :return: the return value of the coroutine function
49
+ :raises RuntimeError: if an asynchronous event loop is already running in this
50
+ thread
51
+ :raises LookupError: if the named backend is not found
52
+
53
+ """
54
+ try:
55
+ asynclib_name = sniffio.current_async_library()
56
+ except sniffio.AsyncLibraryNotFoundError:
57
+ pass
58
+ else:
59
+ raise RuntimeError(f"Already running {asynclib_name} in this thread")
60
+
61
+ try:
62
+ async_backend = get_async_backend(backend)
63
+ except ImportError as exc:
64
+ raise LookupError(f"No such backend: {backend}") from exc
65
+
66
+ token = None
67
+ if sniffio.current_async_library_cvar.get(None) is None:
68
+ # Since we're in control of the event loop, we can cache the name of the async
69
+ # library
70
+ token = sniffio.current_async_library_cvar.set(backend)
71
+
72
+ try:
73
+ backend_options = backend_options or {}
74
+ return async_backend.run(func, args, {}, backend_options)
75
+ finally:
76
+ if token:
77
+ sniffio.current_async_library_cvar.reset(token)
78
+
79
+
80
+ async def sleep(delay: float) -> None:
81
+ """
82
+ Pause the current task for the specified duration.
83
+
84
+ :param delay: the duration, in seconds
85
+
86
+ """
87
+ return await get_async_backend().sleep(delay)
88
+
89
+
90
+ async def sleep_forever() -> None:
91
+ """
92
+ Pause the current task until it's cancelled.
93
+
94
+ This is a shortcut for ``sleep(math.inf)``.
95
+
96
+ .. versionadded:: 3.1
97
+
98
+ """
99
+ await sleep(math.inf)
100
+
101
+
102
+ async def sleep_until(deadline: float) -> None:
103
+ """
104
+ Pause the current task until the given time.
105
+
106
+ :param deadline: the absolute time to wake up at (according to the internal
107
+ monotonic clock of the event loop)
108
+
109
+ .. versionadded:: 3.1
110
+
111
+ """
112
+ now = current_time()
113
+ await sleep(max(deadline - now, 0))
114
+
115
+
116
+ def current_time() -> float:
117
+ """
118
+ Return the current value of the event loop's internal clock.
119
+
120
+ :return: the clock value (seconds)
121
+
122
+ """
123
+ return get_async_backend().current_time()
124
+
125
+
126
+ def get_all_backends() -> tuple[str, ...]:
127
+ """Return a tuple of the names of all built-in backends."""
128
+ return BACKENDS
129
+
130
+
131
+ def get_cancelled_exc_class() -> type[BaseException]:
132
+ """Return the current async library's cancellation exception class."""
133
+ return get_async_backend().cancelled_exception_class()
134
+
135
+
136
+ #
137
+ # Private API
138
+ #
139
+
140
+
141
+ @contextmanager
142
+ def claim_worker_thread(
143
+ backend_class: type[AsyncBackend], token: object
144
+ ) -> Generator[Any, None, None]:
145
+ threadlocals.current_async_backend = backend_class
146
+ threadlocals.current_token = token
147
+ try:
148
+ yield
149
+ finally:
150
+ del threadlocals.current_async_backend
151
+ del threadlocals.current_token
152
+
153
+
154
+ def get_async_backend(asynclib_name: str | None = None) -> type[AsyncBackend]:
155
+ if asynclib_name is None:
156
+ asynclib_name = sniffio.current_async_library()
157
+
158
+ # We use our own dict instead of sys.modules to get the already imported back-end
159
+ # class because the appropriate modules in sys.modules could potentially be only
160
+ # partially initialized
161
+ try:
162
+ return loaded_backends[asynclib_name]
163
+ except KeyError:
164
+ module = import_module(f"anyio._backends._{asynclib_name}")
165
+ loaded_backends[asynclib_name] = module.backend_class
166
+ return module.backend_class
.venv/lib/python3.11/site-packages/anyio/_core/_exceptions.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ from collections.abc import Generator
5
+ from textwrap import dedent
6
+ from typing import Any
7
+
8
+ if sys.version_info < (3, 11):
9
+ from exceptiongroup import BaseExceptionGroup
10
+
11
+
12
+ class BrokenResourceError(Exception):
13
+ """
14
+ Raised when trying to use a resource that has been rendered unusable due to external
15
+ causes (e.g. a send stream whose peer has disconnected).
16
+ """
17
+
18
+
19
+ class BrokenWorkerProcess(Exception):
20
+ """
21
+ Raised by :meth:`~anyio.to_process.run_sync` if the worker process terminates abruptly or
22
+ otherwise misbehaves.
23
+ """
24
+
25
+
26
+ class BrokenWorkerIntepreter(Exception):
27
+ """
28
+ Raised by :meth:`~anyio.to_interpreter.run_sync` if an unexpected exception is
29
+ raised in the subinterpreter.
30
+ """
31
+
32
+ def __init__(self, excinfo: Any):
33
+ # This was adapted from concurrent.futures.interpreter.ExecutionFailed
34
+ msg = excinfo.formatted
35
+ if not msg:
36
+ if excinfo.type and excinfo.msg:
37
+ msg = f"{excinfo.type.__name__}: {excinfo.msg}"
38
+ else:
39
+ msg = excinfo.type.__name__ or excinfo.msg
40
+
41
+ super().__init__(msg)
42
+ self.excinfo = excinfo
43
+
44
+ def __str__(self) -> str:
45
+ try:
46
+ formatted = self.excinfo.errdisplay
47
+ except Exception:
48
+ return super().__str__()
49
+ else:
50
+ return dedent(
51
+ f"""
52
+ {super().__str__()}
53
+
54
+ Uncaught in the interpreter:
55
+
56
+ {formatted}
57
+ """.strip()
58
+ )
59
+
60
+
61
+ class BusyResourceError(Exception):
62
+ """
63
+ Raised when two tasks are trying to read from or write to the same resource
64
+ concurrently.
65
+ """
66
+
67
+ def __init__(self, action: str):
68
+ super().__init__(f"Another task is already {action} this resource")
69
+
70
+
71
+ class ClosedResourceError(Exception):
72
+ """Raised when trying to use a resource that has been closed."""
73
+
74
+
75
+ class DelimiterNotFound(Exception):
76
+ """
77
+ Raised during
78
+ :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
79
+ maximum number of bytes has been read without the delimiter being found.
80
+ """
81
+
82
+ def __init__(self, max_bytes: int) -> None:
83
+ super().__init__(
84
+ f"The delimiter was not found among the first {max_bytes} bytes"
85
+ )
86
+
87
+
88
+ class EndOfStream(Exception):
89
+ """
90
+ Raised when trying to read from a stream that has been closed from the other end.
91
+ """
92
+
93
+
94
+ class IncompleteRead(Exception):
95
+ """
96
+ Raised during
97
+ :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or
98
+ :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
99
+ connection is closed before the requested amount of bytes has been read.
100
+ """
101
+
102
+ def __init__(self) -> None:
103
+ super().__init__(
104
+ "The stream was closed before the read operation could be completed"
105
+ )
106
+
107
+
108
+ class TypedAttributeLookupError(LookupError):
109
+ """
110
+ Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute
111
+ is not found and no default value has been given.
112
+ """
113
+
114
+
115
+ class WouldBlock(Exception):
116
+ """Raised by ``X_nowait`` functions if ``X()`` would block."""
117
+
118
+
119
+ def iterate_exceptions(
120
+ exception: BaseException,
121
+ ) -> Generator[BaseException, None, None]:
122
+ if isinstance(exception, BaseExceptionGroup):
123
+ for exc in exception.exceptions:
124
+ yield from iterate_exceptions(exc)
125
+ else:
126
+ yield exception
.venv/lib/python3.11/site-packages/anyio/_core/_fileio.py ADDED
@@ -0,0 +1,729 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import pathlib
5
+ import sys
6
+ from collections.abc import (
7
+ AsyncIterator,
8
+ Callable,
9
+ Iterable,
10
+ Iterator,
11
+ Sequence,
12
+ )
13
+ from dataclasses import dataclass
14
+ from functools import partial
15
+ from os import PathLike
16
+ from typing import (
17
+ IO,
18
+ TYPE_CHECKING,
19
+ Any,
20
+ AnyStr,
21
+ Final,
22
+ Generic,
23
+ overload,
24
+ )
25
+
26
+ from .. import to_thread
27
+ from ..abc import AsyncResource
28
+
29
+ if TYPE_CHECKING:
30
+ from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
31
+ else:
32
+ ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object
33
+
34
+
35
+ class AsyncFile(AsyncResource, Generic[AnyStr]):
36
+ """
37
+ An asynchronous file object.
38
+
39
+ This class wraps a standard file object and provides async friendly versions of the
40
+ following blocking methods (where available on the original file object):
41
+
42
+ * read
43
+ * read1
44
+ * readline
45
+ * readlines
46
+ * readinto
47
+ * readinto1
48
+ * write
49
+ * writelines
50
+ * truncate
51
+ * seek
52
+ * tell
53
+ * flush
54
+
55
+ All other methods are directly passed through.
56
+
57
+ This class supports the asynchronous context manager protocol which closes the
58
+ underlying file at the end of the context block.
59
+
60
+ This class also supports asynchronous iteration::
61
+
62
+ async with await open_file(...) as f:
63
+ async for line in f:
64
+ print(line)
65
+ """
66
+
67
+ def __init__(self, fp: IO[AnyStr]) -> None:
68
+ self._fp: Any = fp
69
+
70
+ def __getattr__(self, name: str) -> object:
71
+ return getattr(self._fp, name)
72
+
73
+ @property
74
+ def wrapped(self) -> IO[AnyStr]:
75
+ """The wrapped file object."""
76
+ return self._fp
77
+
78
+ async def __aiter__(self) -> AsyncIterator[AnyStr]:
79
+ while True:
80
+ line = await self.readline()
81
+ if line:
82
+ yield line
83
+ else:
84
+ break
85
+
86
+ async def aclose(self) -> None:
87
+ return await to_thread.run_sync(self._fp.close)
88
+
89
+ async def read(self, size: int = -1) -> AnyStr:
90
+ return await to_thread.run_sync(self._fp.read, size)
91
+
92
+ async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes:
93
+ return await to_thread.run_sync(self._fp.read1, size)
94
+
95
+ async def readline(self) -> AnyStr:
96
+ return await to_thread.run_sync(self._fp.readline)
97
+
98
+ async def readlines(self) -> list[AnyStr]:
99
+ return await to_thread.run_sync(self._fp.readlines)
100
+
101
+ async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
102
+ return await to_thread.run_sync(self._fp.readinto, b)
103
+
104
+ async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
105
+ return await to_thread.run_sync(self._fp.readinto1, b)
106
+
107
+ @overload
108
+ async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int: ...
109
+
110
+ @overload
111
+ async def write(self: AsyncFile[str], b: str) -> int: ...
112
+
113
+ async def write(self, b: ReadableBuffer | str) -> int:
114
+ return await to_thread.run_sync(self._fp.write, b)
115
+
116
+ @overload
117
+ async def writelines(
118
+ self: AsyncFile[bytes], lines: Iterable[ReadableBuffer]
119
+ ) -> None: ...
120
+
121
+ @overload
122
+ async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None: ...
123
+
124
+ async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None:
125
+ return await to_thread.run_sync(self._fp.writelines, lines)
126
+
127
+ async def truncate(self, size: int | None = None) -> int:
128
+ return await to_thread.run_sync(self._fp.truncate, size)
129
+
130
+ async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int:
131
+ return await to_thread.run_sync(self._fp.seek, offset, whence)
132
+
133
+ async def tell(self) -> int:
134
+ return await to_thread.run_sync(self._fp.tell)
135
+
136
+ async def flush(self) -> None:
137
+ return await to_thread.run_sync(self._fp.flush)
138
+
139
+
140
+ @overload
141
+ async def open_file(
142
+ file: str | PathLike[str] | int,
143
+ mode: OpenBinaryMode,
144
+ buffering: int = ...,
145
+ encoding: str | None = ...,
146
+ errors: str | None = ...,
147
+ newline: str | None = ...,
148
+ closefd: bool = ...,
149
+ opener: Callable[[str, int], int] | None = ...,
150
+ ) -> AsyncFile[bytes]: ...
151
+
152
+
153
+ @overload
154
+ async def open_file(
155
+ file: str | PathLike[str] | int,
156
+ mode: OpenTextMode = ...,
157
+ buffering: int = ...,
158
+ encoding: str | None = ...,
159
+ errors: str | None = ...,
160
+ newline: str | None = ...,
161
+ closefd: bool = ...,
162
+ opener: Callable[[str, int], int] | None = ...,
163
+ ) -> AsyncFile[str]: ...
164
+
165
+
166
+ async def open_file(
167
+ file: str | PathLike[str] | int,
168
+ mode: str = "r",
169
+ buffering: int = -1,
170
+ encoding: str | None = None,
171
+ errors: str | None = None,
172
+ newline: str | None = None,
173
+ closefd: bool = True,
174
+ opener: Callable[[str, int], int] | None = None,
175
+ ) -> AsyncFile[Any]:
176
+ """
177
+ Open a file asynchronously.
178
+
179
+ The arguments are exactly the same as for the builtin :func:`open`.
180
+
181
+ :return: an asynchronous file object
182
+
183
+ """
184
+ fp = await to_thread.run_sync(
185
+ open, file, mode, buffering, encoding, errors, newline, closefd, opener
186
+ )
187
+ return AsyncFile(fp)
188
+
189
+
190
+ def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]:
191
+ """
192
+ Wrap an existing file as an asynchronous file.
193
+
194
+ :param file: an existing file-like object
195
+ :return: an asynchronous file object
196
+
197
+ """
198
+ return AsyncFile(file)
199
+
200
+
201
+ @dataclass(eq=False)
202
+ class _PathIterator(AsyncIterator["Path"]):
203
+ iterator: Iterator[PathLike[str]]
204
+
205
+ async def __anext__(self) -> Path:
206
+ nextval = await to_thread.run_sync(
207
+ next, self.iterator, None, abandon_on_cancel=True
208
+ )
209
+ if nextval is None:
210
+ raise StopAsyncIteration from None
211
+
212
+ return Path(nextval)
213
+
214
+
215
+ class Path:
216
+ """
217
+ An asynchronous version of :class:`pathlib.Path`.
218
+
219
+ This class cannot be substituted for :class:`pathlib.Path` or
220
+ :class:`pathlib.PurePath`, but it is compatible with the :class:`os.PathLike`
221
+ interface.
222
+
223
+ It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for
224
+ the deprecated :meth:`~pathlib.Path.link_to` method.
225
+
226
+ Some methods may be unavailable or have limited functionality, based on the Python
227
+ version:
228
+
229
+ * :meth:`~pathlib.Path.copy` (available on Python 3.14 or later)
230
+ * :meth:`~pathlib.Path.copy_into` (available on Python 3.14 or later)
231
+ * :meth:`~pathlib.Path.from_uri` (available on Python 3.13 or later)
232
+ * :meth:`~pathlib.Path.full_match` (available on Python 3.13 or later)
233
+ * :meth:`~pathlib.Path.is_junction` (available on Python 3.12 or later)
234
+ * :meth:`~pathlib.Path.match` (the ``case_sensitive`` paramater is only available on
235
+ Python 3.13 or later)
236
+ * :meth:`~pathlib.Path.move` (available on Python 3.14 or later)
237
+ * :meth:`~pathlib.Path.move_into` (available on Python 3.14 or later)
238
+ * :meth:`~pathlib.Path.relative_to` (the ``walk_up`` parameter is only available on
239
+ Python 3.12 or later)
240
+ * :meth:`~pathlib.Path.walk` (available on Python 3.12 or later)
241
+
242
+ Any methods that do disk I/O need to be awaited on. These methods are:
243
+
244
+ * :meth:`~pathlib.Path.absolute`
245
+ * :meth:`~pathlib.Path.chmod`
246
+ * :meth:`~pathlib.Path.cwd`
247
+ * :meth:`~pathlib.Path.exists`
248
+ * :meth:`~pathlib.Path.expanduser`
249
+ * :meth:`~pathlib.Path.group`
250
+ * :meth:`~pathlib.Path.hardlink_to`
251
+ * :meth:`~pathlib.Path.home`
252
+ * :meth:`~pathlib.Path.is_block_device`
253
+ * :meth:`~pathlib.Path.is_char_device`
254
+ * :meth:`~pathlib.Path.is_dir`
255
+ * :meth:`~pathlib.Path.is_fifo`
256
+ * :meth:`~pathlib.Path.is_file`
257
+ * :meth:`~pathlib.Path.is_junction`
258
+ * :meth:`~pathlib.Path.is_mount`
259
+ * :meth:`~pathlib.Path.is_socket`
260
+ * :meth:`~pathlib.Path.is_symlink`
261
+ * :meth:`~pathlib.Path.lchmod`
262
+ * :meth:`~pathlib.Path.lstat`
263
+ * :meth:`~pathlib.Path.mkdir`
264
+ * :meth:`~pathlib.Path.open`
265
+ * :meth:`~pathlib.Path.owner`
266
+ * :meth:`~pathlib.Path.read_bytes`
267
+ * :meth:`~pathlib.Path.read_text`
268
+ * :meth:`~pathlib.Path.readlink`
269
+ * :meth:`~pathlib.Path.rename`
270
+ * :meth:`~pathlib.Path.replace`
271
+ * :meth:`~pathlib.Path.resolve`
272
+ * :meth:`~pathlib.Path.rmdir`
273
+ * :meth:`~pathlib.Path.samefile`
274
+ * :meth:`~pathlib.Path.stat`
275
+ * :meth:`~pathlib.Path.symlink_to`
276
+ * :meth:`~pathlib.Path.touch`
277
+ * :meth:`~pathlib.Path.unlink`
278
+ * :meth:`~pathlib.Path.walk`
279
+ * :meth:`~pathlib.Path.write_bytes`
280
+ * :meth:`~pathlib.Path.write_text`
281
+
282
+ Additionally, the following methods return an async iterator yielding
283
+ :class:`~.Path` objects:
284
+
285
+ * :meth:`~pathlib.Path.glob`
286
+ * :meth:`~pathlib.Path.iterdir`
287
+ * :meth:`~pathlib.Path.rglob`
288
+ """
289
+
290
+ __slots__ = "_path", "__weakref__"
291
+
292
+ __weakref__: Any
293
+
294
+ def __init__(self, *args: str | PathLike[str]) -> None:
295
+ self._path: Final[pathlib.Path] = pathlib.Path(*args)
296
+
297
+ def __fspath__(self) -> str:
298
+ return self._path.__fspath__()
299
+
300
+ def __str__(self) -> str:
301
+ return self._path.__str__()
302
+
303
+ def __repr__(self) -> str:
304
+ return f"{self.__class__.__name__}({self.as_posix()!r})"
305
+
306
+ def __bytes__(self) -> bytes:
307
+ return self._path.__bytes__()
308
+
309
+ def __hash__(self) -> int:
310
+ return self._path.__hash__()
311
+
312
+ def __eq__(self, other: object) -> bool:
313
+ target = other._path if isinstance(other, Path) else other
314
+ return self._path.__eq__(target)
315
+
316
+ def __lt__(self, other: pathlib.PurePath | Path) -> bool:
317
+ target = other._path if isinstance(other, Path) else other
318
+ return self._path.__lt__(target)
319
+
320
+ def __le__(self, other: pathlib.PurePath | Path) -> bool:
321
+ target = other._path if isinstance(other, Path) else other
322
+ return self._path.__le__(target)
323
+
324
+ def __gt__(self, other: pathlib.PurePath | Path) -> bool:
325
+ target = other._path if isinstance(other, Path) else other
326
+ return self._path.__gt__(target)
327
+
328
+ def __ge__(self, other: pathlib.PurePath | Path) -> bool:
329
+ target = other._path if isinstance(other, Path) else other
330
+ return self._path.__ge__(target)
331
+
332
+ def __truediv__(self, other: str | PathLike[str]) -> Path:
333
+ return Path(self._path / other)
334
+
335
+ def __rtruediv__(self, other: str | PathLike[str]) -> Path:
336
+ return Path(other) / self
337
+
338
+ @property
339
+ def parts(self) -> tuple[str, ...]:
340
+ return self._path.parts
341
+
342
+ @property
343
+ def drive(self) -> str:
344
+ return self._path.drive
345
+
346
+ @property
347
+ def root(self) -> str:
348
+ return self._path.root
349
+
350
+ @property
351
+ def anchor(self) -> str:
352
+ return self._path.anchor
353
+
354
+ @property
355
+ def parents(self) -> Sequence[Path]:
356
+ return tuple(Path(p) for p in self._path.parents)
357
+
358
+ @property
359
+ def parent(self) -> Path:
360
+ return Path(self._path.parent)
361
+
362
+ @property
363
+ def name(self) -> str:
364
+ return self._path.name
365
+
366
+ @property
367
+ def suffix(self) -> str:
368
+ return self._path.suffix
369
+
370
+ @property
371
+ def suffixes(self) -> list[str]:
372
+ return self._path.suffixes
373
+
374
+ @property
375
+ def stem(self) -> str:
376
+ return self._path.stem
377
+
378
+ async def absolute(self) -> Path:
379
+ path = await to_thread.run_sync(self._path.absolute)
380
+ return Path(path)
381
+
382
+ def as_posix(self) -> str:
383
+ return self._path.as_posix()
384
+
385
+ def as_uri(self) -> str:
386
+ return self._path.as_uri()
387
+
388
+ if sys.version_info >= (3, 13):
389
+ parser = pathlib.Path.parser
390
+
391
+ @classmethod
392
+ def from_uri(cls, uri: str) -> Path:
393
+ return Path(pathlib.Path.from_uri(uri))
394
+
395
+ def full_match(
396
+ self, path_pattern: str, *, case_sensitive: bool | None = None
397
+ ) -> bool:
398
+ return self._path.full_match(path_pattern, case_sensitive=case_sensitive)
399
+
400
+ def match(
401
+ self, path_pattern: str, *, case_sensitive: bool | None = None
402
+ ) -> bool:
403
+ return self._path.match(path_pattern, case_sensitive=case_sensitive)
404
+ else:
405
+
406
+ def match(self, path_pattern: str) -> bool:
407
+ return self._path.match(path_pattern)
408
+
409
+ if sys.version_info >= (3, 14):
410
+
411
+ async def copy(
412
+ self,
413
+ target: str | os.PathLike[str],
414
+ *,
415
+ follow_symlinks: bool = True,
416
+ dirs_exist_ok: bool = False,
417
+ preserve_metadata: bool = False,
418
+ ) -> Path:
419
+ func = partial(
420
+ self._path.copy,
421
+ follow_symlinks=follow_symlinks,
422
+ dirs_exist_ok=dirs_exist_ok,
423
+ preserve_metadata=preserve_metadata,
424
+ )
425
+ return Path(await to_thread.run_sync(func, target))
426
+
427
+ async def copy_into(
428
+ self,
429
+ target_dir: str | os.PathLike[str],
430
+ *,
431
+ follow_symlinks: bool = True,
432
+ dirs_exist_ok: bool = False,
433
+ preserve_metadata: bool = False,
434
+ ) -> Path:
435
+ func = partial(
436
+ self._path.copy_into,
437
+ follow_symlinks=follow_symlinks,
438
+ dirs_exist_ok=dirs_exist_ok,
439
+ preserve_metadata=preserve_metadata,
440
+ )
441
+ return Path(await to_thread.run_sync(func, target_dir))
442
+
443
+ async def move(self, target: str | os.PathLike[str]) -> Path:
444
+ # Upstream does not handle anyio.Path properly as a PathLike
445
+ target = pathlib.Path(target)
446
+ return Path(await to_thread.run_sync(self._path.move, target))
447
+
448
+ async def move_into(
449
+ self,
450
+ target_dir: str | os.PathLike[str],
451
+ ) -> Path:
452
+ return Path(await to_thread.run_sync(self._path.move_into, target_dir))
453
+
454
+ def is_relative_to(self, other: str | PathLike[str]) -> bool:
455
+ try:
456
+ self.relative_to(other)
457
+ return True
458
+ except ValueError:
459
+ return False
460
+
461
+ async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None:
462
+ func = partial(os.chmod, follow_symlinks=follow_symlinks)
463
+ return await to_thread.run_sync(func, self._path, mode)
464
+
465
+ @classmethod
466
+ async def cwd(cls) -> Path:
467
+ path = await to_thread.run_sync(pathlib.Path.cwd)
468
+ return cls(path)
469
+
470
+ async def exists(self) -> bool:
471
+ return await to_thread.run_sync(self._path.exists, abandon_on_cancel=True)
472
+
473
+ async def expanduser(self) -> Path:
474
+ return Path(
475
+ await to_thread.run_sync(self._path.expanduser, abandon_on_cancel=True)
476
+ )
477
+
478
+ def glob(self, pattern: str) -> AsyncIterator[Path]:
479
+ gen = self._path.glob(pattern)
480
+ return _PathIterator(gen)
481
+
482
+ async def group(self) -> str:
483
+ return await to_thread.run_sync(self._path.group, abandon_on_cancel=True)
484
+
485
+ async def hardlink_to(
486
+ self, target: str | bytes | PathLike[str] | PathLike[bytes]
487
+ ) -> None:
488
+ if isinstance(target, Path):
489
+ target = target._path
490
+
491
+ await to_thread.run_sync(os.link, target, self)
492
+
493
+ @classmethod
494
+ async def home(cls) -> Path:
495
+ home_path = await to_thread.run_sync(pathlib.Path.home)
496
+ return cls(home_path)
497
+
498
+ def is_absolute(self) -> bool:
499
+ return self._path.is_absolute()
500
+
501
+ async def is_block_device(self) -> bool:
502
+ return await to_thread.run_sync(
503
+ self._path.is_block_device, abandon_on_cancel=True
504
+ )
505
+
506
+ async def is_char_device(self) -> bool:
507
+ return await to_thread.run_sync(
508
+ self._path.is_char_device, abandon_on_cancel=True
509
+ )
510
+
511
+ async def is_dir(self) -> bool:
512
+ return await to_thread.run_sync(self._path.is_dir, abandon_on_cancel=True)
513
+
514
+ async def is_fifo(self) -> bool:
515
+ return await to_thread.run_sync(self._path.is_fifo, abandon_on_cancel=True)
516
+
517
+ async def is_file(self) -> bool:
518
+ return await to_thread.run_sync(self._path.is_file, abandon_on_cancel=True)
519
+
520
+ if sys.version_info >= (3, 12):
521
+
522
+ async def is_junction(self) -> bool:
523
+ return await to_thread.run_sync(self._path.is_junction)
524
+
525
+ async def is_mount(self) -> bool:
526
+ return await to_thread.run_sync(
527
+ os.path.ismount, self._path, abandon_on_cancel=True
528
+ )
529
+
530
+ def is_reserved(self) -> bool:
531
+ return self._path.is_reserved()
532
+
533
+ async def is_socket(self) -> bool:
534
+ return await to_thread.run_sync(self._path.is_socket, abandon_on_cancel=True)
535
+
536
+ async def is_symlink(self) -> bool:
537
+ return await to_thread.run_sync(self._path.is_symlink, abandon_on_cancel=True)
538
+
539
+ def iterdir(self) -> AsyncIterator[Path]:
540
+ gen = self._path.iterdir()
541
+ return _PathIterator(gen)
542
+
543
+ def joinpath(self, *args: str | PathLike[str]) -> Path:
544
+ return Path(self._path.joinpath(*args))
545
+
546
+ async def lchmod(self, mode: int) -> None:
547
+ await to_thread.run_sync(self._path.lchmod, mode)
548
+
549
+ async def lstat(self) -> os.stat_result:
550
+ return await to_thread.run_sync(self._path.lstat, abandon_on_cancel=True)
551
+
552
+ async def mkdir(
553
+ self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False
554
+ ) -> None:
555
+ await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok)
556
+
557
+ @overload
558
+ async def open(
559
+ self,
560
+ mode: OpenBinaryMode,
561
+ buffering: int = ...,
562
+ encoding: str | None = ...,
563
+ errors: str | None = ...,
564
+ newline: str | None = ...,
565
+ ) -> AsyncFile[bytes]: ...
566
+
567
+ @overload
568
+ async def open(
569
+ self,
570
+ mode: OpenTextMode = ...,
571
+ buffering: int = ...,
572
+ encoding: str | None = ...,
573
+ errors: str | None = ...,
574
+ newline: str | None = ...,
575
+ ) -> AsyncFile[str]: ...
576
+
577
+ async def open(
578
+ self,
579
+ mode: str = "r",
580
+ buffering: int = -1,
581
+ encoding: str | None = None,
582
+ errors: str | None = None,
583
+ newline: str | None = None,
584
+ ) -> AsyncFile[Any]:
585
+ fp = await to_thread.run_sync(
586
+ self._path.open, mode, buffering, encoding, errors, newline
587
+ )
588
+ return AsyncFile(fp)
589
+
590
+ async def owner(self) -> str:
591
+ return await to_thread.run_sync(self._path.owner, abandon_on_cancel=True)
592
+
593
+ async def read_bytes(self) -> bytes:
594
+ return await to_thread.run_sync(self._path.read_bytes)
595
+
596
+ async def read_text(
597
+ self, encoding: str | None = None, errors: str | None = None
598
+ ) -> str:
599
+ return await to_thread.run_sync(self._path.read_text, encoding, errors)
600
+
601
+ if sys.version_info >= (3, 12):
602
+
603
+ def relative_to(
604
+ self, *other: str | PathLike[str], walk_up: bool = False
605
+ ) -> Path:
606
+ return Path(self._path.relative_to(*other, walk_up=walk_up))
607
+
608
+ else:
609
+
610
+ def relative_to(self, *other: str | PathLike[str]) -> Path:
611
+ return Path(self._path.relative_to(*other))
612
+
613
+ async def readlink(self) -> Path:
614
+ target = await to_thread.run_sync(os.readlink, self._path)
615
+ return Path(target)
616
+
617
+ async def rename(self, target: str | pathlib.PurePath | Path) -> Path:
618
+ if isinstance(target, Path):
619
+ target = target._path
620
+
621
+ await to_thread.run_sync(self._path.rename, target)
622
+ return Path(target)
623
+
624
+ async def replace(self, target: str | pathlib.PurePath | Path) -> Path:
625
+ if isinstance(target, Path):
626
+ target = target._path
627
+
628
+ await to_thread.run_sync(self._path.replace, target)
629
+ return Path(target)
630
+
631
+ async def resolve(self, strict: bool = False) -> Path:
632
+ func = partial(self._path.resolve, strict=strict)
633
+ return Path(await to_thread.run_sync(func, abandon_on_cancel=True))
634
+
635
+ def rglob(self, pattern: str) -> AsyncIterator[Path]:
636
+ gen = self._path.rglob(pattern)
637
+ return _PathIterator(gen)
638
+
639
+ async def rmdir(self) -> None:
640
+ await to_thread.run_sync(self._path.rmdir)
641
+
642
+ async def samefile(self, other_path: str | PathLike[str]) -> bool:
643
+ if isinstance(other_path, Path):
644
+ other_path = other_path._path
645
+
646
+ return await to_thread.run_sync(
647
+ self._path.samefile, other_path, abandon_on_cancel=True
648
+ )
649
+
650
+ async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result:
651
+ func = partial(os.stat, follow_symlinks=follow_symlinks)
652
+ return await to_thread.run_sync(func, self._path, abandon_on_cancel=True)
653
+
654
+ async def symlink_to(
655
+ self,
656
+ target: str | bytes | PathLike[str] | PathLike[bytes],
657
+ target_is_directory: bool = False,
658
+ ) -> None:
659
+ if isinstance(target, Path):
660
+ target = target._path
661
+
662
+ await to_thread.run_sync(self._path.symlink_to, target, target_is_directory)
663
+
664
+ async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:
665
+ await to_thread.run_sync(self._path.touch, mode, exist_ok)
666
+
667
+ async def unlink(self, missing_ok: bool = False) -> None:
668
+ try:
669
+ await to_thread.run_sync(self._path.unlink)
670
+ except FileNotFoundError:
671
+ if not missing_ok:
672
+ raise
673
+
674
    if sys.version_info >= (3, 12):

        async def walk(
            self,
            top_down: bool = True,
            on_error: Callable[[OSError], object] | None = None,
            follow_symlinks: bool = False,
        ) -> AsyncIterator[tuple[Path, list[str], list[str]]]:
            """
            Asynchronously walk the directory tree rooted at this path.

            Wraps :meth:`pathlib.Path.walk` (new in Python 3.12): each step of
            the synchronous generator is advanced in a worker thread so that
            directory scanning never blocks the event loop.

            :param top_down: ``True`` to yield a directory before its children
            :param on_error: callable invoked with any :exc:`OSError` raised
                during scanning
            :param follow_symlinks: ``True`` to descend into symlinked
                directories
            """

            def get_next_value() -> tuple[pathlib.Path, list[str], list[str]] | None:
                # Advance the generator one step inside the worker thread.
                # StopIteration is converted to None here because it must not
                # propagate across the thread/async boundary.
                try:
                    return next(gen)
                except StopIteration:
                    return None

            gen = self._path.walk(top_down, on_error, follow_symlinks)
            while True:
                value = await to_thread.run_sync(get_next_value)
                if value is None:
                    return

                # Re-wrap the root as an async Path; dirs/paths stay plain strings
                root, dirs, paths = value
                yield Path(root), dirs, paths
696
+
697
+ def with_name(self, name: str) -> Path:
698
+ return Path(self._path.with_name(name))
699
+
700
+ def with_stem(self, stem: str) -> Path:
701
+ return Path(self._path.with_name(stem + self._path.suffix))
702
+
703
+ def with_suffix(self, suffix: str) -> Path:
704
+ return Path(self._path.with_suffix(suffix))
705
+
706
+ def with_segments(self, *pathsegments: str | PathLike[str]) -> Path:
707
+ return Path(*pathsegments)
708
+
709
+ async def write_bytes(self, data: bytes) -> int:
710
+ return await to_thread.run_sync(self._path.write_bytes, data)
711
+
712
+ async def write_text(
713
+ self,
714
+ data: str,
715
+ encoding: str | None = None,
716
+ errors: str | None = None,
717
+ newline: str | None = None,
718
+ ) -> int:
719
+ # Path.write_text() does not support the "newline" parameter before Python 3.10
720
+ def sync_write_text() -> int:
721
+ with self._path.open(
722
+ "w", encoding=encoding, errors=errors, newline=newline
723
+ ) as fp:
724
+ return fp.write(data)
725
+
726
+ return await to_thread.run_sync(sync_write_text)
727
+
728
+
729
+ PathLike.register(Path)
.venv/lib/python3.11/site-packages/anyio/_core/_resources.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from ..abc import AsyncResource
4
+ from ._tasks import CancelScope
5
+
6
+
7
async def aclose_forcefully(resource: AsyncResource) -> None:
    """
    Close an asynchronous resource without waiting on anything.

    The resource's ``aclose()`` runs inside a cancel scope that has already
    been cancelled, so any checkpoints in its close path are skipped.

    :param resource: the resource to close

    """
    scope = CancelScope()
    with scope:
        scope.cancel()
        await resource.aclose()
.venv/lib/python3.11/site-packages/anyio/_core/_signals.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import AsyncIterator
4
+ from contextlib import AbstractContextManager
5
+ from signal import Signals
6
+
7
+ from ._eventloop import get_async_backend
8
+
9
+
10
def open_signal_receiver(
    *signals: Signals,
) -> AbstractContextManager[AsyncIterator[Signals]]:
    """
    Start receiving operating system signals.

    :param signals: signals to receive (e.g. ``signal.SIGINT``)
    :return: an asynchronous context manager for an asynchronous iterator which yields
        signal numbers

    .. warning:: Windows does not support signals natively so it is best to avoid
        relying on this in cross-platform applications.

    .. warning:: On asyncio, this permanently replaces any previous signal handler for
        the given signals, as set via :meth:`~asyncio.loop.add_signal_handler`.

    """
    # Delegates to the active event loop backend (asyncio or trio)
    return get_async_backend().open_signal_receiver(*signals)
.venv/lib/python3.11/site-packages/anyio/_core/_sockets.py ADDED
@@ -0,0 +1,787 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import errno
4
+ import os
5
+ import socket
6
+ import ssl
7
+ import stat
8
+ import sys
9
+ from collections.abc import Awaitable
10
+ from ipaddress import IPv6Address, ip_address
11
+ from os import PathLike, chmod
12
+ from socket import AddressFamily, SocketKind
13
+ from typing import TYPE_CHECKING, Any, Literal, cast, overload
14
+
15
+ from .. import to_thread
16
+ from ..abc import (
17
+ ConnectedUDPSocket,
18
+ ConnectedUNIXDatagramSocket,
19
+ IPAddressType,
20
+ IPSockAddrType,
21
+ SocketListener,
22
+ SocketStream,
23
+ UDPSocket,
24
+ UNIXDatagramSocket,
25
+ UNIXSocketStream,
26
+ )
27
+ from ..streams.stapled import MultiListener
28
+ from ..streams.tls import TLSStream
29
+ from ._eventloop import get_async_backend
30
+ from ._resources import aclose_forcefully
31
+ from ._synchronization import Event
32
+ from ._tasks import create_task_group, move_on_after
33
+
34
+ if TYPE_CHECKING:
35
+ from _typeshed import FileDescriptorLike
36
+ else:
37
+ FileDescriptorLike = object
38
+
39
+ if sys.version_info < (3, 11):
40
+ from exceptiongroup import ExceptionGroup
41
+
42
+ if sys.version_info < (3, 13):
43
+ from typing_extensions import deprecated
44
+ else:
45
+ from warnings import deprecated
46
+
47
+ IPPROTO_IPV6 = getattr(socket, "IPPROTO_IPV6", 41) # https://bugs.python.org/issue29515
48
+
49
+ AnyIPAddressFamily = Literal[
50
+ AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
51
+ ]
52
+ IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6]
53
+
54
+
55
# The overloads below narrow the return type of connect_tcp(): any argument
# combination that forces a TLS handshake yields a TLSStream, otherwise a
# plain SocketStream is returned.

# tls_hostname given
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str,
    happy_eyeballs_delay: float = ...,
) -> TLSStream: ...


# ssl_context given
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    ssl_context: ssl.SSLContext,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> TLSStream: ...


# tls=True
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    tls: Literal[True],
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> TLSStream: ...


# tls=False
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    tls: Literal[False],
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> SocketStream: ...


# No TLS arguments
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    happy_eyeballs_delay: float = ...,
) -> SocketStream: ...
+ ) -> SocketStream: ...
122
+
123
+
124
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = None,
    tls: bool = False,
    ssl_context: ssl.SSLContext | None = None,
    tls_standard_compatible: bool = True,
    tls_hostname: str | None = None,
    happy_eyeballs_delay: float = 0.25,
) -> SocketStream | TLSStream:
    """
    Connect to a host using the TCP protocol.

    This function implements the stateless version of the Happy Eyeballs algorithm (RFC
    6555). If ``remote_host`` is a host name that resolves to multiple IP addresses,
    each one is tried until one connection attempt succeeds. If the first attempt does
    not connect within 250 milliseconds, a second attempt is started using the next
    address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if
    available) is tried first.

    When the connection has been established, a TLS handshake will be done if either
    ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.

    :param remote_host: the IP address or host name to connect to
    :param remote_port: port on the target host to connect to
    :param local_host: the interface address or name to bind the socket to before
        connecting
    :param tls: ``True`` to do a TLS handshake with the connected stream and return a
        :class:`~anyio.streams.tls.TLSStream` instead
    :param ssl_context: the SSL context object to use (if omitted, a default context is
        created)
    :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake
        before closing the stream and requires that the server does this as well.
        Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
        Some protocols, such as HTTP, require this option to be ``False``.
        See :meth:`~ssl.SSLContext.wrap_socket` for details.
    :param tls_hostname: host name to check the server certificate against (defaults to
        the value of ``remote_host``)
    :param happy_eyeballs_delay: delay (in seconds) before starting the next connection
        attempt
    :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
    :raises OSError: if the connection attempt fails

    """
    # Placed here due to https://github.com/python/mypy/issues/7057
    connected_stream: SocketStream | None = None

    async def try_connect(remote_host: str, event: Event) -> None:
        nonlocal connected_stream
        try:
            stream = await asynclib.connect_tcp(remote_host, remote_port, local_address)
        except OSError as exc:
            oserrors.append(exc)
            return
        else:
            if connected_stream is None:
                # First successful attempt wins; cancel the remaining ones
                connected_stream = stream
                tg.cancel_scope.cancel()
            else:
                # A parallel attempt already won the race; discard this stream
                await stream.aclose()
        finally:
            event.set()

    asynclib = get_async_backend()
    local_address: IPSockAddrType | None = None
    family = socket.AF_UNSPEC
    if local_host:
        gai_res = await getaddrinfo(str(local_host), None)
        family, *_, local_address = gai_res[0]

    target_host = str(remote_host)
    try:
        addr_obj = ip_address(remote_host)
    except ValueError:
        addr_obj = None

    if addr_obj is not None:
        # A literal IP address was given; no name resolution needed
        if isinstance(addr_obj, IPv6Address):
            target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
        else:
            target_addrs = [(socket.AF_INET, addr_obj.compressed)]
    else:
        # getaddrinfo() will raise an exception if name resolution fails
        gai_res = await getaddrinfo(
            target_host, remote_port, family=family, type=socket.SOCK_STREAM
        )

        # Organize the list so that the first address is an IPv6 address (if available)
        # and the second one is an IPv4 address. The rest can be in whatever order.
        v6_found = v4_found = False
        target_addrs = []
        for af, *_, sa in gai_res:
            if af == socket.AF_INET6 and not v6_found:
                v6_found = True
                target_addrs.insert(0, (af, sa[0]))
            elif af == socket.AF_INET and not v4_found and v6_found:
                v4_found = True
                target_addrs.insert(1, (af, sa[0]))
            else:
                target_addrs.append((af, sa[0]))

    oserrors: list[OSError] = []
    async with create_task_group() as tg:
        # Stateless Happy Eyeballs: start one attempt per address, waiting at most
        # happy_eyeballs_delay before launching the next one
        for _af, addr in target_addrs:
            event = Event()
            tg.start_soon(try_connect, addr, event)
            with move_on_after(happy_eyeballs_delay):
                await event.wait()

    if connected_stream is None:
        cause = (
            oserrors[0]
            if len(oserrors) == 1
            else ExceptionGroup("multiple connection attempts failed", oserrors)
        )
        raise OSError("All connection attempts failed") from cause

    if tls or tls_hostname or ssl_context:
        try:
            return await TLSStream.wrap(
                connected_stream,
                server_side=False,
                hostname=tls_hostname or str(remote_host),
                ssl_context=ssl_context,
                standard_compatible=tls_standard_compatible,
            )
        except BaseException:
            # The raw stream must not leak if the TLS handshake fails
            await aclose_forcefully(connected_stream)
            raise

    return connected_stream
256
+
257
+
258
async def connect_unix(path: str | bytes | PathLike[Any]) -> UNIXSocketStream:
    """
    Connect to the given UNIX socket.

    Not available on Windows.

    :param path: path to the socket
    :return: a socket stream object

    """
    # Normalize PathLike objects (including anyio.Path) to str/bytes first
    return await get_async_backend().connect_unix(os.fspath(path))
270
+
271
+
272
async def create_tcp_listener(
    *,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC,
    backlog: int = 65536,
    reuse_port: bool = False,
) -> MultiListener[SocketStream]:
    """
    Create a TCP socket listener.

    :param local_port: port number to listen on
    :param local_host: IP address of the interface to listen on. If omitted, listen on
        all IPv4 and IPv6 interfaces. To listen on all interfaces on a specific address
        family, use ``0.0.0.0`` for IPv4 or ``::`` for IPv6.
    :param family: address family (used if ``local_host`` was omitted)
    :param backlog: maximum number of queued incoming connections (up to a maximum of
        2**16, or 65536)
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a list of listener objects

    """
    asynclib = get_async_backend()
    backlog = min(backlog, 65536)
    local_host = str(local_host) if local_host is not None else None
    # AI_PASSIVE makes getaddrinfo() return wildcard addresses when local_host
    # is None; one listener is created per resulting address
    gai_res = await getaddrinfo(
        local_host,
        local_port,
        family=family,
        type=socket.SocketKind.SOCK_STREAM if sys.platform == "win32" else 0,
        flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
    )
    listeners: list[SocketListener] = []
    try:
        # The set() is here to work around a glibc bug:
        # https://sourceware.org/bugzilla/show_bug.cgi?id=14969
        sockaddr: tuple[str, int] | tuple[str, int, int, int]
        for fam, kind, *_, sockaddr in sorted(set(gai_res)):
            # Workaround for an uvloop bug where we don't get the correct scope ID for
            # IPv6 link-local addresses when passing type=socket.SOCK_STREAM to
            # getaddrinfo(): https://github.com/MagicStack/uvloop/issues/539
            if sys.platform != "win32" and kind is not SocketKind.SOCK_STREAM:
                continue

            raw_socket = socket.socket(fam)
            raw_socket.setblocking(False)

            # For Windows, enable exclusive address use. For others, enable address
            # reuse.
            if sys.platform == "win32":
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
            else:
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            if reuse_port:
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)

            # If only IPv6 was requested, disable dual stack operation
            if fam == socket.AF_INET6:
                raw_socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)

            # Workaround for #554: split a "%scope" suffix off link-local
            # addresses into the explicit scope ID field of the sockaddr
            if "%" in sockaddr[0]:
                addr, scope_id = sockaddr[0].split("%", 1)
                sockaddr = (addr, sockaddr[1], 0, int(scope_id))

            raw_socket.bind(sockaddr)
            raw_socket.listen(backlog)
            listener = asynclib.create_tcp_listener(raw_socket)
            listeners.append(listener)
    except BaseException:
        # Close any listeners created so far before propagating the error
        for listener in listeners:
            await listener.aclose()

        raise

    return MultiListener(listeners)
350
+
351
+
352
async def create_unix_listener(
    path: str | bytes | PathLike[Any],
    *,
    mode: int | None = None,
    backlog: int = 65536,
) -> SocketListener:
    """
    Create a UNIX socket listener.

    Not available on Windows.

    :param path: path of the socket
    :param mode: permissions to set on the socket
    :param backlog: maximum number of queued incoming connections (up to a maximum of
        2**16, or 65536)
    :return: a listener object

    .. versionchanged:: 3.0
        If a socket already exists on the file system in the given path, it will be
        removed first.

    """
    capped_backlog = min(backlog, 65536)
    raw_socket = await setup_unix_local_socket(path, mode, socket.SOCK_STREAM)
    try:
        raw_socket.listen(capped_backlog)
        return get_async_backend().create_unix_listener(raw_socket)
    except BaseException:
        # Don't leak the bound socket if listen() or listener creation fails
        raw_socket.close()
        raise
382
+
383
+
384
async def create_udp_socket(
    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
    *,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    reuse_port: bool = False,
) -> UDPSocket:
    """
    Create a UDP socket.

    If ``port`` has been given, the socket will be bound to this port on the local
    machine, making this socket suitable for providing UDP based services.

    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
        determined from ``local_host`` if omitted
    :param local_host: IP address or host name of the local interface to bind to
    :param local_port: local port to bind to
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a UDP socket

    """
    if family is AddressFamily.AF_UNSPEC and not local_host:
        raise ValueError('Either "family" or "local_host" must be given')

    if local_host:
        # Resolve the bind address and let the result determine the family
        res = await getaddrinfo(
            str(local_host),
            local_port,
            family=family,
            type=socket.SOCK_DGRAM,
            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
        )
        family = cast(AnyIPAddressFamily, res[0][0])
        local_address = res[0][-1]
    else:
        # No local host given: bind to the wildcard address of the chosen family
        wildcard = "::" if family is AddressFamily.AF_INET6 else "0.0.0.0"
        local_address = (wildcard, 0)

    raw_sock = await get_async_backend().create_udp_socket(
        family, local_address, None, reuse_port
    )
    return cast(UDPSocket, raw_sock)
428
+
429
+
430
async def create_connected_udp_socket(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    reuse_port: bool = False,
) -> ConnectedUDPSocket:
    """
    Create a connected UDP socket.

    Connected UDP sockets can only communicate with the specified remote host/port, and
    any packets sent from other sources are dropped.

    :param remote_host: remote host to set as the default target
    :param remote_port: port on the remote host to set as the default target
    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
        determined from ``local_host`` or ``remote_host`` if omitted
    :param local_host: IP address or host name of the local interface to bind to
    :param local_port: local port to bind to
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a connected UDP socket

    """
    local_address = None
    if local_host:
        # Resolve the local bind address first; its family constrains the
        # remote resolution below
        gai_res = await getaddrinfo(
            str(local_host),
            local_port,
            family=family,
            type=socket.SOCK_DGRAM,
            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
        )
        family = cast(AnyIPAddressFamily, gai_res[0][0])
        local_address = gai_res[0][-1]

    # Resolve the remote address; only the first result is used
    gai_res = await getaddrinfo(
        str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM
    )
    family = cast(AnyIPAddressFamily, gai_res[0][0])
    remote_address = gai_res[0][-1]

    sock = await get_async_backend().create_udp_socket(
        family, local_address, remote_address, reuse_port
    )
    return cast(ConnectedUDPSocket, sock)
478
+
479
+
480
async def create_unix_datagram_socket(
    *,
    local_path: None | str | bytes | PathLike[Any] = None,
    local_mode: int | None = None,
) -> UNIXDatagramSocket:
    """
    Create a UNIX datagram socket.

    Not available on Windows.

    If ``local_path`` has been given, the socket will be bound to this path, making this
    socket suitable for receiving datagrams from other processes. Other processes can
    send datagrams to this socket only if ``local_path`` is set.

    If a socket already exists on the file system in the ``local_path``, it will be
    removed first.

    :param local_path: the path on which to bind to
    :param local_mode: permissions to set on the local socket
    :return: a UNIX datagram socket

    """
    sock = await setup_unix_local_socket(local_path, local_mode, socket.SOCK_DGRAM)
    return await get_async_backend().create_unix_datagram_socket(sock, None)
506
+
507
+
508
async def create_connected_unix_datagram_socket(
    remote_path: str | bytes | PathLike[Any],
    *,
    local_path: None | str | bytes | PathLike[Any] = None,
    local_mode: int | None = None,
) -> ConnectedUNIXDatagramSocket:
    """
    Create a connected UNIX datagram socket.

    A connected datagram socket can only exchange datagrams with the peer at
    ``remote_path``.

    Binding to ``local_path`` makes this socket addressable, so other processes
    can send datagrams to it; without a bound path the socket can only send.

    If a socket file already exists at ``local_path``, it is removed first.

    :param remote_path: the path to set as the default target
    :param local_path: the path on which to bind to
    :param local_mode: permissions to set on the local socket
    :return: a connected UNIX datagram socket

    """
    # Normalize PathLike objects into str/bytes for the backend
    target = os.fspath(remote_path)
    raw = await setup_unix_local_socket(local_path, local_mode, socket.SOCK_DGRAM)
    return await get_async_backend().create_unix_datagram_socket(raw, target)
539
+
540
+
541
async def getaddrinfo(
    host: bytes | str | None,
    port: str | int | None,
    *,
    family: int | AddressFamily = 0,
    type: int | SocketKind = 0,
    proto: int = 0,
    flags: int = 0,
) -> list[tuple[AddressFamily, SocketKind, int, str, tuple[str, int]]]:
    """
    Look up a numeric IP address given a host name.

    Internationalized domain names are translated according to the
    (non-transitional) IDNA 2008 standard.

    .. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of
        (host, port), unlike what :func:`socket.getaddrinfo` does.

    :param host: host name
    :param port: port number
    :param family: socket family (``AF_INET``, ...)
    :param type: socket type (``SOCK_STREAM``, ...)
    :param proto: protocol number
    :param flags: flags to pass to upstream ``getaddrinfo()``
    :return: list of tuples containing (family, type, proto, canonname, sockaddr)

    .. seealso:: :func:`socket.getaddrinfo`

    """
    # Encode str host names ourselves so that IDNA 2008 (via the ``idna``
    # package) is applied instead of Python's built-in legacy IDNA codec
    encoded_host: bytes | None
    if not isinstance(host, str):
        encoded_host = host
    else:
        try:
            encoded_host = host.encode("ascii")
        except UnicodeEncodeError:
            import idna

            encoded_host = idna.encode(host, uts46=True)

    results = await get_async_backend().getaddrinfo(
        encoded_host, port, family=family, type=type, proto=proto, flags=flags
    )
    return [
        (fam, kind, protocol, canonical, convert_ipv6_sockaddr(addr))
        for fam, kind, protocol, canonical, addr in results
    ]
588
+
589
+
590
def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[tuple[str, str]]:
    """
    Look up the host name of an IP address.

    :param sockaddr: socket address (e.g. (ipaddress, port) for IPv4)
    :param flags: flags to pass to upstream ``getnameinfo()``
    :return: a tuple of (host name, service name)

    .. seealso:: :func:`socket.getnameinfo`

    """
    backend = get_async_backend()
    return backend.getnameinfo(sockaddr, flags)
602
+
603
+
604
@deprecated("This function is deprecated; use `wait_readable` instead")
def wait_socket_readable(sock: socket.socket) -> Awaitable[None]:
    """
    .. deprecated:: 4.7.0
       Use :func:`wait_readable` instead.

    Wait until the given socket has data to be read.

    .. warning:: Only use this on raw sockets that have not been wrapped by any higher
        level constructs like socket streams!

    :param sock: a socket object
    :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
        socket to become readable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
        to become readable

    """
    # Delegate to the backend, passing the raw file descriptor
    fd = sock.fileno()
    return get_async_backend().wait_readable(fd)
623
+
624
+
625
@deprecated("This function is deprecated; use `wait_writable` instead")
def wait_socket_writable(sock: socket.socket) -> Awaitable[None]:
    """
    .. deprecated:: 4.7.0
       Use :func:`wait_writable` instead.

    Wait until the given socket can be written to.

    This does **NOT** work on Windows when using the asyncio backend with a proactor
    event loop (default on py3.8+).

    .. warning:: Only use this on raw sockets that have not been wrapped by any higher
        level constructs like socket streams!

    :param sock: a socket object
    :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
        socket to become writable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
        to become writable

    """
    # Delegate to the backend, passing the raw file descriptor
    fd = sock.fileno()
    return get_async_backend().wait_writable(fd)
647
+
648
+
649
def wait_readable(obj: FileDescriptorLike) -> Awaitable[None]:
    """
    Wait until the given object has data to be read.

    On Unix systems, ``obj`` must either be an integer file descriptor, or else an
    object with a ``.fileno()`` method which returns an integer file descriptor. Any
    kind of file descriptor can be passed, though the exact semantics will depend on
    your kernel. For example, this probably won't do anything useful for on-disk files.

    On Windows systems, ``obj`` must either be an integer ``SOCKET`` handle, or else an
    object with a ``.fileno()`` method which returns an integer ``SOCKET`` handle. File
    descriptors aren't supported, and neither are handles that refer to anything besides
    a ``SOCKET``.

    On backends where this functionality is not natively provided (asyncio
    ``ProactorEventLoop`` on Windows), it is provided using a separate selector thread
    which is set to shut down when the interpreter shuts down.

    .. warning:: Don't use this on raw sockets that have been wrapped by any higher
        level constructs like socket streams!

    :param obj: an object with a ``.fileno()`` method or an integer handle
    :raises ~anyio.ClosedResourceError: if the object was closed while waiting for the
        object to become readable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the object
        to become readable

    """
    backend = get_async_backend()
    return backend.wait_readable(obj)
678
+
679
+
680
def wait_writable(obj: FileDescriptorLike) -> Awaitable[None]:
    """
    Wait until the given object can be written to.

    :param obj: an object with a ``.fileno()`` method or an integer handle
    :raises ~anyio.ClosedResourceError: if the object was closed while waiting for the
        object to become writable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the object
        to become writable

    .. seealso:: See the documentation of :func:`wait_readable` for the definition of
        ``obj`` and notes on backend compatibility.

    .. warning:: Don't use this on raw sockets that have been wrapped by any higher
        level constructs like socket streams!

    """
    backend = get_async_backend()
    return backend.wait_writable(obj)
698
+
699
+
700
+ #
701
+ # Private API
702
+ #
703
+
704
+
705
+ def convert_ipv6_sockaddr(
706
+ sockaddr: tuple[str, int, int, int] | tuple[str, int],
707
+ ) -> tuple[str, int]:
708
+ """
709
+ Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format.
710
+
711
+ If the scope ID is nonzero, it is added to the address, separated with ``%``.
712
+ Otherwise the flow id and scope id are simply cut off from the tuple.
713
+ Any other kinds of socket addresses are returned as-is.
714
+
715
+ :param sockaddr: the result of :meth:`~socket.socket.getsockname`
716
+ :return: the converted socket address
717
+
718
+ """
719
+ # This is more complicated than it should be because of MyPy
720
+ if isinstance(sockaddr, tuple) and len(sockaddr) == 4:
721
+ host, port, flowinfo, scope_id = sockaddr
722
+ if scope_id:
723
+ # PyPy (as of v7.3.11) leaves the interface name in the result, so
724
+ # we discard it and only get the scope ID from the end
725
+ # (https://foss.heptapod.net/pypy/pypy/-/issues/3938)
726
+ host = host.split("%")[0]
727
+
728
+ # Add scope_id to the address
729
+ return f"{host}%{scope_id}", port
730
+ else:
731
+ return host, port
732
+ else:
733
+ return sockaddr
734
+
735
+
736
async def setup_unix_local_socket(
    path: None | str | bytes | PathLike[Any],
    mode: int | None,
    socktype: int,
) -> socket.socket:
    """
    Create a UNIX local socket object, deleting the socket at the given path if it
    exists.

    Not available on Windows.

    :param path: path of the socket
    :param mode: permissions to set on the socket
    :param socktype: socket.SOCK_STREAM or socket.SOCK_DGRAM

    """
    path_str: str | None = None
    if path is not None:
        path_str = os.fsdecode(path)

        # Linux abstract namespace sockets aren't backed by a concrete file,
        # so there is nothing to stat or unlink for them
        if not path_str.startswith("\0"):
            # Mirrors pathlib's handling of "effectively missing" paths
            try:
                existing = os.stat(path)
            except OSError as exc:
                if exc.errno not in (
                    errno.ENOENT,
                    errno.ENOTDIR,
                    errno.EBADF,
                    errno.ELOOP,
                ):
                    raise
            else:
                # Only remove the file if it actually is a socket
                if stat.S_ISSOCK(existing.st_mode):
                    os.unlink(path)

    raw_socket = socket.socket(socket.AF_UNIX, socktype)
    raw_socket.setblocking(False)

    if path_str is not None:
        try:
            # bind()/chmod() on a UNIX socket path can block on filesystem I/O,
            # so run them in a worker thread
            await to_thread.run_sync(raw_socket.bind, path_str, abandon_on_cancel=True)
            if mode is not None:
                await to_thread.run_sync(chmod, path_str, mode, abandon_on_cancel=True)
        except BaseException:
            # Don't leak the socket if binding or chmod fails
            raw_socket.close()
            raise

    return raw_socket
.venv/lib/python3.11/site-packages/anyio/_core/_streams.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ from typing import TypeVar
5
+ from warnings import warn
6
+
7
+ from ..streams.memory import (
8
+ MemoryObjectReceiveStream,
9
+ MemoryObjectSendStream,
10
+ MemoryObjectStreamState,
11
+ )
12
+
13
+ T_Item = TypeVar("T_Item")
14
+
15
+
16
class create_memory_object_stream(
    tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]],
):
    """
    Create a memory object stream.

    The stream's item type can be annotated like
    :func:`create_memory_object_stream[T_Item]`.

    :param max_buffer_size: number of items held in the buffer until ``send()`` starts
        blocking
    :param item_type: old way of marking the streams with the right generic type for
        static typing (does nothing on AnyIO 4)

        .. deprecated:: 4.0
           Use ``create_memory_object_stream[YourItemType](...)`` instead.
    :return: a tuple of (send stream, receive stream)

    """

    def __new__(  # type: ignore[misc]
        cls, max_buffer_size: float = 0, item_type: object = None
    ) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:
        # Validation order matters: type check first, then range check
        if not (max_buffer_size == math.inf or isinstance(max_buffer_size, int)):
            raise ValueError("max_buffer_size must be either an integer or math.inf")

        if max_buffer_size < 0:
            raise ValueError("max_buffer_size cannot be negative")

        if item_type is not None:
            warn(
                "The item_type argument has been deprecated in AnyIO 4.0. "
                "Use create_memory_object_stream[YourItemType](...) instead.",
                DeprecationWarning,
                stacklevel=2,
            )

        # Both halves share a single state object
        shared_state = MemoryObjectStreamState[T_Item](max_buffer_size)
        send = MemoryObjectSendStream(shared_state)
        receive = MemoryObjectReceiveStream(shared_state)
        return (send, receive)
.venv/lib/python3.11/site-packages/anyio/_core/_subprocesses.py ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ from collections.abc import AsyncIterable, Iterable, Mapping, Sequence
5
+ from io import BytesIO
6
+ from os import PathLike
7
+ from subprocess import DEVNULL, PIPE, CalledProcessError, CompletedProcess
8
+ from typing import IO, Any, Union, cast
9
+
10
+ from ..abc import Process
11
+ from ._eventloop import get_async_backend
12
+ from ._tasks import create_task_group
13
+
14
+ if sys.version_info >= (3, 10):
15
+ from typing import TypeAlias
16
+ else:
17
+ from typing_extensions import TypeAlias
18
+
19
+ StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]
20
+
21
+
22
async def run_process(
    command: StrOrBytesPath | Sequence[StrOrBytesPath],
    *,
    input: bytes | None = None,
    stdout: int | IO[Any] | None = PIPE,
    stderr: int | IO[Any] | None = PIPE,
    check: bool = True,
    cwd: StrOrBytesPath | None = None,
    env: Mapping[str, str] | None = None,
    startupinfo: Any = None,
    creationflags: int = 0,
    start_new_session: bool = False,
    pass_fds: Sequence[int] = (),
    user: str | int | None = None,
    group: str | int | None = None,
    extra_groups: Iterable[str | int] | None = None,
    umask: int = -1,
) -> CompletedProcess[bytes]:
    """
    Run an external command in a subprocess and wait until it completes.

    .. seealso:: :func:`subprocess.run`

    :param command: either a string to pass to the shell, or an iterable of strings
        containing the executable name or path and its arguments
    :param input: bytes passed to the standard input of the subprocess
    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        a file-like object, or `None`
    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        :data:`subprocess.STDOUT`, a file-like object, or `None`
    :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the
        process terminates with a return code other than 0
    :param cwd: If not ``None``, change the working directory to this before running the
        command
    :param env: if not ``None``, this mapping replaces the inherited environment
        variables from the parent process
    :param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
        to specify process startup parameters (Windows only)
    :param creationflags: flags that can be used to control the creation of the
        subprocess (see :class:`subprocess.Popen` for the specifics)
    :param start_new_session: if ``true`` the setsid() system call will be made in the
        child process prior to the execution of the subprocess. (POSIX only)
    :param pass_fds: sequence of file descriptors to keep open between the parent and
        child processes. (POSIX only)
    :param user: effective user to run the process as (Python >= 3.9, POSIX only)
    :param group: effective group to run the process as (Python >= 3.9, POSIX only)
    :param extra_groups: supplementary groups to set in the subprocess (Python >= 3.9,
        POSIX only)
    :param umask: if not negative, this umask is applied in the child process before
        running the given command (Python >= 3.9, POSIX only)
    :return: an object representing the completed process
    :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process
        exits with a nonzero return code

    """

    async def collect_output(stream: AsyncIterable[bytes], slot: int) -> None:
        # Gather all chunks from the stream and store the joined result in the
        # shared list so the parent can pick it up after the task group exits
        chunks = [chunk async for chunk in stream]
        stream_contents[slot] = b"".join(chunks)

    async with await open_process(
        command,
        stdin=PIPE if input else DEVNULL,
        stdout=stdout,
        stderr=stderr,
        cwd=cwd,
        env=env,
        startupinfo=startupinfo,
        creationflags=creationflags,
        start_new_session=start_new_session,
        pass_fds=pass_fds,
        user=user,
        group=group,
        extra_groups=extra_groups,
        umask=umask,
    ) as process:
        # slot 0 = stdout, slot 1 = stderr
        stream_contents: list[bytes | None] = [None, None]
        async with create_task_group() as tg:
            if process.stdout:
                tg.start_soon(collect_output, process.stdout, 0)

            if process.stderr:
                tg.start_soon(collect_output, process.stderr, 1)

            if process.stdin and input:
                await process.stdin.send(input)
                await process.stdin.aclose()

            await process.wait()

        output, errors = stream_contents
        if check and process.returncode != 0:
            raise CalledProcessError(
                cast(int, process.returncode), command, output, errors
            )

        return CompletedProcess(command, cast(int, process.returncode), output, errors)
120
+
121
+
122
async def open_process(
    command: StrOrBytesPath | Sequence[StrOrBytesPath],
    *,
    stdin: int | IO[Any] | None = PIPE,
    stdout: int | IO[Any] | None = PIPE,
    stderr: int | IO[Any] | None = PIPE,
    cwd: StrOrBytesPath | None = None,
    env: Mapping[str, str] | None = None,
    startupinfo: Any = None,
    creationflags: int = 0,
    start_new_session: bool = False,
    pass_fds: Sequence[int] = (),
    user: str | int | None = None,
    group: str | int | None = None,
    extra_groups: Iterable[str | int] | None = None,
    umask: int = -1,
) -> Process:
    """
    Start an external command in a subprocess.

    .. seealso:: :class:`subprocess.Popen`

    :param command: either a string to pass to the shell, or an iterable of strings
        containing the executable name or path and its arguments
    :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a
        file-like object, or ``None``
    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        a file-like object, or ``None``
    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        :data:`subprocess.STDOUT`, a file-like object, or ``None``
    :param cwd: If not ``None``, the working directory is changed before executing
    :param env: If env is not ``None``, it must be a mapping that defines the
        environment variables for the new process
    :param creationflags: flags that can be used to control the creation of the
        subprocess (see :class:`subprocess.Popen` for the specifics)
    :param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
        to specify process startup parameters (Windows only)
    :param start_new_session: if ``true`` the setsid() system call will be made in the
        child process prior to the execution of the subprocess. (POSIX only)
    :param pass_fds: sequence of file descriptors to keep open between the parent and
        child processes. (POSIX only)
    :param user: effective user to run the process as (POSIX only)
    :param group: effective group to run the process as (POSIX only)
    :param extra_groups: supplementary groups to set in the subprocess (POSIX only)
    :param umask: if not negative, this umask is applied in the child process before
        running the given command (POSIX only)
    :return: an asynchronous process object

    """
    # Only forward the Python >= 3.9 Popen options when they were actually
    # given, so older backends/platforms that don't accept them still work
    kwargs: dict[str, Any] = {}
    if user is not None:
        kwargs["user"] = user

    if group is not None:
        kwargs["group"] = group

    if extra_groups is not None:
        # BUGFIX: this previously assigned ``group`` instead of
        # ``extra_groups``, silently discarding the caller's supplementary
        # group list
        kwargs["extra_groups"] = extra_groups

    if umask >= 0:
        kwargs["umask"] = umask

    return await get_async_backend().open_process(
        command,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        cwd=cwd,
        env=env,
        startupinfo=startupinfo,
        creationflags=creationflags,
        start_new_session=start_new_session,
        pass_fds=pass_fds,
        **kwargs,
    )
.venv/lib/python3.11/site-packages/anyio/_core/_synchronization.py ADDED
@@ -0,0 +1,732 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ from collections import deque
5
+ from dataclasses import dataclass
6
+ from types import TracebackType
7
+
8
+ from sniffio import AsyncLibraryNotFoundError
9
+
10
+ from ..lowlevel import checkpoint
11
+ from ._eventloop import get_async_backend
12
+ from ._exceptions import BusyResourceError
13
+ from ._tasks import CancelScope
14
+ from ._testing import TaskInfo, get_current_task
15
+
16
+
17
@dataclass(frozen=True)
class EventStatistics:
    """
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
    """

    tasks_waiting: int


@dataclass(frozen=True)
class CapacityLimiterStatistics:
    """
    :ivar int borrowed_tokens: number of tokens currently borrowed by tasks
    :ivar float total_tokens: total number of available tokens
    :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from
        this limiter
    :ivar int tasks_waiting: number of tasks waiting on
        :meth:`~.CapacityLimiter.acquire` or
        :meth:`~.CapacityLimiter.acquire_on_behalf_of`
    """

    borrowed_tokens: int
    total_tokens: float
    borrowers: tuple[object, ...]
    tasks_waiting: int


@dataclass(frozen=True)
class LockStatistics:
    """
    :ivar bool locked: flag indicating if this lock is locked or not
    :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the
        lock is not held by any task)
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
    """

    locked: bool
    owner: TaskInfo | None
    tasks_waiting: int


@dataclass(frozen=True)
class ConditionStatistics:
    """
    :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
    :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying
        :class:`~.Lock`
    """

    tasks_waiting: int
    lock_statistics: LockStatistics


@dataclass(frozen=True)
class SemaphoreStatistics:
    """
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`
    """

    tasks_waiting: int
78
+
79
+
80
class Event:
    """
    Abstract one-shot flag that tasks can wait on.

    Instantiating this class returns a backend-native event when an async
    backend is running, or an :class:`EventAdapter` otherwise (so events can be
    created outside of an event loop, e.g. at import time).
    """

    def __new__(cls) -> Event:
        try:
            return get_async_backend().create_event()
        except AsyncLibraryNotFoundError:
            # No async library detected yet - defer backend selection
            return EventAdapter()

    def set(self) -> None:
        """Set the flag, notifying all listeners."""
        raise NotImplementedError

    def is_set(self) -> bool:
        """Return ``True`` if the flag is set, ``False`` if not."""
        raise NotImplementedError

    async def wait(self) -> None:
        """
        Wait until the flag has been set.

        If the flag has already been set when this method is called, it returns
        immediately.

        """
        raise NotImplementedError

    def statistics(self) -> EventStatistics:
        """Return statistics about the current state of this event."""
        raise NotImplementedError
108
+
109
+
110
class EventAdapter(Event):
    """
    Stand-in event used when no async backend is running yet.

    The backend-native event is created lazily on first use; until then, the
    "set" state is tracked locally and replayed onto the real event when it is
    created.
    """

    _internal_event: Event | None = None
    _is_set: bool = False

    def __new__(cls) -> EventAdapter:
        # Bypass Event.__new__ so we don't recurse into backend detection
        return object.__new__(cls)

    @property
    def _event(self) -> Event:
        if self._internal_event is None:
            self._internal_event = get_async_backend().create_event()
            # Replay a set() that happened before the backend event existed
            if self._is_set:
                self._internal_event.set()

        return self._internal_event

    def set(self) -> None:
        if self._internal_event is not None:
            self._event.set()
        else:
            self._is_set = True

    def is_set(self) -> bool:
        if self._internal_event is not None:
            return self._internal_event.is_set()

        return self._is_set

    async def wait(self) -> None:
        await self._event.wait()

    def statistics(self) -> EventStatistics:
        if self._internal_event is not None:
            return self._internal_event.statistics()

        # No backend event yet, so nothing can be waiting
        return EventStatistics(tasks_waiting=0)
146
+
147
+
148
class Lock:
    """
    Abstract mutual-exclusion lock.

    Instantiating this class returns a backend-native lock when an async
    backend is running, or a :class:`LockAdapter` otherwise.
    """

    def __new__(cls, *, fast_acquire: bool = False) -> Lock:
        try:
            return get_async_backend().create_lock(fast_acquire=fast_acquire)
        except AsyncLibraryNotFoundError:
            # No async library detected yet - defer backend selection
            return LockAdapter(fast_acquire=fast_acquire)

    async def __aenter__(self) -> None:
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    async def acquire(self) -> None:
        """Acquire the lock."""
        raise NotImplementedError

    def acquire_nowait(self) -> None:
        """
        Acquire the lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        raise NotImplementedError

    def release(self) -> None:
        """Release the lock."""
        raise NotImplementedError

    def locked(self) -> bool:
        """Return True if the lock is currently held."""
        raise NotImplementedError

    def statistics(self) -> LockStatistics:
        """
        Return statistics about the current state of this lock.

        .. versionadded:: 3.0
        """
        raise NotImplementedError
194
+
195
+
196
class LockAdapter(Lock):
    """
    Stand-in lock used when no async backend is running yet.

    The backend-native lock is created lazily on first use.
    """

    _internal_lock: Lock | None = None

    def __new__(cls, *, fast_acquire: bool = False) -> LockAdapter:
        # Bypass Lock.__new__ so we don't recurse into backend detection
        return object.__new__(cls)

    def __init__(self, *, fast_acquire: bool = False):
        self._fast_acquire = fast_acquire

    @property
    def _lock(self) -> Lock:
        if self._internal_lock is None:
            backend = get_async_backend()
            self._internal_lock = backend.create_lock(fast_acquire=self._fast_acquire)

        return self._internal_lock

    async def __aenter__(self) -> None:
        await self._lock.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        # Only release if the backend lock was actually materialized
        if self._internal_lock is not None:
            self._internal_lock.release()

    async def acquire(self) -> None:
        """Acquire the lock."""
        await self._lock.acquire()

    def acquire_nowait(self) -> None:
        """
        Acquire the lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        self._lock.acquire_nowait()

    def release(self) -> None:
        """Release the lock."""
        self._lock.release()

    def locked(self) -> bool:
        """Return True if the lock is currently held."""
        return self._lock.locked()

    def statistics(self) -> LockStatistics:
        """
        Return statistics about the current state of this lock.

        .. versionadded:: 3.0

        """
        if self._internal_lock is not None:
            return self._internal_lock.statistics()

        # No backend lock yet: unlocked, unowned, nothing waiting
        return LockStatistics(False, None, 0)
258
+
259
+
260
+ class Condition:
261
+ _owner_task: TaskInfo | None = None
262
+
263
+ def __init__(self, lock: Lock | None = None):
264
+ self._lock = lock or Lock()
265
+ self._waiters: deque[Event] = deque()
266
+
267
+ async def __aenter__(self) -> None:
268
+ await self.acquire()
269
+
270
+ async def __aexit__(
271
+ self,
272
+ exc_type: type[BaseException] | None,
273
+ exc_val: BaseException | None,
274
+ exc_tb: TracebackType | None,
275
+ ) -> None:
276
+ self.release()
277
+
278
+ def _check_acquired(self) -> None:
279
+ if self._owner_task != get_current_task():
280
+ raise RuntimeError("The current task is not holding the underlying lock")
281
+
282
+ async def acquire(self) -> None:
283
+ """Acquire the underlying lock."""
284
+ await self._lock.acquire()
285
+ self._owner_task = get_current_task()
286
+
287
+ def acquire_nowait(self) -> None:
288
+ """
289
+ Acquire the underlying lock, without blocking.
290
+
291
+ :raises ~anyio.WouldBlock: if the operation would block
292
+
293
+ """
294
+ self._lock.acquire_nowait()
295
+ self._owner_task = get_current_task()
296
+
297
+ def release(self) -> None:
298
+ """Release the underlying lock."""
299
+ self._lock.release()
300
+
301
+ def locked(self) -> bool:
302
+ """Return True if the lock is set."""
303
+ return self._lock.locked()
304
+
305
+ def notify(self, n: int = 1) -> None:
306
+ """Notify exactly n listeners."""
307
+ self._check_acquired()
308
+ for _ in range(n):
309
+ try:
310
+ event = self._waiters.popleft()
311
+ except IndexError:
312
+ break
313
+
314
+ event.set()
315
+
316
+ def notify_all(self) -> None:
317
+ """Notify all the listeners."""
318
+ self._check_acquired()
319
+ for event in self._waiters:
320
+ event.set()
321
+
322
+ self._waiters.clear()
323
+
324
    async def wait(self) -> None:
        """Wait for a notification."""
        await checkpoint()
        event = Event()
        self._waiters.append(event)
        # Release the lock while waiting (as with threading.Condition).
        self.release()
        try:
            await event.wait()
        except BaseException:
            # Interrupted (e.g. cancelled) before being notified: withdraw our
            # waiter entry so a later notify() will not be wasted on us.
            if not event.is_set():
                self._waiters.remove(event)

            raise
        finally:
            # Always reacquire the lock before returning, even on cancellation;
            # shield so the reacquisition itself cannot be cancelled.
            with CancelScope(shield=True):
                await self.acquire()
340
+
341
    def statistics(self) -> ConditionStatistics:
        """
        Return statistics about the current state of this condition.

        .. versionadded:: 3.0
        """
        return ConditionStatistics(len(self._waiters), self._lock.statistics())
348
+
349
+
350
class Semaphore:
    """
    A counting semaphore.

    Construction dispatches to the running async backend's native semaphore
    implementation; when no async library is running, a lazy
    :class:`SemaphoreAdapter` is returned instead.
    """

    def __new__(
        cls,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> Semaphore:
        try:
            return get_async_backend().create_semaphore(
                initial_value, max_value=max_value, fast_acquire=fast_acquire
            )
        except AsyncLibraryNotFoundError:
            # No event loop running yet; defer backend creation to first use.
            return SemaphoreAdapter(initial_value, max_value=max_value)

    def __init__(
        self,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ):
        # Fail fast on invalid arguments; type errors are reported before
        # value errors, matching the documented contract.
        if not isinstance(initial_value, int):
            raise TypeError("initial_value must be an integer")

        if initial_value < 0:
            raise ValueError("initial_value must be >= 0")

        if max_value is not None and not isinstance(max_value, int):
            raise TypeError("max_value must be an integer or None")

        if max_value is not None and max_value < initial_value:
            raise ValueError(
                "max_value must be equal to or higher than initial_value"
            )

        self._fast_acquire = fast_acquire

    async def __aenter__(self) -> Semaphore:
        await self.acquire()
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    async def acquire(self) -> None:
        """Decrement the semaphore value, blocking if necessary."""
        raise NotImplementedError

    def acquire_nowait(self) -> None:
        """
        Acquire the underlying lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        raise NotImplementedError

    def release(self) -> None:
        """Increment the semaphore value."""
        raise NotImplementedError

    @property
    def value(self) -> int:
        """The current value of the semaphore."""
        raise NotImplementedError

    @property
    def max_value(self) -> int | None:
        """The maximum value of the semaphore."""
        raise NotImplementedError

    def statistics(self) -> SemaphoreStatistics:
        """
        Return statistics about the current state of this semaphore.

        .. versionadded:: 3.0
        """
        raise NotImplementedError
432
+
433
+
434
class SemaphoreAdapter(Semaphore):
    """A :class:`Semaphore` stand-in that creates the real backend semaphore lazily."""

    # The backend semaphore, created on first use of the _semaphore property.
    _internal_semaphore: Semaphore | None = None

    def __new__(
        cls,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> SemaphoreAdapter:
        # Bypass Semaphore.__new__, which would dispatch to a backend.
        return object.__new__(cls)

    def __init__(
        self,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> None:
        super().__init__(initial_value, max_value=max_value, fast_acquire=fast_acquire)
        # Retain the construction arguments for the deferred backend call.
        self._initial_value = initial_value
        self._max_value = max_value

    @property
    def _semaphore(self) -> Semaphore:
        semaphore = self._internal_semaphore
        if semaphore is None:
            semaphore = get_async_backend().create_semaphore(
                self._initial_value, max_value=self._max_value
            )
            self._internal_semaphore = semaphore

        return semaphore

    async def acquire(self) -> None:
        await self._semaphore.acquire()

    def acquire_nowait(self) -> None:
        self._semaphore.acquire_nowait()

    def release(self) -> None:
        self._semaphore.release()

    @property
    def value(self) -> int:
        if self._internal_semaphore is not None:
            return self._internal_semaphore.value

        # No backend semaphore yet, so nothing has been acquired.
        return self._initial_value

    @property
    def max_value(self) -> int | None:
        return self._max_value

    def statistics(self) -> SemaphoreStatistics:
        if self._internal_semaphore is not None:
            return self._internal_semaphore.statistics()

        return SemaphoreStatistics(tasks_waiting=0)
491
+
492
+
493
class CapacityLimiter:
    """
    Limits the number of concurrent borrowers of a fixed pool of tokens.

    Instantiation dispatches to the running async backend's native limiter;
    outside an event loop, a lazy ``CapacityLimiterAdapter`` is returned
    instead. The method bodies below only define the interface.
    """

    def __new__(cls, total_tokens: float) -> CapacityLimiter:
        try:
            return get_async_backend().create_capacity_limiter(total_tokens)
        except AsyncLibraryNotFoundError:
            # No async library running; defer backend creation to first use.
            return CapacityLimiterAdapter(total_tokens)

    async def __aenter__(self) -> None:
        raise NotImplementedError

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        raise NotImplementedError

    @property
    def total_tokens(self) -> float:
        """
        The total number of tokens available for borrowing.

        This is a read-write property. If the total number of tokens is increased, the
        proportionate number of tasks waiting on this limiter will be granted their
        tokens.

        .. versionchanged:: 3.0
            The property is now writable.

        """
        raise NotImplementedError

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        raise NotImplementedError

    @property
    def borrowed_tokens(self) -> int:
        """The number of tokens that have currently been borrowed."""
        raise NotImplementedError

    @property
    def available_tokens(self) -> float:
        """The number of tokens currently available to be borrowed"""
        raise NotImplementedError

    def acquire_nowait(self) -> None:
        """
        Acquire a token for the current task without waiting for one to become
        available.

        :raises ~anyio.WouldBlock: if there are no tokens available for borrowing

        """
        raise NotImplementedError

    def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
        """
        Acquire a token without waiting for one to become available.

        :param borrower: the entity borrowing a token
        :raises ~anyio.WouldBlock: if there are no tokens available for borrowing

        """
        raise NotImplementedError

    async def acquire(self) -> None:
        """
        Acquire a token for the current task, waiting if necessary for one to become
        available.

        """
        raise NotImplementedError

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        """
        Acquire a token, waiting if necessary for one to become available.

        :param borrower: the entity borrowing a token

        """
        raise NotImplementedError

    def release(self) -> None:
        """
        Release the token held by the current task.

        :raises RuntimeError: if the current task has not borrowed a token from this
            limiter.

        """
        raise NotImplementedError

    def release_on_behalf_of(self, borrower: object) -> None:
        """
        Release the token held by the given borrower.

        :raises RuntimeError: if the borrower has not borrowed a token from this
            limiter.

        """
        raise NotImplementedError

    def statistics(self) -> CapacityLimiterStatistics:
        """
        Return statistics about the current state of this limiter.

        .. versionadded:: 3.0

        """
        raise NotImplementedError
605
+
606
+
607
class CapacityLimiterAdapter(CapacityLimiter):
    """A :class:`CapacityLimiter` stand-in that creates the backend limiter lazily."""

    # The backend limiter, created on first use of the _limiter property.
    _internal_limiter: CapacityLimiter | None = None

    def __new__(cls, total_tokens: float) -> CapacityLimiterAdapter:
        # Bypass CapacityLimiter.__new__, which would dispatch to a backend.
        return object.__new__(cls)

    def __init__(self, total_tokens: float) -> None:
        # Route through the property setter to get argument validation.
        self.total_tokens = total_tokens

    @property
    def _limiter(self) -> CapacityLimiter:
        limiter = self._internal_limiter
        if limiter is None:
            limiter = get_async_backend().create_capacity_limiter(self._total_tokens)
            self._internal_limiter = limiter

        return limiter

    async def __aenter__(self) -> None:
        await self._limiter.__aenter__()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        return await self._limiter.__aexit__(exc_type, exc_val, exc_tb)

    @property
    def total_tokens(self) -> float:
        if self._internal_limiter is not None:
            return self._internal_limiter.total_tokens

        return self._total_tokens

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        if not isinstance(value, int) and value is not math.inf:
            raise TypeError("total_tokens must be an int or math.inf")
        elif value < 1:
            raise ValueError("total_tokens must be >= 1")

        if self._internal_limiter is not None:
            self._limiter.total_tokens = value
        else:
            # No backend limiter yet; just remember the value for later.
            self._total_tokens = value

    @property
    def borrowed_tokens(self) -> int:
        if self._internal_limiter is not None:
            return self._internal_limiter.borrowed_tokens

        return 0

    @property
    def available_tokens(self) -> float:
        if self._internal_limiter is not None:
            return self._internal_limiter.available_tokens

        return self._total_tokens

    def acquire_nowait(self) -> None:
        self._limiter.acquire_nowait()

    def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
        self._limiter.acquire_on_behalf_of_nowait(borrower)

    async def acquire(self) -> None:
        await self._limiter.acquire()

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        await self._limiter.acquire_on_behalf_of(borrower)

    def release(self) -> None:
        self._limiter.release()

    def release_on_behalf_of(self, borrower: object) -> None:
        self._limiter.release_on_behalf_of(borrower)

    def statistics(self) -> CapacityLimiterStatistics:
        if self._internal_limiter is not None:
            return self._internal_limiter.statistics()

        return CapacityLimiterStatistics(
            borrowed_tokens=0,
            total_tokens=self.total_tokens,
            borrowers=(),
            tasks_waiting=0,
        )
698
+
699
+
700
class ResourceGuard:
    """
    A context manager for ensuring that a resource is only used by a single task at a
    time.

    Entering this context manager while the previous has not exited it yet will trigger
    :exc:`BusyResourceError`.

    :param action: the action to guard against (visible in the :exc:`BusyResourceError`
        when triggered, e.g. "Another task is already {action} this resource")

    .. versionadded:: 4.1
    """

    __slots__ = ("action", "_in_use")

    def __init__(self, action: str = "using"):
        self.action: str = action
        self._in_use = False

    def __enter__(self) -> None:
        # Refuse re-entry while another task is inside the guarded section.
        if self._in_use:
            raise BusyResourceError(self.action)

        self._in_use = True

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self._in_use = False
.venv/lib/python3.11/site-packages/anyio/_core/_tasks.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ from collections.abc import Generator
5
+ from contextlib import contextmanager
6
+ from types import TracebackType
7
+
8
+ from ..abc._tasks import TaskGroup, TaskStatus
9
+ from ._eventloop import get_async_backend
10
+
11
+
12
class _IgnoredTaskStatus(TaskStatus[object]):
    # No-op TaskStatus used when the spawner does not care about startup values.
    def started(self, value: object = None) -> None:
        pass


# Singleton default for the ``task_status`` parameter of task functions.
TASK_STATUS_IGNORED = _IgnoredTaskStatus()
18
+
19
+
20
class CancelScope:
    """
    Wraps a unit of work that can be made separately cancellable.

    Instantiation always returns the running backend's concrete scope (see
    ``__new__``); the method bodies below only define the interface.

    :param deadline: The time (clock value) when this scope is cancelled automatically
    :param shield: ``True`` to shield the cancel scope from external cancellation
    """

    def __new__(
        cls, *, deadline: float = math.inf, shield: bool = False
    ) -> CancelScope:
        return get_async_backend().create_cancel_scope(shield=shield, deadline=deadline)

    def cancel(self) -> None:
        """Cancel this scope immediately."""
        raise NotImplementedError

    @property
    def deadline(self) -> float:
        """
        The time (clock value) when this scope is cancelled automatically.

        Will be ``float('inf')`` if no timeout has been set.

        """
        raise NotImplementedError

    @deadline.setter
    def deadline(self, value: float) -> None:
        raise NotImplementedError

    @property
    def cancel_called(self) -> bool:
        """``True`` if :meth:`cancel` has been called."""
        raise NotImplementedError

    @property
    def cancelled_caught(self) -> bool:
        """
        ``True`` if this scope suppressed a cancellation exception it itself raised.

        This is typically used to check if any work was interrupted, or to see if the
        scope was cancelled due to its deadline being reached. The value will, however,
        only be ``True`` if the cancellation was triggered by the scope itself (and not
        an outer scope).

        """
        raise NotImplementedError

    @property
    def shield(self) -> bool:
        """
        ``True`` if this scope is shielded from external cancellation.

        While a scope is shielded, it will not receive cancellations from outside.

        """
        raise NotImplementedError

    @shield.setter
    def shield(self, value: bool) -> None:
        raise NotImplementedError

    def __enter__(self) -> CancelScope:
        """Activate the scope and return it."""
        raise NotImplementedError

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool:
        """Deactivate the scope, swallowing its own cancellation exception if any."""
        raise NotImplementedError
93
+
94
+
95
@contextmanager
def fail_after(
    delay: float | None, shield: bool = False
) -> Generator[CancelScope, None, None]:
    """
    Create a context manager which raises a :class:`TimeoutError` if the block does
    not finish in time.

    :param delay: maximum allowed time (in seconds) before raising the exception, or
        ``None`` to disable the timeout
    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a context manager that yields a cancel scope
    :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.CancelScope`\\]

    """
    current_time = get_async_backend().current_time
    deadline = (current_time() + delay) if delay is not None else math.inf
    with get_async_backend().create_cancel_scope(
        deadline=deadline, shield=shield
    ) as cancel_scope:
        yield cancel_scope

    # Translate a deadline-triggered cancellation into TimeoutError. The
    # deadline is re-read because the caller may have moved it while active.
    if cancel_scope.cancelled_caught and current_time() >= cancel_scope.deadline:
        raise TimeoutError
119
+
120
+
121
def move_on_after(delay: float | None, shield: bool = False) -> CancelScope:
    """
    Create a cancel scope with a deadline that expires after the given delay.

    :param delay: maximum allowed time (in seconds) before exiting the context block, or
        ``None`` to disable the timeout
    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a cancel scope

    """
    if delay is None:
        deadline = math.inf
    else:
        deadline = get_async_backend().current_time() + delay

    return get_async_backend().create_cancel_scope(deadline=deadline, shield=shield)
135
+
136
+
137
def current_effective_deadline() -> float:
    """
    Return the nearest deadline among all the cancel scopes effective for the current
    task.

    :return: a clock value from the event loop's internal clock (or ``float('inf')`` if
        there is no deadline in effect, or ``float('-inf')`` if the current scope has
        been cancelled)
    :rtype: float

    """
    # Pure delegation to the active backend.
    return get_async_backend().current_effective_deadline()
149
+
150
+
151
def create_task_group() -> TaskGroup:
    """
    Create a task group.

    :return: a task group

    """
    # Pure delegation to the active backend.
    return get_async_backend().create_task_group()
.venv/lib/python3.11/site-packages/anyio/_core/_testing.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Awaitable, Generator
4
+ from typing import Any, cast
5
+
6
+ from ._eventloop import get_async_backend
7
+
8
+
9
class TaskInfo:
    """
    Represents an asynchronous task.

    :ivar int id: the unique identifier of the task
    :ivar parent_id: the identifier of the parent task, if any
    :vartype parent_id: Optional[int]
    :ivar str name: the description of the task (if any)
    :ivar ~collections.abc.Coroutine coro: the coroutine object of the task
    """

    __slots__ = ("_name", "id", "parent_id", "name", "coro")

    def __init__(
        self,
        id: int,
        parent_id: int | None,
        name: str | None,
        coro: Generator[Any, Any, Any] | Awaitable[Any],
    ):
        self._name = f"{get_current_task.__module__}.{get_current_task.__qualname__}"
        self.id: int = id
        self.parent_id: int | None = parent_id
        self.name: str | None = name
        self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro

    def __eq__(self, other: object) -> bool:
        # Tasks are identified solely by their id.
        if not isinstance(other, TaskInfo):
            return NotImplemented

        return self.id == other.id

    def __hash__(self) -> int:
        return hash(self.id)

    def __repr__(self) -> str:
        return "{}(id={!r}, name={!r})".format(type(self).__name__, self.id, self.name)

    def has_pending_cancellation(self) -> bool:
        """
        Return ``True`` if the task has a cancellation pending, ``False`` otherwise.

        """
        # Backend-specific subclasses override this; the base has no knowledge
        # of pending cancellations.
        return False
54
+
55
+
56
def get_current_task() -> TaskInfo:
    """
    Return the current task.

    :return: a representation of the current task

    """
    # Pure delegation to the active backend.
    return get_async_backend().get_current_task()
64
+
65
+
66
def get_running_tasks() -> list[TaskInfo]:
    """
    Return a list of running tasks in the current event loop.

    :return: a list of task info objects

    """
    # The cast narrows the backend's Sequence[TaskInfo] to the documented list type.
    return cast("list[TaskInfo]", get_async_backend().get_running_tasks())
74
+
75
+
76
async def wait_all_tasks_blocked() -> None:
    """Wait until all other tasks are waiting for something."""
    await get_async_backend().wait_all_tasks_blocked()
.venv/lib/python3.11/site-packages/anyio/_core/_typedattr.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Callable, Mapping
4
+ from typing import Any, TypeVar, final, overload
5
+
6
+ from ._exceptions import TypedAttributeLookupError
7
+
8
T_Attr = TypeVar("T_Attr")
T_Default = TypeVar("T_Default")
# Sentinel distinguishing "no default given" from an explicit default of None.
undefined = object()
11
+
12
+
13
def typed_attribute() -> Any:
    """Return a unique object, used to mark typed attributes."""
    # Each call creates a distinct sentinel; identity is the attribute's key.
    return object()
16
+
17
+
18
class TypedAttributeSet:
    """
    Superclass for typed attribute collections.

    Checks that every public attribute of every subclass has a type annotation.
    """

    def __init_subclass__(cls) -> None:
        # getattr() walks the MRO, so inherited annotations count as well.
        declared = getattr(cls, "__annotations__", {})
        unannotated = [
            name
            for name in dir(cls)
            if not name.startswith("_") and name not in declared
        ]
        if unannotated:
            raise TypeError(
                f"Attribute {unannotated[0]!r} is missing its type annotation"
            )

        super().__init_subclass__()
34
+
35
+
36
class TypedAttributeProvider:
    """Base class for classes that wish to provide typed extra attributes."""

    @property
    def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]:
        """
        A mapping of the extra attributes to callables that return the corresponding
        values.

        If the provider wraps another provider, the attributes from that wrapper should
        also be included in the returned mapping (but the wrapper may override the
        callables from the wrapped instance).

        """
        return {}

    @overload
    def extra(self, attribute: T_Attr) -> T_Attr: ...

    @overload
    def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default: ...

    @final
    def extra(self, attribute: Any, default: object = undefined) -> object:
        """
        extra(attribute, default=undefined)

        Return the value of the given typed extra attribute.

        :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to
            look for
        :param default: the value that should be returned if no value is found for the
            attribute
        :raises ~anyio.TypedAttributeLookupError: if the search failed and no default
            value was given

        """
        try:
            getter = self.extra_attributes[attribute]
        except KeyError:
            # Fall back to the caller-supplied default, if one was given.
            if default is not undefined:
                return default

            raise TypedAttributeLookupError("Attribute not found") from None

        return getter()
.venv/lib/python3.11/site-packages/anyio/abc/_eventloop.py ADDED
@@ -0,0 +1,376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ import sys
5
+ from abc import ABCMeta, abstractmethod
6
+ from collections.abc import AsyncIterator, Awaitable, Callable, Sequence
7
+ from contextlib import AbstractContextManager
8
+ from os import PathLike
9
+ from signal import Signals
10
+ from socket import AddressFamily, SocketKind, socket
11
+ from typing import (
12
+ IO,
13
+ TYPE_CHECKING,
14
+ Any,
15
+ TypeVar,
16
+ Union,
17
+ overload,
18
+ )
19
+
20
+ if sys.version_info >= (3, 11):
21
+ from typing import TypeVarTuple, Unpack
22
+ else:
23
+ from typing_extensions import TypeVarTuple, Unpack
24
+
25
+ if sys.version_info >= (3, 10):
26
+ from typing import TypeAlias
27
+ else:
28
+ from typing_extensions import TypeAlias
29
+
30
+ if TYPE_CHECKING:
31
+ from _typeshed import HasFileno
32
+
33
+ from .._core._synchronization import CapacityLimiter, Event, Lock, Semaphore
34
+ from .._core._tasks import CancelScope
35
+ from .._core._testing import TaskInfo
36
+ from ..from_thread import BlockingPortal
37
+ from ._sockets import (
38
+ ConnectedUDPSocket,
39
+ ConnectedUNIXDatagramSocket,
40
+ IPSockAddrType,
41
+ SocketListener,
42
+ SocketStream,
43
+ UDPSocket,
44
+ UNIXDatagramSocket,
45
+ UNIXSocketStream,
46
+ )
47
+ from ._subprocesses import Process
48
+ from ._tasks import TaskGroup
49
+ from ._testing import TestRunner
50
+
51
+ T_Retval = TypeVar("T_Retval")
52
+ PosArgsT = TypeVarTuple("PosArgsT")
53
+ StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]
54
+
55
+
56
class AsyncBackend(metaclass=ABCMeta):
    """
    Abstract interface that each supported async library (e.g. asyncio, trio)
    implements; the rest of the package dispatches to the active backend through
    these class methods.
    """

    @classmethod
    @abstractmethod
    def run(
        cls,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        options: dict[str, Any],
    ) -> T_Retval:
        """
        Run the given coroutine function in an asynchronous event loop.

        The current thread must not be already running an event loop.

        :param func: a coroutine function
        :param args: positional arguments to ``func``
        :param kwargs: keyword arguments to ``func``
        :param options: keyword arguments to call the backend ``run()`` implementation
            with
        :return: the return value of the coroutine function
        """

    @classmethod
    @abstractmethod
    def current_token(cls) -> object:
        """
        Return a backend-specific, opaque token representing the current event
        loop (passed back to :meth:`run_async_from_thread` /
        :meth:`run_sync_from_thread`).

        :return: an opaque event loop token
        """

    @classmethod
    @abstractmethod
    def current_time(cls) -> float:
        """
        Return the current value of the event loop's internal clock.

        :return: the clock value (seconds)
        """

    @classmethod
    @abstractmethod
    def cancelled_exception_class(cls) -> type[BaseException]:
        """Return the exception class that is raised in a task if it's cancelled."""

    @classmethod
    @abstractmethod
    async def checkpoint(cls) -> None:
        """
        Check if the task has been cancelled, and allow rescheduling of other tasks.

        This is effectively the same as running :meth:`checkpoint_if_cancelled` and then
        :meth:`cancel_shielded_checkpoint`.
        """

    @classmethod
    async def checkpoint_if_cancelled(cls) -> None:
        """
        Check if the current task group has been cancelled.

        This will check if the task has been cancelled, but will not allow other tasks
        to be scheduled if not.

        """
        # -inf signals a delivered cancellation; only then do we take the full
        # checkpoint (which will raise the cancellation exception).
        if cls.current_effective_deadline() == -math.inf:
            await cls.checkpoint()

    @classmethod
    async def cancel_shielded_checkpoint(cls) -> None:
        """
        Allow the rescheduling of other tasks.

        This will give other tasks the opportunity to run, but without checking if the
        current task group has been cancelled, unlike with :meth:`checkpoint`.

        """
        with cls.create_cancel_scope(shield=True):
            await cls.sleep(0)

    @classmethod
    @abstractmethod
    async def sleep(cls, delay: float) -> None:
        """
        Pause the current task for the specified duration.

        :param delay: the duration, in seconds
        """

    @classmethod
    @abstractmethod
    def create_cancel_scope(
        cls, *, deadline: float = math.inf, shield: bool = False
    ) -> CancelScope:
        """Create a backend-native cancel scope."""
        pass

    @classmethod
    @abstractmethod
    def current_effective_deadline(cls) -> float:
        """
        Return the nearest deadline among all the cancel scopes effective for the
        current task.

        :return:
            - a clock value from the event loop's internal clock
            - ``inf`` if there is no deadline in effect
            - ``-inf`` if the current scope has been cancelled
        :rtype: float
        """

    @classmethod
    @abstractmethod
    def create_task_group(cls) -> TaskGroup:
        """Create a backend-native task group."""
        pass

    @classmethod
    @abstractmethod
    def create_event(cls) -> Event:
        """Create a backend-native event object."""
        pass

    @classmethod
    @abstractmethod
    def create_lock(cls, *, fast_acquire: bool) -> Lock:
        """Create a backend-native lock."""
        pass

    @classmethod
    @abstractmethod
    def create_semaphore(
        cls,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> Semaphore:
        """Create a backend-native semaphore."""
        pass

    @classmethod
    @abstractmethod
    def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
        """Create a backend-native capacity limiter."""
        pass

    @classmethod
    @abstractmethod
    async def run_sync_in_worker_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        args: tuple[Unpack[PosArgsT]],
        abandon_on_cancel: bool = False,
        limiter: CapacityLimiter | None = None,
    ) -> T_Retval:
        """Run ``func(*args)`` in a worker thread and return its result."""
        pass

    @classmethod
    @abstractmethod
    def check_cancelled(cls) -> None:
        """Raise the backend's cancellation exception if a cancellation is pending."""
        pass

    @classmethod
    @abstractmethod
    def run_async_from_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        args: tuple[Unpack[PosArgsT]],
        token: object,
    ) -> T_Retval:
        """Run a coroutine function in the loop identified by ``token`` from a thread."""
        pass

    @classmethod
    @abstractmethod
    def run_sync_from_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        args: tuple[Unpack[PosArgsT]],
        token: object,
    ) -> T_Retval:
        """Run ``func(*args)`` in the loop identified by ``token`` from a thread."""
        pass

    @classmethod
    @abstractmethod
    def create_blocking_portal(cls) -> BlockingPortal:
        """Create a backend-native blocking portal."""
        pass

    @classmethod
    @abstractmethod
    async def open_process(
        cls,
        command: StrOrBytesPath | Sequence[StrOrBytesPath],
        *,
        stdin: int | IO[Any] | None,
        stdout: int | IO[Any] | None,
        stderr: int | IO[Any] | None,
        **kwargs: Any,
    ) -> Process:
        """Start a subprocess and return an async handle for it."""
        pass

    @classmethod
    @abstractmethod
    def setup_process_pool_exit_at_shutdown(cls, workers: set[Process]) -> None:
        """Arrange for the given worker processes to be terminated at loop shutdown."""
        pass

    @classmethod
    @abstractmethod
    async def connect_tcp(
        cls, host: str, port: int, local_address: IPSockAddrType | None = None
    ) -> SocketStream:
        """Open a TCP connection to ``host``:``port``."""
        pass

    @classmethod
    @abstractmethod
    async def connect_unix(cls, path: str | bytes) -> UNIXSocketStream:
        """Open a UNIX domain socket connection to ``path``."""
        pass

    @classmethod
    @abstractmethod
    def create_tcp_listener(cls, sock: socket) -> SocketListener:
        """Wrap an already-bound TCP socket in a listener."""
        pass

    @classmethod
    @abstractmethod
    def create_unix_listener(cls, sock: socket) -> SocketListener:
        """Wrap an already-bound UNIX socket in a listener."""
        pass

    @classmethod
    @abstractmethod
    async def create_udp_socket(
        cls,
        family: AddressFamily,
        local_address: IPSockAddrType | None,
        remote_address: IPSockAddrType | None,
        reuse_port: bool,
    ) -> UDPSocket | ConnectedUDPSocket:
        """Create a UDP socket, connected if ``remote_address`` is given."""
        pass

    @classmethod
    @overload
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: None
    ) -> UNIXDatagramSocket: ...

    @classmethod
    @overload
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: str | bytes
    ) -> ConnectedUNIXDatagramSocket: ...

    @classmethod
    @abstractmethod
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: str | bytes | None
    ) -> UNIXDatagramSocket | ConnectedUNIXDatagramSocket:
        """Wrap a UNIX datagram socket, connected if ``remote_path`` is given."""
        pass

    @classmethod
    @abstractmethod
    async def getaddrinfo(
        cls,
        host: bytes | str | None,
        port: str | int | None,
        *,
        family: int | AddressFamily = 0,
        type: int | SocketKind = 0,
        proto: int = 0,
        flags: int = 0,
    ) -> list[
        tuple[
            AddressFamily,
            SocketKind,
            int,
            str,
            tuple[str, int] | tuple[str, int, int, int],
        ]
    ]:
        """Asynchronous counterpart of :func:`socket.getaddrinfo`."""
        pass

    @classmethod
    @abstractmethod
    async def getnameinfo(
        cls, sockaddr: IPSockAddrType, flags: int = 0
    ) -> tuple[str, str]:
        """Asynchronous counterpart of :func:`socket.getnameinfo`."""
        pass

    @classmethod
    @abstractmethod
    async def wait_readable(cls, obj: HasFileno | int) -> None:
        """Wait until the given file-like object is readable."""
        pass

    @classmethod
    @abstractmethod
    async def wait_writable(cls, obj: HasFileno | int) -> None:
        """Wait until the given file-like object is writable."""
        pass

    @classmethod
    @abstractmethod
    def current_default_thread_limiter(cls) -> CapacityLimiter:
        """Return the default capacity limiter used for worker threads."""
        pass

    @classmethod
    @abstractmethod
    def open_signal_receiver(
        cls, *signals: Signals
    ) -> AbstractContextManager[AsyncIterator[Signals]]:
        """Return a context manager yielding an async iterator of received signals."""
        pass

    @classmethod
    @abstractmethod
    def get_current_task(cls) -> TaskInfo:
        """Return information about the currently running task."""
        pass

    @classmethod
    @abstractmethod
    def get_running_tasks(cls) -> Sequence[TaskInfo]:
        """Return information about all running tasks in this event loop."""
        pass

    @classmethod
    @abstractmethod
    async def wait_all_tasks_blocked(cls) -> None:
        """Wait until all other tasks are waiting for something."""
        pass

    @classmethod
    @abstractmethod
    def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
        """Create a test runner for this backend with the given options."""
        pass
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from abc import abstractmethod
4
+ from signal import Signals
5
+
6
+ from ._resources import AsyncResource
7
+ from ._streams import ByteReceiveStream, ByteSendStream
8
+
9
+
10
class Process(AsyncResource):
    """An asynchronous version of :class:`subprocess.Popen`."""

    @abstractmethod
    async def wait(self) -> int:
        """
        Wait until the process exits.

        :return: the exit code of the process
        """

    @abstractmethod
    def terminate(self) -> None:
        """
        Terminates the process, gracefully if possible.

        On Windows, this calls ``TerminateProcess()``.
        On POSIX systems, this sends ``SIGTERM`` to the process.

        .. seealso:: :meth:`subprocess.Popen.terminate`
        """

    @abstractmethod
    def kill(self) -> None:
        """
        Kills the process.

        On Windows, this calls ``TerminateProcess()``.
        On POSIX systems, this sends ``SIGKILL`` to the process.

        .. seealso:: :meth:`subprocess.Popen.kill`
        """

    @abstractmethod
    def send_signal(self, signal: Signals) -> None:
        """
        Send a signal to the subprocess.

        .. seealso:: :meth:`subprocess.Popen.send_signal`

        :param signal: the signal number (e.g. :data:`signal.SIGHUP`)
        """

    @property
    @abstractmethod
    def pid(self) -> int:
        """The process ID of the process."""

    @property
    @abstractmethod
    def returncode(self) -> int | None:
        """
        The return code of the process. If the process has not yet terminated, this will
        be ``None``.
        """

    # NOTE(review): the three stream properties below are presumably ``None``
    # when the corresponding stream was not redirected to a pipe — confirm
    # against the concrete backend implementations.
    @property
    @abstractmethod
    def stdin(self) -> ByteSendStream | None:
        """The stream for the standard input of the process."""

    @property
    @abstractmethod
    def stdout(self) -> ByteReceiveStream | None:
        """The stream for the standard output of the process."""

    @property
    @abstractmethod
    def stderr(self) -> ByteReceiveStream | None:
        """The stream for the standard error output of the process."""
.venv/lib/python3.11/site-packages/anyio/abc/_testing.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import types
4
+ from abc import ABCMeta, abstractmethod
5
+ from collections.abc import AsyncGenerator, Callable, Coroutine, Iterable
6
+ from typing import Any, TypeVar
7
+
8
+ _T = TypeVar("_T")
9
+
10
+
11
class TestRunner(metaclass=ABCMeta):
    """
    Encapsulates a running event loop. Every call made through this object will use the
    same event loop.
    """

    def __enter__(self) -> TestRunner:
        # No setup is needed on entry; subclasses tear down in __exit__().
        return self

    @abstractmethod
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: types.TracebackType | None,
    ) -> bool | None: ...

    @abstractmethod
    def run_asyncgen_fixture(
        self,
        fixture_func: Callable[..., AsyncGenerator[_T, Any]],
        kwargs: dict[str, Any],
    ) -> Iterable[_T]:
        """
        Run an async generator fixture.

        :param fixture_func: the fixture function
        :param kwargs: keyword arguments to call the fixture function with
        :return: an iterator yielding the value yielded from the async generator
        """

    @abstractmethod
    def run_fixture(
        self,
        fixture_func: Callable[..., Coroutine[Any, Any, _T]],
        kwargs: dict[str, Any],
    ) -> _T:
        """
        Run an async fixture.

        :param fixture_func: the fixture function
        :param kwargs: keyword arguments to call the fixture function with
        :return: the return value of the fixture function
        """

    @abstractmethod
    def run_test(
        self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
    ) -> None:
        """
        Run an async test function.

        :param test_func: the test function
        :param kwargs: keyword arguments to call the test function with
        """
.venv/lib/python3.11/site-packages/anyio/from_thread.py ADDED
@@ -0,0 +1,527 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ from collections.abc import Awaitable, Callable, Generator
5
+ from concurrent.futures import Future
6
+ from contextlib import (
7
+ AbstractAsyncContextManager,
8
+ AbstractContextManager,
9
+ contextmanager,
10
+ )
11
+ from dataclasses import dataclass, field
12
+ from inspect import isawaitable
13
+ from threading import Lock, Thread, get_ident
14
+ from types import TracebackType
15
+ from typing import (
16
+ Any,
17
+ Generic,
18
+ TypeVar,
19
+ cast,
20
+ overload,
21
+ )
22
+
23
+ from ._core import _eventloop
24
+ from ._core._eventloop import get_async_backend, get_cancelled_exc_class, threadlocals
25
+ from ._core._synchronization import Event
26
+ from ._core._tasks import CancelScope, create_task_group
27
+ from .abc import AsyncBackend
28
+ from .abc._tasks import TaskStatus
29
+
30
+ if sys.version_info >= (3, 11):
31
+ from typing import TypeVarTuple, Unpack
32
+ else:
33
+ from typing_extensions import TypeVarTuple, Unpack
34
+
35
+ T_Retval = TypeVar("T_Retval")
36
+ T_co = TypeVar("T_co", covariant=True)
37
+ PosArgsT = TypeVarTuple("PosArgsT")
38
+
39
+
40
def run(
    func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a coroutine function from a worker thread.

    :param func: a coroutine function
    :param args: positional arguments for the callable
    :return: the return value of the coroutine function
    :raises RuntimeError: if not called from an AnyIO worker thread

    """
    try:
        backend = threadlocals.current_async_backend
        event_loop_token = threadlocals.current_token
    except AttributeError:
        # The thread-locals are only populated in threads spawned by AnyIO.
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    return backend.run_async_from_thread(func, args, token=event_loop_token)
60
+
61
+
62
def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a function in the event loop thread from a worker thread.

    :param func: a callable
    :param args: positional arguments for the callable
    :return: the return value of the callable
    :raises RuntimeError: if not called from an AnyIO worker thread

    """
    try:
        backend = threadlocals.current_async_backend
        event_loop_token = threadlocals.current_token
    except AttributeError:
        # The thread-locals are only populated in threads spawned by AnyIO.
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    return backend.run_sync_from_thread(func, args, token=event_loop_token)
82
+
83
+
84
class _BlockingAsyncContextManager(Generic[T_co], AbstractContextManager):
    """
    Synchronous wrapper around an async context manager, driven via a
    :class:`BlockingPortal`: ``__aenter__``/``__aexit__`` run in the portal's
    event loop while the calling thread blocks on concurrent.futures objects.
    """

    _enter_future: Future[T_co]
    _exit_future: Future[bool | None]
    _exit_event: Event
    # Exception info captured in __exit__(); defaults to "no exception" so
    # run_async_cm() can pass it to __aexit__() even on early cancellation.
    _exit_exc_info: tuple[
        type[BaseException] | None, BaseException | None, TracebackType | None
    ] = (None, None, None)

    def __init__(
        self, async_cm: AbstractAsyncContextManager[T_co], portal: BlockingPortal
    ):
        self._async_cm = async_cm
        self._portal = portal

    async def run_async_cm(self) -> bool | None:
        # Runs as a portal task: enters the async CM, parks until the sync
        # side exits, then exits the async CM with the captured exc info.
        try:
            self._exit_event = Event()
            value = await self._async_cm.__aenter__()
        except BaseException as exc:
            self._enter_future.set_exception(exc)
            raise
        else:
            self._enter_future.set_result(value)

        try:
            # Wait for the sync context manager to exit.
            # This next statement can raise `get_cancelled_exc_class()` if
            # something went wrong in a task group in this async context
            # manager.
            await self._exit_event.wait()
        finally:
            # In case of cancellation, it could be that we end up here before
            # `_BlockingAsyncContextManager.__exit__` is called, and an
            # `_exit_exc_info` has been set.
            result = await self._async_cm.__aexit__(*self._exit_exc_info)
            return result

    def __enter__(self) -> T_co:
        self._enter_future = Future()
        self._exit_future = self._portal.start_task_soon(self.run_async_cm)
        # Blocks until run_async_cm() resolves the future (or raises).
        return self._enter_future.result()

    def __exit__(
        self,
        __exc_type: type[BaseException] | None,
        __exc_value: BaseException | None,
        __traceback: TracebackType | None,
    ) -> bool | None:
        self._exit_exc_info = __exc_type, __exc_value, __traceback
        # Unblock run_async_cm() in the event loop, then wait for __aexit__()
        # to finish; its return value decides exception suppression.
        self._portal.call(self._exit_event.set)
        return self._exit_future.result()
135
+
136
+
137
class _BlockingPortalTaskStatus(TaskStatus):
    """TaskStatus adapter that resolves a Future when ``started()`` is called."""

    def __init__(self, future: Future):
        self._future = future

    def started(self, value: object = None) -> None:
        # Hands the startup value back to the thread blocked in
        # BlockingPortal.start_task().
        self._future.set_result(value)
143
+
144
+
145
class BlockingPortal:
    """An object that lets external threads run code in an asynchronous event loop."""

    def __new__(cls) -> BlockingPortal:
        # Construction is delegated to the active backend, which returns a
        # backend-specific subclass implementing _spawn_task_from_thread().
        return get_async_backend().create_blocking_portal()

    def __init__(self) -> None:
        # Thread ID of the event loop thread; set to None by stop().
        self._event_loop_thread_id: int | None = get_ident()
        self._stop_event = Event()
        self._task_group = create_task_group()
        self._cancelled_exc_class = get_cancelled_exc_class()

    async def __aenter__(self) -> BlockingPortal:
        await self._task_group.__aenter__()
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        # Stop accepting new calls, then wait for remaining portal tasks.
        await self.stop()
        return await self._task_group.__aexit__(exc_type, exc_val, exc_tb)

    def _check_running(self) -> None:
        # Guard for methods that must run from a thread OTHER than the event
        # loop thread, while the portal is still accepting work.
        if self._event_loop_thread_id is None:
            raise RuntimeError("This portal is not running")
        if self._event_loop_thread_id == get_ident():
            raise RuntimeError(
                "This method cannot be called from the event loop thread"
            )

    async def sleep_until_stopped(self) -> None:
        """Sleep until :meth:`stop` is called."""
        await self._stop_event.wait()

    async def stop(self, cancel_remaining: bool = False) -> None:
        """
        Signal the portal to shut down.

        This marks the portal as no longer accepting new calls and exits from
        :meth:`sleep_until_stopped`.

        :param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False``
            to let them finish before returning

        """
        self._event_loop_thread_id = None
        self._stop_event.set()
        if cancel_remaining:
            self._task_group.cancel_scope.cancel()

    async def _call_func(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        future: Future[T_Retval],
    ) -> None:
        # Runs in the event loop; mirrors the call's outcome onto ``future``
        # and propagates cancellation of the future into the cancel scope.
        def callback(f: Future[T_Retval]) -> None:
            # Invoked from whichever thread cancelled the future; re-enter the
            # event loop to cancel the scope unless we're already on it.
            if f.cancelled() and self._event_loop_thread_id not in (
                None,
                get_ident(),
            ):
                self.call(scope.cancel)

        try:
            retval_or_awaitable = func(*args, **kwargs)
            if isawaitable(retval_or_awaitable):
                with CancelScope() as scope:
                    if future.cancelled():
                        scope.cancel()
                    else:
                        future.add_done_callback(callback)

                    retval = await retval_or_awaitable
            else:
                retval = retval_or_awaitable
        except self._cancelled_exc_class:
            future.cancel()
            future.set_running_or_notify_cancel()
        except BaseException as exc:
            if not future.cancelled():
                future.set_exception(exc)

            # Let base exceptions fall through
            if not isinstance(exc, Exception):
                raise
        else:
            if not future.cancelled():
                future.set_result(retval)
        finally:
            # NOTE(review): presumably severs the callback closure's reference
            # to the (now exited) cancel scope — confirm intent.
            scope = None  # type: ignore[assignment]

    def _spawn_task_from_thread(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        name: object,
        future: Future[T_Retval],
    ) -> None:
        """
        Spawn a new task using the given callable.

        Implementors must ensure that the future is resolved when the task finishes.

        :param func: a callable
        :param args: positional arguments to be passed to the callable
        :param kwargs: keyword arguments to be passed to the callable
        :param name: name of the task (will be coerced to a string if not ``None``)
        :param future: a future that will resolve to the return value of the callable,
            or the exception raised during its execution

        """
        raise NotImplementedError

    @overload
    def call(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        *args: Unpack[PosArgsT],
    ) -> T_Retval: ...

    @overload
    def call(
        self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
    ) -> T_Retval: ...

    def call(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        *args: Unpack[PosArgsT],
    ) -> T_Retval:
        """
        Call the given function in the event loop thread.

        If the callable returns a coroutine object, it is awaited on.

        :param func: any callable
        :raises RuntimeError: if the portal is not running or if this method is called
            from within the event loop thread

        """
        return cast(T_Retval, self.start_task_soon(func, *args).result())

    @overload
    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]: ...

    @overload
    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]: ...

    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]:
        """
        Start a task in the portal's task group.

        The task will be run inside a cancel scope which can be cancelled by cancelling
        the returned future.

        :param func: the target function
        :param args: positional arguments passed to ``func``
        :param name: name of the task (will be coerced to a string if not ``None``)
        :return: a future that resolves with the return value of the callable if the
            task completes successfully, or with the exception raised in the task
        :raises RuntimeError: if the portal is not running or if this method is called
            from within the event loop thread
        :rtype: concurrent.futures.Future[T_Retval]

        .. versionadded:: 3.0

        """
        self._check_running()
        f: Future[T_Retval] = Future()
        self._spawn_task_from_thread(func, args, {}, name, f)
        return f

    def start_task(
        self,
        func: Callable[..., Awaitable[T_Retval]],
        *args: object,
        name: object = None,
    ) -> tuple[Future[T_Retval], Any]:
        """
        Start a task in the portal's task group and wait until it signals for readiness.

        This method works the same way as :meth:`.abc.TaskGroup.start`.

        :param func: the target function
        :param args: positional arguments passed to ``func``
        :param name: name of the task (will be coerced to a string if not ``None``)
        :return: a tuple of (future, task_status_value) where the ``task_status_value``
            is the value passed to ``task_status.started()`` from within the target
            function
        :rtype: tuple[concurrent.futures.Future[T_Retval], Any]

        .. versionadded:: 3.0

        """

        def task_done(future: Future[T_Retval]) -> None:
            # Mirror the task's completion onto the status future if the task
            # finished without ever calling task_status.started().
            if not task_status_future.done():
                if future.cancelled():
                    task_status_future.cancel()
                elif future.exception():
                    task_status_future.set_exception(future.exception())
                else:
                    exc = RuntimeError(
                        "Task exited without calling task_status.started()"
                    )
                    task_status_future.set_exception(exc)

        self._check_running()
        task_status_future: Future = Future()
        task_status = _BlockingPortalTaskStatus(task_status_future)
        f: Future = Future()
        f.add_done_callback(task_done)
        self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f)
        # Blocks until the task calls started() (or exits/fails first).
        return f, task_status_future.result()

    def wrap_async_context_manager(
        self, cm: AbstractAsyncContextManager[T_co]
    ) -> AbstractContextManager[T_co]:
        """
        Wrap an async context manager as a synchronous context manager via this portal.

        Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping
        in the middle until the synchronous context manager exits.

        :param cm: an asynchronous context manager
        :return: a synchronous context manager

        .. versionadded:: 2.1

        """
        return _BlockingAsyncContextManager(cm, self)
396
+
397
+
398
@dataclass
class BlockingPortalProvider:
    """
    A manager for a blocking portal. Used as a context manager. The first thread to
    enter this context manager causes a blocking portal to be started with the specific
    parameters, and the last thread to exit causes the portal to be shut down. Thus,
    there will be exactly one blocking portal running in this context as long as at
    least one thread has entered this context manager.

    The parameters are the same as for :func:`~anyio.run`.

    :param backend: name of the backend
    :param backend_options: backend options

    .. versionadded:: 4.4
    """

    backend: str = "asyncio"
    backend_options: dict[str, Any] | None = None
    # Protects the lease count and the lazily-created portal below.
    _lock: Lock = field(init=False, default_factory=Lock)
    _leases: int = field(init=False, default=0)
    _portal: BlockingPortal = field(init=False)
    _portal_cm: AbstractContextManager[BlockingPortal] | None = field(
        init=False, default=None
    )

    def __enter__(self) -> BlockingPortal:
        with self._lock:
            # First lease starts the portal; later leases reuse it.
            if self._portal_cm is None:
                self._portal_cm = start_blocking_portal(
                    self.backend, self.backend_options
                )
                self._portal = self._portal_cm.__enter__()

            self._leases += 1
            return self._portal

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        portal_cm: AbstractContextManager[BlockingPortal] | None = None
        with self._lock:
            assert self._portal_cm
            assert self._leases > 0
            self._leases -= 1
            if not self._leases:
                # Last lease out: take ownership of the CM for shutdown.
                portal_cm = self._portal_cm
                self._portal_cm = None
                del self._portal

        # Shut down outside the lock so new entrants aren't blocked on the
        # (potentially slow) portal teardown.
        if portal_cm:
            portal_cm.__exit__(None, None, None)
453
+
454
+
455
@contextmanager
def start_blocking_portal(
    backend: str = "asyncio", backend_options: dict[str, Any] | None = None
) -> Generator[BlockingPortal, Any, None]:
    """
    Start a new event loop in a new thread and run a blocking portal in its main task.

    The parameters are the same as for :func:`~anyio.run`.

    :param backend: name of the backend
    :param backend_options: backend options
    :return: a context manager that yields a blocking portal

    .. versionchanged:: 3.0
        Usage as a context manager is now required.

    """

    async def run_portal() -> None:
        # Main task of the spawned event loop: publish the portal to the
        # waiting caller, then park until the portal is stopped.
        async with BlockingPortal() as portal_:
            future.set_result(portal_)
            await portal_.sleep_until_stopped()

    def run_blocking_portal() -> None:
        # Thread target: run the event loop; report startup failures through
        # the same future the caller is blocked on.
        if future.set_running_or_notify_cancel():
            try:
                _eventloop.run(
                    run_portal, backend=backend, backend_options=backend_options
                )
            except BaseException as exc:
                if not future.done():
                    future.set_exception(exc)

    future: Future[BlockingPortal] = Future()
    thread = Thread(target=run_blocking_portal, daemon=True)
    thread.start()
    try:
        cancel_remaining_tasks = False
        portal = future.result()
        try:
            yield portal
        except BaseException:
            # An exception escaping the with-block cancels remaining tasks.
            cancel_remaining_tasks = True
            raise
        finally:
            try:
                portal.call(portal.stop, cancel_remaining_tasks)
            except RuntimeError:
                # The portal may already have stopped on its own.
                pass
    finally:
        thread.join()
506
+
507
+
508
def check_cancelled() -> None:
    """
    Check if the cancel scope of the host task's running the current worker thread has
    been cancelled.

    If the host task's current cancel scope has indeed been cancelled, the
    backend-specific cancellation exception will be raised.

    :raises RuntimeError: if the current thread was not spawned by
        :func:`.to_thread.run_sync`

    """
    try:
        backend: AsyncBackend = threadlocals.current_async_backend
    except AttributeError:
        # The thread-local is only populated in threads spawned by AnyIO.
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    backend.check_cancelled()
.venv/lib/python3.11/site-packages/anyio/lowlevel.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import enum
4
+ from dataclasses import dataclass
5
+ from typing import Any, Generic, Literal, TypeVar, overload
6
+ from weakref import WeakKeyDictionary
7
+
8
+ from ._core._eventloop import get_async_backend
9
+
10
+ T = TypeVar("T")
11
+ D = TypeVar("D")
12
+
13
+
14
async def checkpoint() -> None:
    """
    Check for cancellation and allow the scheduler to switch to another task.

    Equivalent to (but more efficient than)::

        await checkpoint_if_cancelled()
        await cancel_shielded_checkpoint()


    .. versionadded:: 3.0

    """
    # Delegates to the currently active async backend.
    await get_async_backend().checkpoint()
28
+
29
+
30
async def checkpoint_if_cancelled() -> None:
    """
    Enter a checkpoint if the enclosing cancel scope has been cancelled.

    This does not allow the scheduler to switch to a different task.

    .. versionadded:: 3.0

    """
    # Delegates to the currently active async backend.
    await get_async_backend().checkpoint_if_cancelled()
40
+
41
+
42
async def cancel_shielded_checkpoint() -> None:
    """
    Allow the scheduler to switch to another task but without checking for cancellation.

    Equivalent to (but potentially more efficient than)::

        with CancelScope(shield=True):
            await checkpoint()


    .. versionadded:: 3.0

    """
    # Delegates to the currently active async backend.
    await get_async_backend().cancel_shielded_checkpoint()
56
+
57
+
58
def current_token() -> object:
    """
    Return a backend specific token object that can be used to get back to the event
    loop.

    """
    # The token identifies the running event loop; it is also used as the
    # per-loop key for RunVar storage below.
    return get_async_backend().current_token()
65
+
66
+
67
# Per-event-loop storage: maps an event loop token to that run's variables.
_run_vars: WeakKeyDictionary[Any, dict[str, Any]] = WeakKeyDictionary()
# NOTE(review): appears unused within this module view — confirm before
# removing (RunVar also defines its own class-level _token_wrappers).
_token_wrappers: dict[Any, _TokenWrapper] = {}


@dataclass(frozen=True)
class _TokenWrapper:
    # Hashable, weak-referenceable wrapper for backend tokens that are not
    # weakly referenceable themselves.
    __slots__ = "_token", "__weakref__"
    _token: object


class _NoValueSet(enum.Enum):
    # Sentinel enum: distinguishes "no value set" from a stored None.
    NO_VALUE_SET = enum.auto()


class RunvarToken(Generic[T]):
    """Token returned by :meth:`RunVar.set`, used to restore the prior value."""

    __slots__ = "_var", "_value", "_redeemed"

    def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]):
        self._var = var
        # Previous value of the variable (NO_VALUE_SET if it was unset).
        self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value
        # Guards against redeeming the same token in reset() twice.
        self._redeemed = False
88
+
89
+
90
class RunVar(Generic[T]):
    """
    Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop.
    """

    __slots__ = "_name", "_default"

    NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET

    # NOTE(review): appears unused within this module view — confirm.
    _token_wrappers: set[_TokenWrapper] = set()

    def __init__(
        self, name: str, default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
    ):
        self._name = name
        self._default = default

    @property
    def _current_vars(self) -> dict[str, T]:
        # The variable mapping for the current event loop, created on first
        # use for this loop (keyed by the backend's loop token).
        token = current_token()
        try:
            return _run_vars[token]
        except KeyError:
            run_vars = _run_vars[token] = {}
            return run_vars

    @overload
    def get(self, default: D) -> T | D: ...

    @overload
    def get(self) -> T: ...

    def get(
        self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
    ) -> T | D:
        """
        Return this variable's value for the current event loop, falling back
        to *default*, then to the constructor default; raise
        :exc:`LookupError` when neither is set.
        """
        try:
            return self._current_vars[self._name]
        except KeyError:
            if default is not RunVar.NO_VALUE_SET:
                return default
            elif self._default is not RunVar.NO_VALUE_SET:
                return self._default

            raise LookupError(
                f'Run variable "{self._name}" has no value and no default set'
            )

    def set(self, value: T) -> RunvarToken[T]:
        """
        Set the value for the current event loop and return a token with which
        :meth:`reset` can restore the previous state.
        """
        current_vars = self._current_vars
        token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET))
        current_vars[self._name] = value
        return token

    def reset(self, token: RunvarToken[T]) -> None:
        """
        Restore the state recorded in *token* (single use; must belong to this
        variable).
        """
        if token._var is not self:
            raise ValueError("This token does not belong to this RunVar")

        if token._redeemed:
            raise ValueError("This token has already been used")

        if token._value is _NoValueSet.NO_VALUE_SET:
            # The variable was unset before set(); make it unset again.
            try:
                del self._current_vars[self._name]
            except KeyError:
                pass
        else:
            self._current_vars[self._name] = token._value

        token._redeemed = True

    def __repr__(self) -> str:
        return f"<RunVar name={self._name!r}>"
.venv/lib/python3.11/site-packages/anyio/py.typed ADDED
File without changes
.venv/lib/python3.11/site-packages/anyio/pytest_plugin.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ from collections.abc import Generator, Iterator
5
+ from contextlib import ExitStack, contextmanager
6
+ from inspect import isasyncgenfunction, iscoroutinefunction, ismethod
7
+ from typing import Any, cast
8
+
9
+ import pytest
10
+ import sniffio
11
+ from _pytest.fixtures import SubRequest
12
+ from _pytest.outcomes import Exit
13
+
14
+ from ._core._eventloop import get_all_backends, get_async_backend
15
+ from ._core._exceptions import iterate_exceptions
16
+ from .abc import TestRunner
17
+
18
+ if sys.version_info < (3, 11):
19
+ from exceptiongroup import ExceptionGroup
20
+
21
# Shared test-runner state: one runner instance is reused across nested
# get_runner() calls. _runner_leases reference-counts active users; the
# ExitStack in _runner_stack owns the runner's cleanup and is closed when the
# last lease is released.
_current_runner: TestRunner | None = None
_runner_stack: ExitStack | None = None
_runner_leases = 0
24
+
25
+
26
def extract_backend_and_options(backend: object) -> tuple[str, dict[str, Any]]:
    """Normalize a backend specification into a ``(name, options)`` pair.

    A bare string maps to that backend with empty options; a 2-tuple of
    ``(name, options_dict)`` is passed through unchanged.

    :raises TypeError: if *backend* is neither form
    """
    if isinstance(backend, str):
        return backend, {}

    if (
        isinstance(backend, tuple)
        and len(backend) == 2
        and isinstance(backend[0], str)
        and isinstance(backend[1], dict)
    ):
        return cast(tuple[str, dict[str, Any]], backend)

    raise TypeError("anyio_backend must be either a string or tuple of (string, dict)")
34
+
35
+
36
@contextmanager
def get_runner(
    backend_name: str, backend_options: dict[str, Any]
) -> Iterator[TestRunner]:
    """Yield the shared :class:`TestRunner`, creating it on the first lease.

    Reference-counted: nested/overlapping uses share one runner; the runner
    (and the sniffio token, if set here) is torn down only when the last
    lease exits.
    """
    global _current_runner, _runner_leases, _runner_stack
    if _current_runner is None:
        # First lease: build the runner and the ExitStack that owns cleanup.
        asynclib = get_async_backend(backend_name)
        _runner_stack = ExitStack()
        if sniffio.current_async_library_cvar.get(None) is None:
            # Since we're in control of the event loop, we can cache the name of the
            # async library
            token = sniffio.current_async_library_cvar.set(backend_name)
            _runner_stack.callback(sniffio.current_async_library_cvar.reset, token)

        backend_options = backend_options or {}
        _current_runner = _runner_stack.enter_context(
            asynclib.create_test_runner(backend_options)
        )

    _runner_leases += 1
    try:
        yield _current_runner
    finally:
        _runner_leases -= 1
        if not _runner_leases:
            # Last lease released: close the stack (runner + sniffio reset)
            # and clear the module-level cache.
            assert _runner_stack is not None
            _runner_stack.close()
            _runner_stack = _current_runner = None
64
+
65
+
66
def pytest_configure(config: Any) -> None:
    """Register the ``anyio`` marker so pytest does not warn about it."""
    marker_help = (
        "anyio: mark the (coroutine function) test to be run "
        "asynchronously via anyio."
    )
    config.addinivalue_line("markers", marker_help)
72
+
73
+
74
@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(fixturedef: Any, request: Any) -> Generator[Any]:
    """Hookwrapper that lets async fixtures run on the anyio test runner.

    For coroutine/async-gen fixtures in requests that use the
    ``anyio_backend`` fixture, temporarily swaps ``fixturedef.func`` for a
    synchronous wrapper (and widens ``fixturedef.argnames`` as needed),
    restoring both after the fixture has been set up.
    """

    def wrapper(
        *args: Any, anyio_backend: Any, request: SubRequest, **kwargs: Any
    ) -> Any:
        # Rebind any fixture methods to the request instance
        if (
            request.instance
            and ismethod(func)
            and type(func.__self__) is type(request.instance)
        ):
            local_func = func.__func__.__get__(request.instance)
        else:
            local_func = func

        backend_name, backend_options = extract_backend_and_options(anyio_backend)
        # Only forward the extra arguments if the fixture declared them itself.
        if has_backend_arg:
            kwargs["anyio_backend"] = anyio_backend

        if has_request_arg:
            kwargs["request"] = request

        with get_runner(backend_name, backend_options) as runner:
            if isasyncgenfunction(local_func):
                # Async generator fixture: setup value now, teardown later.
                yield from runner.run_asyncgen_fixture(local_func, kwargs)
            else:
                yield runner.run_fixture(local_func, kwargs)

    # Only apply this to coroutine functions and async generator functions in requests
    # that involve the anyio_backend fixture
    func = fixturedef.func
    if isasyncgenfunction(func) or iscoroutinefunction(func):
        if "anyio_backend" in request.fixturenames:
            fixturedef.func = wrapper
            original_argname = fixturedef.argnames

            # Ensure the wrapper's required arguments are requested; remember
            # whether the fixture asked for them itself (see wrapper above).
            if not (has_backend_arg := "anyio_backend" in fixturedef.argnames):
                fixturedef.argnames += ("anyio_backend",)

            if not (has_request_arg := "request" in fixturedef.argnames):
                fixturedef.argnames += ("request",)

            try:
                return (yield)
            finally:
                # Undo the patching so the fixturedef is reusable as-is.
                fixturedef.func = func
                fixturedef.argnames = original_argname

    return (yield)
123
+
124
+
125
@pytest.hookimpl(tryfirst=True)
def pytest_pycollect_makeitem(collector: Any, name: Any, obj: Any) -> None:
    """Make anyio-marked coroutine tests request the ``anyio_backend`` fixture."""
    if not collector.istestfunction(obj, name):
        return

    # For hypothesis tests, the coroutine check applies to the inner test.
    inner_func = obj.hypothesis.inner_test if hasattr(obj, "hypothesis") else obj
    if not iscoroutinefunction(inner_func):
        return

    closest_marker = collector.get_closest_marker("anyio")
    own_markers = getattr(obj, "pytestmark", ())
    if closest_marker or any(mark.name == "anyio" for mark in own_markers):
        pytest.mark.usefixtures("anyio_backend")(obj)
134
+
135
+
136
@pytest.hookimpl(tryfirst=True)
def pytest_pyfunc_call(pyfuncitem: Any) -> bool | None:
    """Run coroutine test functions on the anyio test runner.

    Returns True when the test was executed here (so pytest skips its own
    call), or None to let the default protocol proceed.  Only acts when the
    ``anyio_backend`` fixture is present (and truthy) in the test's funcargs.
    """

    def run_with_hypothesis(**kwargs: Any) -> None:
        # Synchronous shim hypothesis can call for each generated example.
        with get_runner(backend_name, backend_options) as runner:
            runner.run_test(original_func, kwargs)

    backend = pyfuncitem.funcargs.get("anyio_backend")
    if backend:
        backend_name, backend_options = extract_backend_and_options(backend)

        if hasattr(pyfuncitem.obj, "hypothesis"):
            # Wrap the inner test function unless it's already wrapped
            original_func = pyfuncitem.obj.hypothesis.inner_test
            if original_func.__qualname__ != run_with_hypothesis.__qualname__:
                if iscoroutinefunction(original_func):
                    pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis

            # Let pytest call the (now-wrapped) hypothesis test normally.
            return None

        if iscoroutinefunction(pyfuncitem.obj):
            funcargs = pyfuncitem.funcargs
            testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
            with get_runner(backend_name, backend_options) as runner:
                try:
                    runner.run_test(pyfuncitem.obj, testargs)
                except ExceptionGroup as excgrp:
                    # Unwrap session-control exceptions so pytest sees them
                    # directly instead of inside an exception group.
                    for exc in iterate_exceptions(excgrp):
                        if isinstance(exc, (Exit, KeyboardInterrupt, SystemExit)):
                            raise exc from excgrp

                    raise

            return True

    return None
171
+
172
+
173
@pytest.fixture(scope="module", params=get_all_backends())
def anyio_backend(request: Any) -> Any:
    # Parametrized over every installed backend; the param is either a
    # backend name string or a (name, options) tuple.
    return request.param
176
+
177
+
178
@pytest.fixture
def anyio_backend_name(anyio_backend: Any) -> str:
    """Return just the backend name from either form of ``anyio_backend``."""
    return anyio_backend if isinstance(anyio_backend, str) else anyio_backend[0]
184
+
185
+
186
@pytest.fixture
def anyio_backend_options(anyio_backend: Any) -> dict[str, Any]:
    """Return the backend options dict from either form of ``anyio_backend``."""
    return {} if isinstance(anyio_backend, str) else anyio_backend[1]
.venv/lib/python3.11/site-packages/anyio/streams/__init__.py ADDED
File without changes
.venv/lib/python3.11/site-packages/anyio/streams/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (186 Bytes). View file
 
.venv/lib/python3.11/site-packages/anyio/streams/__pycache__/buffered.cpython-311.pyc ADDED
Binary file (6.49 kB). View file