diff --git a/.gitattributes b/.gitattributes index 39898d5066407054f7959629ce477f33d5cb2308..ff831307360f2788d7f1d6a06d8f27f2aec6d753 100644 --- a/.gitattributes +++ b/.gitattributes @@ -211,3 +211,4 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/torch/_inductor/_ .venv/lib/python3.11/site-packages/watchfiles/_rust_notify.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text .venv/lib/python3.11/site-packages/frozenlist/_frozenlist.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text .venv/lib/python3.11/site-packages/uvloop/loop.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +.venv/lib/python3.11/site-packages/functorch/_C.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/__init__.py b/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ed2ad802ecaf021106c25c03112f29e75c7b2f8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/__init__.py @@ -0,0 +1,289 @@ +import os +from hashlib import md5 + +import pytest + +from fsspec.implementations.local import LocalFileSystem +from fsspec.tests.abstract.copy import AbstractCopyTests # noqa: F401 +from fsspec.tests.abstract.get import AbstractGetTests # noqa: F401 +from fsspec.tests.abstract.open import AbstractOpenTests # noqa: F401 +from fsspec.tests.abstract.pipe import AbstractPipeTests # noqa: F401 +from fsspec.tests.abstract.put import AbstractPutTests # noqa: F401 + + +class BaseAbstractFixtures: + """ + Abstract base class containing fixtures that are used by but never need to + be overridden in derived filesystem-specific classes to run the abstract + tests on such filesystems. + """ + + @pytest.fixture + def fs_bulk_operations_scenario_0(self, fs, fs_join, fs_path): + """ + Scenario on remote filesystem that is used for many cp/get/put tests. 
+ + Cleans up at the end of each test it which it is used. + """ + source = self._bulk_operations_scenario_0(fs, fs_join, fs_path) + yield source + fs.rm(source, recursive=True) + + @pytest.fixture + def fs_glob_edge_cases_files(self, fs, fs_join, fs_path): + """ + Scenario on remote filesystem that is used for glob edge cases cp/get/put tests. + + Cleans up at the end of each test it which it is used. + """ + source = self._glob_edge_cases_files(fs, fs_join, fs_path) + yield source + fs.rm(source, recursive=True) + + @pytest.fixture + def fs_dir_and_file_with_same_name_prefix(self, fs, fs_join, fs_path): + """ + Scenario on remote filesystem that is used to check cp/get/put on directory + and file with the same name prefixes. + + Cleans up at the end of each test it which it is used. + """ + source = self._dir_and_file_with_same_name_prefix(fs, fs_join, fs_path) + yield source + fs.rm(source, recursive=True) + + @pytest.fixture + def fs_10_files_with_hashed_names(self, fs, fs_join, fs_path): + """ + Scenario on remote filesystem that is used to check cp/get/put files order + when source and destination are lists. + + Cleans up at the end of each test it which it is used. + """ + source = self._10_files_with_hashed_names(fs, fs_join, fs_path) + yield source + fs.rm(source, recursive=True) + + @pytest.fixture + def fs_target(self, fs, fs_join, fs_path): + """ + Return name of remote directory that does not yet exist to copy into. + + Cleans up at the end of each test it which it is used. + """ + target = fs_join(fs_path, "target") + yield target + if fs.exists(target): + fs.rm(target, recursive=True) + + @pytest.fixture + def local_bulk_operations_scenario_0(self, local_fs, local_join, local_path): + """ + Scenario on local filesystem that is used for many cp/get/put tests. + + Cleans up at the end of each test it which it is used. 
+ """ + source = self._bulk_operations_scenario_0(local_fs, local_join, local_path) + yield source + local_fs.rm(source, recursive=True) + + @pytest.fixture + def local_glob_edge_cases_files(self, local_fs, local_join, local_path): + """ + Scenario on local filesystem that is used for glob edge cases cp/get/put tests. + + Cleans up at the end of each test it which it is used. + """ + source = self._glob_edge_cases_files(local_fs, local_join, local_path) + yield source + local_fs.rm(source, recursive=True) + + @pytest.fixture + def local_dir_and_file_with_same_name_prefix( + self, local_fs, local_join, local_path + ): + """ + Scenario on local filesystem that is used to check cp/get/put on directory + and file with the same name prefixes. + + Cleans up at the end of each test it which it is used. + """ + source = self._dir_and_file_with_same_name_prefix( + local_fs, local_join, local_path + ) + yield source + local_fs.rm(source, recursive=True) + + @pytest.fixture + def local_10_files_with_hashed_names(self, local_fs, local_join, local_path): + """ + Scenario on local filesystem that is used to check cp/get/put files order + when source and destination are lists. + + Cleans up at the end of each test it which it is used. + """ + source = self._10_files_with_hashed_names(local_fs, local_join, local_path) + yield source + local_fs.rm(source, recursive=True) + + @pytest.fixture + def local_target(self, local_fs, local_join, local_path): + """ + Return name of local directory that does not yet exist to copy into. + + Cleans up at the end of each test it which it is used. + """ + target = local_join(local_path, "target") + yield target + if local_fs.exists(target): + local_fs.rm(target, recursive=True) + + def _glob_edge_cases_files(self, some_fs, some_join, some_path): + """ + Scenario that is used for glob edge cases cp/get/put tests. 
+ Creates the following directory and file structure: + + 📁 source + ├── 📄 file1 + ├── 📄 file2 + ├── 📁 subdir0 + │ ├── 📄 subfile1 + │ ├── 📄 subfile2 + │ └── 📁 nesteddir + │ └── 📄 nestedfile + └── 📁 subdir1 + ├── 📄 subfile1 + ├── 📄 subfile2 + └── 📁 nesteddir + └── 📄 nestedfile + """ + source = some_join(some_path, "source") + some_fs.touch(some_join(source, "file1")) + some_fs.touch(some_join(source, "file2")) + + for subdir_idx in range(2): + subdir = some_join(source, f"subdir{subdir_idx}") + nesteddir = some_join(subdir, "nesteddir") + some_fs.makedirs(nesteddir) + some_fs.touch(some_join(subdir, "subfile1")) + some_fs.touch(some_join(subdir, "subfile2")) + some_fs.touch(some_join(nesteddir, "nestedfile")) + + return source + + def _bulk_operations_scenario_0(self, some_fs, some_join, some_path): + """ + Scenario that is used for many cp/get/put tests. Creates the following + directory and file structure: + + 📁 source + ├── 📄 file1 + ├── 📄 file2 + └── 📁 subdir + ├── 📄 subfile1 + ├── 📄 subfile2 + └── 📁 nesteddir + └── 📄 nestedfile + """ + source = some_join(some_path, "source") + subdir = some_join(source, "subdir") + nesteddir = some_join(subdir, "nesteddir") + some_fs.makedirs(nesteddir) + some_fs.touch(some_join(source, "file1")) + some_fs.touch(some_join(source, "file2")) + some_fs.touch(some_join(subdir, "subfile1")) + some_fs.touch(some_join(subdir, "subfile2")) + some_fs.touch(some_join(nesteddir, "nestedfile")) + return source + + def _dir_and_file_with_same_name_prefix(self, some_fs, some_join, some_path): + """ + Scenario that is used to check cp/get/put on directory and file with + the same name prefixes. 
Creates the following directory and file structure: + + 📁 source + ├── 📄 subdir.txt + └── 📁 subdir + └── 📄 subfile.txt + """ + source = some_join(some_path, "source") + subdir = some_join(source, "subdir") + file = some_join(source, "subdir.txt") + subfile = some_join(subdir, "subfile.txt") + some_fs.makedirs(subdir) + some_fs.touch(file) + some_fs.touch(subfile) + return source + + def _10_files_with_hashed_names(self, some_fs, some_join, some_path): + """ + Scenario that is used to check cp/get/put files order when source and + destination are lists. Creates the following directory and file structure: + + 📁 source + └── 📄 {hashed([0-9])}.txt + """ + source = some_join(some_path, "source") + for i in range(10): + hashed_i = md5(str(i).encode("utf-8")).hexdigest() + path = some_join(source, f"{hashed_i}.txt") + some_fs.pipe(path=path, value=f"{i}".encode()) + return source + + +class AbstractFixtures(BaseAbstractFixtures): + """ + Abstract base class containing fixtures that may be overridden in derived + filesystem-specific classes to run the abstract tests on such filesystems. + + For any particular filesystem some of these fixtures must be overridden, + such as ``fs`` and ``fs_path``, and others may be overridden if the + default functions here are not appropriate, such as ``fs_join``. + """ + + @pytest.fixture + def fs(self): + raise NotImplementedError("This function must be overridden in derived classes") + + @pytest.fixture + def fs_join(self): + """ + Return a function that joins its arguments together into a path. + + Most fsspec implementations join paths in a platform-dependent way, + but some will override this to always use a forward slash. + """ + return os.path.join + + @pytest.fixture + def fs_path(self): + raise NotImplementedError("This function must be overridden in derived classes") + + @pytest.fixture(scope="class") + def local_fs(self): + # Maybe need an option for auto_mkdir=False? This is only relevant + # for certain implementations. 
+ return LocalFileSystem(auto_mkdir=True) + + @pytest.fixture + def local_join(self): + """ + Return a function that joins its arguments together into a path, on + the local filesystem. + """ + return os.path.join + + @pytest.fixture + def local_path(self, tmpdir): + return tmpdir + + @pytest.fixture + def supports_empty_directories(self): + """ + Return whether this implementation supports empty directories. + """ + return True + + @pytest.fixture + def fs_sanitize_path(self): + return lambda x: x diff --git a/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-311.pyc b/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc1c473ed1c282f085313049a91551d4f0c35d2f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/__pycache__/pipe.cpython-311.pyc b/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/__pycache__/pipe.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cd0ec4507b076b80d22fca5fc7592928a6d2ec7 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/__pycache__/pipe.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-311.pyc b/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98f762e81cae03c2a8c984e444f1a2ed63db74bf Binary files /dev/null and b/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/open.py b/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/open.py new file mode 100644 index 
0000000000000000000000000000000000000000..bb75ea852276fb8d834345883813b8e27a0ae24c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/fsspec/tests/abstract/open.py @@ -0,0 +1,11 @@ +import pytest + + +class AbstractOpenTests: + def test_open_exclusive(self, fs, fs_target): + with fs.open(fs_target, "wb") as f: + f.write(b"data") + with fs.open(fs_target, "rb") as f: + assert f.read() == b"data" + with pytest.raises(FileExistsError): + fs.open(fs_target, "xb") diff --git a/.venv/lib/python3.11/site-packages/functorch/_C.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/functorch/_C.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..00ec2a70abdad2de08364fe4a160ec7646595526 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/functorch/_C.cpython-311-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39dc1f80069251db7f97e91763cbde3e2a9b53d8db73007ad0ddf7b041fee1d5 +size 324432 diff --git a/.venv/lib/python3.11/site-packages/zmq/__init__.pxd b/.venv/lib/python3.11/site-packages/zmq/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..37b8362e2a07a77d41ff9f6d3b589364a5e00a05 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/__init__.pxd @@ -0,0 +1 @@ +from zmq.backend.cython cimport Context, Frame, Socket, libzmq diff --git a/.venv/lib/python3.11/site-packages/zmq/__init__.py b/.venv/lib/python3.11/site-packages/zmq/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f1149c580d9d9f5c802286d28b19c6c76be81efe --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/__init__.py @@ -0,0 +1,94 @@ +"""Python bindings for 0MQ""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
+ +from __future__ import annotations + +import os +import sys +from contextlib import contextmanager + + +@contextmanager +def _libs_on_path(): + """context manager for libs directory on $PATH + + Works around mysterious issue where os.add_dll_directory + does not resolve imports (conda-forge Python >= 3.8) + """ + + if not sys.platform.startswith("win"): + yield + return + + libs_dir = os.path.abspath( + os.path.join( + os.path.dirname(__file__), + os.pardir, + "pyzmq.libs", + ) + ) + if not os.path.exists(libs_dir): + # no bundled libs + yield + return + + path_before = os.environ.get("PATH") + try: + os.environ["PATH"] = os.pathsep.join([path_before or "", libs_dir]) + yield + finally: + if path_before is None: + os.environ.pop("PATH") + else: + os.environ["PATH"] = path_before + + +# zmq top-level imports + +# workaround for Windows +with _libs_on_path(): + from zmq import backend + +from . import constants # noqa +from .constants import * # noqa +from zmq.backend import * # noqa +from zmq import sugar +from zmq.sugar import * # noqa + + +def get_includes(): + """Return a list of directories to include for linking against pyzmq with cython.""" + from os.path import abspath, dirname, exists, join, pardir + + base = dirname(__file__) + parent = abspath(join(base, pardir)) + includes = [parent] + [join(parent, base, subdir) for subdir in ('utils',)] + if exists(join(parent, base, 'include')): + includes.append(join(parent, base, 'include')) + return includes + + +def get_library_dirs(): + """Return a list of directories used to link against pyzmq's bundled libzmq.""" + from os.path import abspath, dirname, join, pardir + + base = dirname(__file__) + parent = abspath(join(base, pardir)) + return [join(parent, base)] + + +COPY_THRESHOLD = 65536 +DRAFT_API = backend.has("draft") + +__all__ = ( + [ + 'get_includes', + 'COPY_THRESHOLD', + 'DRAFT_API', + ] + + constants.__all__ + + sugar.__all__ + + backend.__all__ +) diff --git 
a/.venv/lib/python3.11/site-packages/zmq/__init__.pyi b/.venv/lib/python3.11/site-packages/zmq/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..45a83d730e219f852141c5999c3303af138925eb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/__init__.pyi @@ -0,0 +1,29 @@ +from typing import List + +from . import backend, sugar + +COPY_THRESHOLD: int +DRAFT_API: bool +__version__: str + +# mypy doesn't like overwriting symbols with * so be explicit +# about what comes from backend, not from sugar +# see tools/backend_imports.py to generate this list +# note: `x as x` is required for re-export +# see https://github.com/python/mypy/issues/2190 +from .backend import IPC_PATH_MAX_LEN as IPC_PATH_MAX_LEN +from .backend import curve_keypair as curve_keypair +from .backend import curve_public as curve_public +from .backend import device as device +from .backend import has as has +from .backend import proxy as proxy +from .backend import proxy_steerable as proxy_steerable +from .backend import strerror as strerror +from .backend import zmq_errno as zmq_errno +from .backend import zmq_poll as zmq_poll +from .constants import * +from .error import * +from .sugar import * + +def get_includes() -> list[str]: ... +def get_library_dirs() -> list[str]: ... 
diff --git a/.venv/lib/python3.11/site-packages/zmq/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84e578f0203b6ffca3cb7dc5eb92f0e00e58ac9b Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/__pycache__/_future.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/__pycache__/_future.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e31bb9c717e572cd63ae2386d22c12ff616984e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/__pycache__/_future.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/__pycache__/_typing.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/__pycache__/_typing.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4db28d7df915f79ca6b087051d68000ca05dd1e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/__pycache__/_typing.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/__pycache__/asyncio.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/__pycache__/asyncio.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fe0769270fc2c481f5d369768afbe9fdec4eaf9 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/__pycache__/asyncio.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/__pycache__/constants.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/__pycache__/constants.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64504bfc0eb6f77ccbb6608d19b6cf0805bdbb8e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/__pycache__/constants.cpython-311.pyc differ diff --git 
a/.venv/lib/python3.11/site-packages/zmq/__pycache__/decorators.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/__pycache__/decorators.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..470a627999ef3d01a5177ae95fbfc17fe16fb470 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/__pycache__/decorators.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/__pycache__/error.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/__pycache__/error.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bfb7cc3a32c20367760bf965c043bceb1d1eb9e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/__pycache__/error.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/_future.pyi b/.venv/lib/python3.11/site-packages/zmq/_future.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1f193aac70cb5cf8e80755e08cdb963541eec31e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/_future.pyi @@ -0,0 +1,92 @@ +"""type annotations for async sockets""" + +from __future__ import annotations + +from asyncio import Future +from pickle import DEFAULT_PROTOCOL +from typing import Any, Awaitable, Literal, Sequence, TypeVar, overload + +import zmq as _zmq + +class _AsyncPoller(_zmq.Poller): + _socket_class: type[_AsyncSocket] + + def poll(self, timeout=-1) -> Awaitable[list[tuple[Any, int]]]: ... # type: ignore + +T = TypeVar("T", bound="_AsyncSocket") + +class _AsyncSocket(_zmq.Socket[Future]): + @classmethod + def from_socket(cls: type[T], socket: _zmq.Socket, io_loop: Any = None) -> T: ... + def send( # type: ignore + self, + data: Any, + flags: int = 0, + copy: bool = True, + track: bool = False, + routing_id: int | None = None, + group: str | None = None, + ) -> Awaitable[_zmq.MessageTracker | None]: ... + @overload # type: ignore + def recv(self, flags: int = 0, *, track: bool = False) -> Awaitable[bytes]: ... 
+ @overload + def recv( + self, flags: int = 0, *, copy: Literal[True], track: bool = False + ) -> Awaitable[bytes]: ... + @overload + def recv( + self, flags: int = 0, *, copy: Literal[False], track: bool = False + ) -> Awaitable[_zmq.Frame]: ... + @overload + def recv( + self, flags: int = 0, copy: bool = True, track: bool = False + ) -> Awaitable[bytes | _zmq.Frame]: ... + def send_multipart( # type: ignore + self, + msg_parts: Sequence, + flags: int = 0, + copy: bool = True, + track: bool = False, + routing_id: int | None = None, + group: str | None = None, + ) -> Awaitable[_zmq.MessageTracker | None]: ... + @overload # type: ignore + def recv_multipart( + self, flags: int = 0, *, track: bool = False + ) -> Awaitable[list[bytes]]: ... + @overload + def recv_multipart( + self, flags: int = 0, *, copy: Literal[True], track: bool = False + ) -> Awaitable[list[bytes]]: ... + @overload + def recv_multipart( + self, flags: int = 0, *, copy: Literal[False], track: bool = False + ) -> Awaitable[list[_zmq.Frame]]: ... + @overload + def recv_multipart( + self, flags: int = 0, copy: bool = True, track: bool = False + ) -> Awaitable[list[bytes] | list[_zmq.Frame]]: ... + + # serialization wrappers + + def send_string( # type: ignore + self, + u: str, + flags: int = 0, + copy: bool = True, + *, + encoding: str = 'utf-8', + **kwargs, + ) -> Awaitable[_zmq.Frame | None]: ... + def recv_string( # type: ignore + self, flags: int = 0, encoding: str = 'utf-8' + ) -> Awaitable[str]: ... + def send_pyobj( # type: ignore + self, obj: Any, flags: int = 0, protocol: int = DEFAULT_PROTOCOL, **kwargs + ) -> Awaitable[_zmq.Frame | None]: ... + def recv_pyobj(self, flags: int = 0) -> Awaitable[Any]: ... # type: ignore + def send_json( # type: ignore + self, obj: Any, flags: int = 0, **kwargs + ) -> Awaitable[_zmq.Frame | None]: ... + def recv_json(self, flags: int = 0, **kwargs) -> Awaitable[Any]: ... # type: ignore + def poll(self, timeout=-1) -> Awaitable[list[tuple[Any, int]]]: ... 
# type: ignore diff --git a/.venv/lib/python3.11/site-packages/zmq/_typing.py b/.venv/lib/python3.11/site-packages/zmq/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..92ec879c23e32be4f3ec636c8c6da7b102a420bf --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/_typing.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +import sys +from typing import Any, Dict + +if sys.version_info >= (3, 8): + from typing import Literal, TypedDict +else: + # avoid runtime dependency on typing_extensions on py37 + try: + from typing_extensions import Literal, TypedDict # type: ignore + except ImportError: + + class _Literal: + def __getitem__(self, key): + return Any + + Literal = _Literal() # type: ignore + + class TypedDict(Dict): # type: ignore + pass + + +if sys.version_info >= (3, 10): + from typing import TypeAlias +else: + try: + from typing_extensions import TypeAlias + except ImportError: + TypeAlias = type # type: ignore diff --git a/.venv/lib/python3.11/site-packages/zmq/auth/__init__.py b/.venv/lib/python3.11/site-packages/zmq/auth/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ebacab71272ab4e1b7999ea8d6a875c08c6b861b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/auth/__init__.py @@ -0,0 +1,13 @@ +"""Utilities for ZAP authentication. + +To run authentication in a background thread, see :mod:`zmq.auth.thread`. +For integration with the asyncio event loop, see :mod:`zmq.auth.asyncio`. + +Authentication examples are provided in the pyzmq codebase, under +`/examples/security/`. + +.. 
versionadded:: 14.1 +""" + +from .base import * +from .certs import * diff --git a/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c1e3ca453dbe6c641c0e47a4c732537f93610c9 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/asyncio.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/asyncio.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0daab271e6a752b5e12a7d665305a83cf20c890 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/asyncio.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/base.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/base.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b6c63d5be129454d37689a4e003e8e8ff9c41df Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/base.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/certs.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/certs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..943efb8996aa8f2e88d4e9b37566792751b14401 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/certs.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/ioloop.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/ioloop.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0850fe1c04fc4074f812d497ae3bf9b670cafbb Binary files /dev/null and 
b/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/ioloop.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/thread.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/thread.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e13d06574bc2dc2adee5159bd58d1f48561e4737 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/auth/__pycache__/thread.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/auth/asyncio.py b/.venv/lib/python3.11/site-packages/zmq/auth/asyncio.py new file mode 100644 index 0000000000000000000000000000000000000000..8b4915c12784538761dab70cee8e34395a1df066 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/auth/asyncio.py @@ -0,0 +1,66 @@ +"""ZAP Authenticator integrated with the asyncio IO loop. + +.. versionadded:: 15.2 +""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import asyncio +import warnings +from typing import Any, Optional + +import zmq +from zmq.asyncio import Poller + +from .base import Authenticator + + +class AsyncioAuthenticator(Authenticator): + """ZAP authentication for use in the asyncio IO loop""" + + __poller: Optional[Poller] + __task: Any + + def __init__( + self, + context: Optional["zmq.Context"] = None, + loop: Any = None, + encoding: str = 'utf-8', + log: Any = None, + ): + super().__init__(context, encoding, log) + if loop is not None: + warnings.warn( + f"{self.__class__.__name__}(loop) is deprecated and ignored", + DeprecationWarning, + stacklevel=2, + ) + self.__poller = None + self.__task = None + + async def __handle_zap(self) -> None: + while self.__poller is not None: + events = await self.__poller.poll() + if self.zap_socket in dict(events): + msg = self.zap_socket.recv_multipart() + await self.handle_zap_message(msg) + + def start(self) -> None: + """Start ZAP authentication""" + super().start() + self.__poller 
= Poller() + self.__poller.register(self.zap_socket, zmq.POLLIN) + self.__task = asyncio.ensure_future(self.__handle_zap()) + + def stop(self) -> None: + """Stop ZAP authentication""" + if self.__task: + self.__task.cancel() + if self.__poller: + self.__poller.unregister(self.zap_socket) + self.__poller = None + super().stop() + + +__all__ = ["AsyncioAuthenticator"] diff --git a/.venv/lib/python3.11/site-packages/zmq/auth/base.py b/.venv/lib/python3.11/site-packages/zmq/auth/base.py new file mode 100644 index 0000000000000000000000000000000000000000..c862b60c14f0625de1cf8c0006357760dce88d29 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/auth/base.py @@ -0,0 +1,445 @@ +"""Base implementation of 0MQ authentication.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import logging +import os +from typing import Any, Awaitable, Dict, List, Optional, Set, Tuple, Union + +import zmq +from zmq.error import _check_version +from zmq.utils import z85 + +from .certs import load_certificates + +CURVE_ALLOW_ANY = '*' +VERSION = b'1.0' + + +class Authenticator: + """Implementation of ZAP authentication for zmq connections. + + This authenticator class does not register with an event loop. As a result, + you will need to manually call `handle_zap_message`:: + + auth = zmq.Authenticator() + auth.allow("127.0.0.1") + auth.start() + while True: + await auth.handle_zap_msg(auth.zap_socket.recv_multipart()) + + Alternatively, you can register `auth.zap_socket` with a poller. + + Since many users will want to run ZAP in a way that does not block the + main thread, other authentication classes (such as :mod:`zmq.auth.thread`) + are provided. + + Note: + + - libzmq provides four levels of security: default NULL (which the Authenticator does + not see), and authenticated NULL, PLAIN, CURVE, and GSSAPI, which the Authenticator can see. + - until you add policies, all incoming NULL connections are allowed. 
+ (classic ZeroMQ behavior), and all PLAIN and CURVE connections are denied. + - GSSAPI requires no configuration. + """ + + context: "zmq.Context" + encoding: str + allow_any: bool + credentials_providers: Dict[str, Any] + zap_socket: "zmq.Socket" + _allowed: Set[str] + _denied: Set[str] + passwords: Dict[str, Dict[str, str]] + certs: Dict[str, Dict[bytes, Any]] + log: Any + + def __init__( + self, + context: Optional["zmq.Context"] = None, + encoding: str = 'utf-8', + log: Any = None, + ): + _check_version((4, 0), "security") + self.context = context or zmq.Context.instance() + self.encoding = encoding + self.allow_any = False + self.credentials_providers = {} + self.zap_socket = None # type: ignore + self._allowed = set() + self._denied = set() + # passwords is a dict keyed by domain and contains values + # of dicts with username:password pairs. + self.passwords = {} + # certs is dict keyed by domain and contains values + # of dicts keyed by the public keys from the specified location. + self.certs = {} + self.log = log or logging.getLogger('zmq.auth') + + def start(self) -> None: + """Create and bind the ZAP socket""" + self.zap_socket = self.context.socket(zmq.REP, socket_class=zmq.Socket) + self.zap_socket.linger = 1 + self.zap_socket.bind("inproc://zeromq.zap.01") + self.log.debug("Starting") + + def stop(self) -> None: + """Close the ZAP socket""" + if self.zap_socket: + self.zap_socket.close() + self.zap_socket = None # type: ignore + + def allow(self, *addresses: str) -> None: + """Allow IP address(es). + + Connections from addresses not explicitly allowed will be rejected. + + - For NULL, all clients from this address will be accepted. + - For real auth setups, they will be allowed to continue with authentication. + + allow is mutually exclusive with deny. 
+ """ + if self._denied: + raise ValueError("Only use allow or deny, not both") + self.log.debug("Allowing %s", ','.join(addresses)) + self._allowed.update(addresses) + + def deny(self, *addresses: str) -> None: + """Deny IP address(es). + + Addresses not explicitly denied will be allowed to continue with authentication. + + deny is mutually exclusive with allow. + """ + if self._allowed: + raise ValueError("Only use a allow or deny, not both") + self.log.debug("Denying %s", ','.join(addresses)) + self._denied.update(addresses) + + def configure_plain( + self, domain: str = '*', passwords: Optional[Dict[str, str]] = None + ) -> None: + """Configure PLAIN authentication for a given domain. + + PLAIN authentication uses a plain-text password file. + To cover all domains, use "*". + You can modify the password file at any time; it is reloaded automatically. + """ + if passwords: + self.passwords[domain] = passwords + self.log.debug("Configure plain: %s", domain) + + def configure_curve( + self, domain: str = '*', location: Union[str, os.PathLike] = "." + ) -> None: + """Configure CURVE authentication for a given domain. + + CURVE authentication uses a directory that holds all public client certificates, + i.e. their public keys. + + To cover all domains, use "*". + + You can add and remove certificates in that directory at any time. configure_curve must be called + every time certificates are added or removed, in order to update the Authenticator's state + + To allow all client keys without checking, specify CURVE_ALLOW_ANY for the location. + """ + # If location is CURVE_ALLOW_ANY then allow all clients. Otherwise + # treat location as a directory that holds the certificates. 
+ self.log.debug("Configure curve: %s[%s]", domain, location) + if location == CURVE_ALLOW_ANY: + self.allow_any = True + else: + self.allow_any = False + try: + self.certs[domain] = load_certificates(location) + except Exception as e: + self.log.error("Failed to load CURVE certs from %s: %s", location, e) + + def configure_curve_callback( + self, domain: str = '*', credentials_provider: Any = None + ) -> None: + """Configure CURVE authentication for a given domain. + + CURVE authentication using a callback function validating + the client public key according to a custom mechanism, e.g. checking the + key against records in a db. credentials_provider is an object of a class which + implements a callback method accepting two parameters (domain and key), e.g.:: + + class CredentialsProvider(object): + + def __init__(self): + ...e.g. db connection + + def callback(self, domain, key): + valid = ...lookup key and/or domain in db + if valid: + logging.info('Authorizing: {0}, {1}'.format(domain, key)) + return True + else: + logging.warning('NOT Authorizing: {0}, {1}'.format(domain, key)) + return False + + To cover all domains, use "*". + """ + + self.allow_any = False + + if credentials_provider is not None: + self.credentials_providers[domain] = credentials_provider + else: + self.log.error("None credentials_provider provided for domain:%s", domain) + + def curve_user_id(self, client_public_key: bytes) -> str: + """Return the User-Id corresponding to a CURVE client's public key + + Default implementation uses the z85-encoding of the public key. + + Override to define a custom mapping of public key : user-id + + This is only called on successful authentication. 
+ + Parameters + ---------- + client_public_key: bytes + The client public key used for the given message + + Returns + ------- + user_id: unicode + The user ID as text + """ + return z85.encode(client_public_key).decode('ascii') + + def configure_gssapi( + self, domain: str = '*', location: Optional[str] = None + ) -> None: + """Configure GSSAPI authentication + + Currently this is a no-op because there is nothing to configure with GSSAPI. + """ + + async def handle_zap_message(self, msg: List[bytes]): + """Perform ZAP authentication""" + if len(msg) < 6: + self.log.error("Invalid ZAP message, not enough frames: %r", msg) + if len(msg) < 2: + self.log.error("Not enough information to reply") + else: + self._send_zap_reply(msg[1], b"400", b"Not enough frames") + return + + version, request_id, domain, address, identity, mechanism = msg[:6] + credentials = msg[6:] + + domain = domain.decode(self.encoding, 'replace') + address = address.decode(self.encoding, 'replace') + + if version != VERSION: + self.log.error("Invalid ZAP version: %r", msg) + self._send_zap_reply(request_id, b"400", b"Invalid version") + return + + self.log.debug( + "version: %r, request_id: %r, domain: %r," + " address: %r, identity: %r, mechanism: %r", + version, + request_id, + domain, + address, + identity, + mechanism, + ) + + # Is address is explicitly allowed or _denied? 
+ allowed = False + denied = False + reason = b"NO ACCESS" + + if self._allowed: + if address in self._allowed: + allowed = True + self.log.debug("PASSED (allowed) address=%s", address) + else: + denied = True + reason = b"Address not allowed" + self.log.debug("DENIED (not allowed) address=%s", address) + + elif self._denied: + if address in self._denied: + denied = True + reason = b"Address denied" + self.log.debug("DENIED (denied) address=%s", address) + else: + allowed = True + self.log.debug("PASSED (not denied) address=%s", address) + + # Perform authentication mechanism-specific checks if necessary + username = "anonymous" + if not denied: + if mechanism == b'NULL' and not allowed: + # For NULL, we allow if the address wasn't denied + self.log.debug("ALLOWED (NULL)") + allowed = True + + elif mechanism == b'PLAIN': + # For PLAIN, even a _alloweded address must authenticate + if len(credentials) != 2: + self.log.error("Invalid PLAIN credentials: %r", credentials) + self._send_zap_reply(request_id, b"400", b"Invalid credentials") + return + username, password = ( + c.decode(self.encoding, 'replace') for c in credentials + ) + allowed, reason = self._authenticate_plain(domain, username, password) + + elif mechanism == b'CURVE': + # For CURVE, even a _alloweded address must authenticate + if len(credentials) != 1: + self.log.error("Invalid CURVE credentials: %r", credentials) + self._send_zap_reply(request_id, b"400", b"Invalid credentials") + return + key = credentials[0] + allowed, reason = await self._authenticate_curve(domain, key) + if allowed: + username = self.curve_user_id(key) + + elif mechanism == b'GSSAPI': + if len(credentials) != 1: + self.log.error("Invalid GSSAPI credentials: %r", credentials) + self._send_zap_reply(request_id, b"400", b"Invalid credentials") + return + # use principal as user-id for now + principal = credentials[0] + username = principal.decode("utf8") + allowed, reason = self._authenticate_gssapi(domain, principal) + + if 
allowed: + self._send_zap_reply(request_id, b"200", b"OK", username) + else: + self._send_zap_reply(request_id, b"400", reason) + + def _authenticate_plain( + self, domain: str, username: str, password: str + ) -> Tuple[bool, bytes]: + """PLAIN ZAP authentication""" + allowed = False + reason = b"" + if self.passwords: + # If no domain is not specified then use the default domain + if not domain: + domain = '*' + + if domain in self.passwords: + if username in self.passwords[domain]: + if password == self.passwords[domain][username]: + allowed = True + else: + reason = b"Invalid password" + else: + reason = b"Invalid username" + else: + reason = b"Invalid domain" + + if allowed: + self.log.debug( + "ALLOWED (PLAIN) domain=%s username=%s password=%s", + domain, + username, + password, + ) + else: + self.log.debug("DENIED %s", reason) + + else: + reason = b"No passwords defined" + self.log.debug("DENIED (PLAIN) %s", reason) + + return allowed, reason + + async def _authenticate_curve( + self, domain: str, client_key: bytes + ) -> Tuple[bool, bytes]: + """CURVE ZAP authentication""" + allowed = False + reason = b"" + if self.allow_any: + allowed = True + reason = b"OK" + self.log.debug("ALLOWED (CURVE allow any client)") + elif self.credentials_providers != {}: + # If no explicit domain is specified then use the default domain + if not domain: + domain = '*' + + if domain in self.credentials_providers: + z85_client_key = z85.encode(client_key) + # Callback to check if key is Allowed + r = self.credentials_providers[domain].callback(domain, z85_client_key) + if isinstance(r, Awaitable): + r = await r + if r: + allowed = True + reason = b"OK" + else: + reason = b"Unknown key" + + status = "ALLOWED" if allowed else "DENIED" + self.log.debug( + "%s (CURVE auth_callback) domain=%s client_key=%s", + status, + domain, + z85_client_key, + ) + else: + reason = b"Unknown domain" + else: + # If no explicit domain is specified then use the default domain + if not domain: + domain 
= '*' + + if domain in self.certs: + # The certs dict stores keys in z85 format, convert binary key to z85 bytes + z85_client_key = z85.encode(client_key) + if self.certs[domain].get(z85_client_key): + allowed = True + reason = b"OK" + else: + reason = b"Unknown key" + + status = "ALLOWED" if allowed else "DENIED" + self.log.debug( + "%s (CURVE) domain=%s client_key=%s", + status, + domain, + z85_client_key, + ) + else: + reason = b"Unknown domain" + + return allowed, reason + + def _authenticate_gssapi(self, domain: str, principal: bytes) -> Tuple[bool, bytes]: + """Nothing to do for GSSAPI, which has already been handled by an external service.""" + self.log.debug("ALLOWED (GSSAPI) domain=%s principal=%s", domain, principal) + return True, b'OK' + + def _send_zap_reply( + self, + request_id: bytes, + status_code: bytes, + status_text: bytes, + user_id: str = 'anonymous', + ) -> None: + """Send a ZAP reply to finish the authentication.""" + user_id = user_id if status_code == b'200' else b'' + if isinstance(user_id, str): + user_id = user_id.encode(self.encoding, 'replace') + metadata = b'' # not currently used + self.log.debug("ZAP reply code=%s text=%s", status_code, status_text) + reply = [VERSION, request_id, status_code, status_text, user_id, metadata] + self.zap_socket.send_multipart(reply) + + +__all__ = ['Authenticator', 'CURVE_ALLOW_ANY'] diff --git a/.venv/lib/python3.11/site-packages/zmq/auth/certs.py b/.venv/lib/python3.11/site-packages/zmq/auth/certs.py new file mode 100644 index 0000000000000000000000000000000000000000..d60ae005dc111da80576adcdc378390ba6026527 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/auth/certs.py @@ -0,0 +1,140 @@ +"""0MQ authentication related functions and classes.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
import datetime
import glob
import os
from typing import Dict, Optional, Tuple, Union

_cert_secret_banner = """# **** Generated on {0} by pyzmq ****
# ZeroMQ CURVE **Secret** Certificate
# DO NOT PROVIDE THIS FILE TO OTHER USERS nor change its permissions.

"""


_cert_public_banner = """# **** Generated on {0} by pyzmq ****
# ZeroMQ CURVE Public Certificate
# Exchange securely, or use a secure mechanism to verify the contents
# of this file after exchange. Store public certificates in your home
# directory, in the .curve subdirectory.

"""


def _write_key_file(
    key_filename: Union[str, os.PathLike],
    banner: str,
    public_key: Union[str, bytes],
    secret_key: Optional[Union[str, bytes]] = None,
    metadata: Optional[Dict[str, str]] = None,
    encoding: str = 'utf-8',
) -> None:
    """Create a certificate file.

    Writes the czmq-style sections (``metadata`` then ``curve``) with the
    given banner at the top; keys and metadata given as bytes are decoded
    with *encoding* first.
    """
    if isinstance(public_key, bytes):
        public_key = public_key.decode(encoding)
    if isinstance(secret_key, bytes):
        secret_key = secret_key.decode(encoding)
    with open(key_filename, 'w', encoding='utf8') as f:
        f.write(banner.format(datetime.datetime.now()))

        f.write('metadata\n')
        if metadata:
            for k, v in metadata.items():
                if isinstance(k, bytes):
                    k = k.decode(encoding)
                if isinstance(v, bytes):
                    v = v.decode(encoding)
                f.write(f"    {k} = {v}\n")

        f.write('curve\n')
        f.write(f"    public-key = \"{public_key}\"\n")

        if secret_key:
            f.write(f"    secret-key = \"{secret_key}\"\n")


def create_certificates(
    key_dir: Union[str, os.PathLike],
    name: str,
    metadata: Optional[Dict[str, str]] = None,
) -> Tuple[str, str]:
    """Create zmq certificates.

    Returns the file paths to the public and secret certificate files.
    """
    # local import: only this function needs the libzmq binding,
    # so the pure-file helpers below stay importable without it
    import zmq

    public_key, secret_key = zmq.curve_keypair()
    base_filename = os.path.join(key_dir, name)
    secret_key_file = f"{base_filename}.key_secret"
    public_key_file = f"{base_filename}.key"
    now = datetime.datetime.now()

    _write_key_file(public_key_file, _cert_public_banner.format(now), public_key)

    _write_key_file(
        secret_key_file,
        _cert_secret_banner.format(now),
        public_key,
        secret_key=secret_key,
        metadata=metadata,
    )

    return public_key_file, secret_key_file


def load_certificate(
    filename: Union[str, os.PathLike],
) -> Tuple[bytes, Optional[bytes]]:
    """Load public and secret key from a zmq certificate.

    Returns (public_key, secret_key)

    If the certificate file only contains the public key,
    secret_key will be None.

    If there is no public key found in the file, ValueError will be raised.
    """
    public_key = None
    secret_key = None
    if not os.path.exists(filename):
        # fixed: error previously read "(unknown)" instead of the path
        raise OSError(f"Invalid certificate file: {filename}")

    with open(filename, 'rb') as f:
        for line in f:
            line = line.strip()
            if line.startswith(b'#'):
                continue
            if line.startswith(b'public-key'):
                public_key = line.split(b"=", 1)[1].strip(b' \t\'"')
            if line.startswith(b'secret-key'):
                secret_key = line.split(b"=", 1)[1].strip(b' \t\'"')
            if public_key and secret_key:
                break

    if public_key is None:
        # fixed: error previously read "(unknown)" instead of the path
        raise ValueError(f"No public key found in {filename}")

    return public_key, secret_key


def load_certificates(directory: Union[str, os.PathLike] = '.') -> Dict[bytes, bool]:
    """Load public keys from all certificates in a directory"""
    certs = {}
    if not os.path.isdir(directory):
        raise OSError(f"Invalid certificate directory: {directory}")
    # Follow czmq pattern of public keys stored in *.key files.
    glob_string = os.path.join(directory, "*.key")

    cert_files = glob.glob(glob_string)
    for cert_file in cert_files:
        public_key, _ = load_certificate(cert_file)
        if public_key:
            certs[public_key] = True
    return certs


__all__ = ['create_certificates', 'load_certificate', 'load_certificates']
Use zmq.auth.asyncio.AsyncioAuthenticator", + DeprecationWarning, + stacklevel=2, +) + + +class IOLoopAuthenticator(AsyncioAuthenticator): + """ZAP authentication for use in the tornado IOLoop""" + + def __init__( + self, + context: Optional["zmq.Context"] = None, + encoding: str = 'utf-8', + log: Any = None, + io_loop: Any = None, + ): + loop = None + if io_loop is not None: + warnings.warn( + f"{self.__class__.__name__}(io_loop) is deprecated and ignored", + DeprecationWarning, + stacklevel=2, + ) + loop = io_loop.asyncio_loop + super().__init__(context=context, encoding=encoding, log=log, loop=loop) + + +__all__ = ['IOLoopAuthenticator'] diff --git a/.venv/lib/python3.11/site-packages/zmq/auth/thread.py b/.venv/lib/python3.11/site-packages/zmq/auth/thread.py new file mode 100644 index 0000000000000000000000000000000000000000..a227c4bd5974f95e3b5b844a1ce0de34f9454f10 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/auth/thread.py @@ -0,0 +1,139 @@ +"""ZAP Authenticator in a Python Thread. + +.. versionadded:: 14.1 +""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
+ +import asyncio +from threading import Event, Thread +from typing import Any, List, Optional + +import zmq +import zmq.asyncio + +from .base import Authenticator + + +class AuthenticationThread(Thread): + """A Thread for running a zmq Authenticator + + This is run in the background by ThreadAuthenticator + """ + + pipe: zmq.Socket + loop: asyncio.AbstractEventLoop + authenticator: Authenticator + poller: Optional[zmq.asyncio.Poller] = None + + def __init__( + self, + authenticator: Authenticator, + pipe: zmq.Socket, + ) -> None: + super().__init__(daemon=True) + self.authenticator = authenticator + self.log = authenticator.log + self.pipe = pipe + + self.started = Event() + + def run(self) -> None: + """Start the Authentication Agent thread task""" + + loop = asyncio.new_event_loop() + try: + loop.run_until_complete(self._run()) + finally: + if self.pipe: + self.pipe.close() + self.pipe = None # type: ignore + + loop.close() + + async def _run(self): + self.poller = zmq.asyncio.Poller() + self.poller.register(self.pipe, zmq.POLLIN) + self.poller.register(self.authenticator.zap_socket, zmq.POLLIN) + self.started.set() + + while True: + events = dict(await self.poller.poll()) + if self.pipe in events: + msg = self.pipe.recv_multipart() + if self._handle_pipe_message(msg): + return + if self.authenticator.zap_socket in events: + msg = self.authenticator.zap_socket.recv_multipart() + await self.authenticator.handle_zap_message(msg) + + def _handle_pipe_message(self, msg: List[bytes]) -> bool: + command = msg[0] + self.log.debug("auth received API command %r", command) + + if command == b'TERMINATE': + return True + + else: + self.log.error("Invalid auth command from API: %r", command) + self.pipe.send(b'ERROR') + + return False + + +class ThreadAuthenticator(Authenticator): + """Run ZAP authentication in a background thread""" + + pipe: "zmq.Socket" + pipe_endpoint: str = '' + thread: AuthenticationThread + + def __init__( + self, + context: Optional["zmq.Context"] = 
None, + encoding: str = 'utf-8', + log: Any = None, + ): + super().__init__(context=context, encoding=encoding, log=log) + self.pipe = None # type: ignore + self.pipe_endpoint = f"inproc://{id(self)}.inproc" + self.thread = None # type: ignore + + def start(self) -> None: + """Start the authentication thread""" + # start the Authenticator + super().start() + + # create a socket pair to communicate with auth thread. + self.pipe = self.context.socket(zmq.PAIR, socket_class=zmq.Socket) + self.pipe.linger = 1 + self.pipe.bind(self.pipe_endpoint) + thread_pipe = self.context.socket(zmq.PAIR, socket_class=zmq.Socket) + thread_pipe.linger = 1 + thread_pipe.connect(self.pipe_endpoint) + self.thread = AuthenticationThread(authenticator=self, pipe=thread_pipe) + self.thread.start() + if not self.thread.started.wait(timeout=10): + raise RuntimeError("Authenticator thread failed to start") + + def stop(self) -> None: + """Stop the authentication thread""" + if self.pipe: + self.pipe.send(b'TERMINATE') + if self.is_alive(): + self.thread.join() + self.thread = None # type: ignore + self.pipe.close() + self.pipe = None # type: ignore + super().stop() + + def is_alive(self) -> bool: + """Is the ZAP thread currently running?""" + return bool(self.thread and self.thread.is_alive()) + + def __del__(self) -> None: + self.stop() + + +__all__ = ['ThreadAuthenticator'] diff --git a/.venv/lib/python3.11/site-packages/zmq/constants.py b/.venv/lib/python3.11/site-packages/zmq/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..cddf433a15d9102b9162425f8d8a54aa125f783e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/constants.py @@ -0,0 +1,974 @@ +"""zmq constants as enums""" + +from __future__ import annotations + +import errno +import sys +from enum import Enum, IntEnum, IntFlag + +_HAUSNUMERO = 156384712 + + +class Errno(IntEnum): + """libzmq error codes + + .. 
# libzmq's base for its own error numbers; must match ZMQ_HAUSNUMERO in zmq.h.
# (Redeclared here with the identical value so this section stands alone.)
_HAUSNUMERO = 156384712


class Errno(IntEnum):
    """libzmq error codes

    .. versionadded:: 23
    """

    EAGAIN = errno.EAGAIN
    EFAULT = errno.EFAULT
    EINVAL = errno.EINVAL

    if sys.platform.startswith("win"):
        # Windows: libzmq uses errno.h
        # while Python errno prefers WSA* variants
        # many of these were introduced to errno.h in vs2010
        # ref: https://github.com/python/cpython/blob/3.9/Modules/errnomodule.c#L10-L37
        # source: https://docs.microsoft.com/en-us/cpp/c-runtime-library/errno-constants
        ENOTSUP = 129
        EPROTONOSUPPORT = 135
        ENOBUFS = 119
        ENETDOWN = 116
        EADDRINUSE = 100
        EADDRNOTAVAIL = 101
        ECONNREFUSED = 107
        EINPROGRESS = 112
        ENOTSOCK = 128
        EMSGSIZE = 115
        EAFNOSUPPORT = 102
        ENETUNREACH = 118
        ECONNABORTED = 106
        ECONNRESET = 108
        ENOTCONN = 126
        ETIMEDOUT = 138
        EHOSTUNREACH = 110
        ENETRESET = 117

    else:
        # POSIX: prefer the platform's own value, fall back to libzmq's
        ENOTSUP = getattr(errno, "ENOTSUP", _HAUSNUMERO + 1)
        EPROTONOSUPPORT = getattr(errno, "EPROTONOSUPPORT", _HAUSNUMERO + 2)
        ENOBUFS = getattr(errno, "ENOBUFS", _HAUSNUMERO + 3)
        ENETDOWN = getattr(errno, "ENETDOWN", _HAUSNUMERO + 4)
        EADDRINUSE = getattr(errno, "EADDRINUSE", _HAUSNUMERO + 5)
        EADDRNOTAVAIL = getattr(errno, "EADDRNOTAVAIL", _HAUSNUMERO + 6)
        ECONNREFUSED = getattr(errno, "ECONNREFUSED", _HAUSNUMERO + 7)
        EINPROGRESS = getattr(errno, "EINPROGRESS", _HAUSNUMERO + 8)
        ENOTSOCK = getattr(errno, "ENOTSOCK", _HAUSNUMERO + 9)
        EMSGSIZE = getattr(errno, "EMSGSIZE", _HAUSNUMERO + 10)
        EAFNOSUPPORT = getattr(errno, "EAFNOSUPPORT", _HAUSNUMERO + 11)
        ENETUNREACH = getattr(errno, "ENETUNREACH", _HAUSNUMERO + 12)
        ECONNABORTED = getattr(errno, "ECONNABORTED", _HAUSNUMERO + 13)
        ECONNRESET = getattr(errno, "ECONNRESET", _HAUSNUMERO + 14)
        ENOTCONN = getattr(errno, "ENOTCONN", _HAUSNUMERO + 15)
        ETIMEDOUT = getattr(errno, "ETIMEDOUT", _HAUSNUMERO + 16)
        EHOSTUNREACH = getattr(errno, "EHOSTUNREACH", _HAUSNUMERO + 17)
        ENETRESET = getattr(errno, "ENETRESET", _HAUSNUMERO + 18)

    # Native 0MQ error codes
    EFSM = _HAUSNUMERO + 51
    ENOCOMPATPROTO = _HAUSNUMERO + 52
    ETERM = _HAUSNUMERO + 53
    EMTHREAD = _HAUSNUMERO + 54


class ContextOption(IntEnum):
    """Options for Context.get/set

    .. versionadded:: 23
    """

    IO_THREADS = 1
    MAX_SOCKETS = 2
    SOCKET_LIMIT = 3
    THREAD_PRIORITY = 3
    THREAD_SCHED_POLICY = 4
    MAX_MSGSZ = 5
    MSG_T_SIZE = 6
    THREAD_AFFINITY_CPU_ADD = 7
    THREAD_AFFINITY_CPU_REMOVE = 8
    THREAD_NAME_PREFIX = 9


class SocketType(IntEnum):
    """zmq socket types

    .. versionadded:: 23
    """

    PAIR = 0
    PUB = 1
    SUB = 2
    REQ = 3
    REP = 4
    DEALER = 5
    ROUTER = 6
    PULL = 7
    PUSH = 8
    XPUB = 9
    XSUB = 10
    STREAM = 11

    # deprecated aliases
    XREQ = DEALER
    XREP = ROUTER

    # DRAFT socket types
    SERVER = 12
    CLIENT = 13
    RADIO = 14
    DISH = 15
    GATHER = 16
    SCATTER = 17
    DGRAM = 18
    PEER = 19
    CHANNEL = 20


class _OptType(Enum):
    # value representation of each socket-option's get/set type
    int = 'int'
    int64 = 'int64'
    bytes = 'bytes'
    fd = 'fd'


class SocketOption(IntEnum):
    """Options for Socket.get/set

    .. versionadded:: 23
    """

    _opt_type: _OptType

    def __new__(cls, value: int, opt_type: _OptType = _OptType.int):
        """Attach option type as `._opt_type`"""
        obj = int.__new__(cls, value)
        obj._value_ = value
        obj._opt_type = opt_type
        return obj

    HWM = 1
    AFFINITY = 4, _OptType.int64
    ROUTING_ID = 5, _OptType.bytes
    SUBSCRIBE = 6, _OptType.bytes
    UNSUBSCRIBE = 7, _OptType.bytes
    RATE = 8
    RECOVERY_IVL = 9
    SNDBUF = 11
    RCVBUF = 12
    RCVMORE = 13
    FD = 14, _OptType.fd
    EVENTS = 15
    TYPE = 16
    LINGER = 17
    RECONNECT_IVL = 18
    BACKLOG = 19
    RECONNECT_IVL_MAX = 21
    MAXMSGSIZE = 22, _OptType.int64
    SNDHWM = 23
    RCVHWM = 24
    MULTICAST_HOPS = 25
    RCVTIMEO = 27
    SNDTIMEO = 28
    LAST_ENDPOINT = 32, _OptType.bytes
    ROUTER_MANDATORY = 33
    TCP_KEEPALIVE = 34
    TCP_KEEPALIVE_CNT = 35
    TCP_KEEPALIVE_IDLE = 36
    TCP_KEEPALIVE_INTVL = 37
    IMMEDIATE = 39
    XPUB_VERBOSE = 40
    ROUTER_RAW = 41
    IPV6 = 42
    MECHANISM = 43
    PLAIN_SERVER = 44
    PLAIN_USERNAME = 45, _OptType.bytes
    PLAIN_PASSWORD = 46, _OptType.bytes
    CURVE_SERVER = 47
    CURVE_PUBLICKEY = 48, _OptType.bytes
    CURVE_SECRETKEY = 49, _OptType.bytes
    CURVE_SERVERKEY = 50, _OptType.bytes
    PROBE_ROUTER = 51
    REQ_CORRELATE = 52
    REQ_RELAXED = 53
    CONFLATE = 54
    ZAP_DOMAIN = 55, _OptType.bytes
    ROUTER_HANDOVER = 56
    TOS = 57
    CONNECT_ROUTING_ID = 61, _OptType.bytes
    GSSAPI_SERVER = 62
    GSSAPI_PRINCIPAL = 63, _OptType.bytes
    GSSAPI_SERVICE_PRINCIPAL = 64, _OptType.bytes
    GSSAPI_PLAINTEXT = 65
    HANDSHAKE_IVL = 66
    SOCKS_PROXY = 68, _OptType.bytes
    XPUB_NODROP = 69
    BLOCKY = 70
    XPUB_MANUAL = 71
    XPUB_WELCOME_MSG = 72, _OptType.bytes
    STREAM_NOTIFY = 73
    INVERT_MATCHING = 74
    HEARTBEAT_IVL = 75
    HEARTBEAT_TTL = 76
    HEARTBEAT_TIMEOUT = 77
    XPUB_VERBOSER = 78
    CONNECT_TIMEOUT = 79
    TCP_MAXRT = 80
    THREAD_SAFE = 81
    MULTICAST_MAXTPDU = 84
    VMCI_BUFFER_SIZE = 85, _OptType.int64
    VMCI_BUFFER_MIN_SIZE = 86, _OptType.int64
    VMCI_BUFFER_MAX_SIZE = 87, _OptType.int64
    VMCI_CONNECT_TIMEOUT = 88
    USE_FD = 89
    GSSAPI_PRINCIPAL_NAMETYPE = 90
    GSSAPI_SERVICE_PRINCIPAL_NAMETYPE = 91
    BINDTODEVICE = 92, _OptType.bytes

    # Deprecated options and aliases
    # must not use name-assignment, must have the same value
    IDENTITY = ROUTING_ID
    CONNECT_RID = CONNECT_ROUTING_ID
    TCP_ACCEPT_FILTER = 38, _OptType.bytes
    IPC_FILTER_PID = 58
    IPC_FILTER_UID = 59
    IPC_FILTER_GID = 60
    IPV4ONLY = 31
    DELAY_ATTACH_ON_CONNECT = IMMEDIATE
    FAIL_UNROUTABLE = ROUTER_MANDATORY
    ROUTER_BEHAVIOR = ROUTER_MANDATORY

    # Draft socket options
    ZAP_ENFORCE_DOMAIN = 93
    LOOPBACK_FASTPATH = 94
    METADATA = 95, _OptType.bytes
    MULTICAST_LOOP = 96
    ROUTER_NOTIFY = 97
    XPUB_MANUAL_LAST_VALUE = 98
    SOCKS_USERNAME = 99, _OptType.bytes
    SOCKS_PASSWORD = 100, _OptType.bytes
    IN_BATCH_SIZE = 101
    OUT_BATCH_SIZE = 102
    WSS_KEY_PEM = 103, _OptType.bytes
    WSS_CERT_PEM = 104, _OptType.bytes
    WSS_TRUST_PEM = 105, _OptType.bytes
    WSS_HOSTNAME = 106, _OptType.bytes
    WSS_TRUST_SYSTEM = 107
    ONLY_FIRST_SUBSCRIBE = 108
    RECONNECT_STOP = 109
    HELLO_MSG = 110, _OptType.bytes
    DISCONNECT_MSG = 111, _OptType.bytes
    PRIORITY = 112
    # 4.3.5
    BUSY_POLL = 113
    HICCUP_MSG = 114, _OptType.bytes
    XSUB_VERBOSE_UNSUBSCRIBE = 115
    TOPICS_COUNT = 116
    NORM_MODE = 117
    NORM_UNICAST_NACK = 118
    NORM_BUFFER_SIZE = 119
    NORM_SEGMENT_SIZE = 120
    NORM_BLOCK_SIZE = 121
    NORM_NUM_PARITY = 122
    NORM_NUM_AUTOPARITY = 123
    NORM_PUSH = 124


class MessageOption(IntEnum):
    """Options on zmq.Frame objects

    .. versionadded:: 23
    """

    MORE = 1
    SHARED = 3
    # Deprecated message options
    SRCFD = 2


class Flag(IntFlag):
    """Send/recv flags

    .. versionadded:: 23
    """

    DONTWAIT = 1
    SNDMORE = 2
    NOBLOCK = DONTWAIT


class RouterNotify(IntEnum):
    """Values for zmq.ROUTER_NOTIFY socket option

    .. versionadded:: 26
    .. versionadded:: libzmq-4.3.0 (draft)
    """

    @staticmethod
    def _global_name(name):
        return f"NOTIFY_{name}"

    CONNECT = 1
    DISCONNECT = 2


class NormMode(IntEnum):
    """Values for zmq.NORM_MODE socket option

    .. versionadded:: 26
    .. versionadded:: libzmq-4.3.5 (draft)
    """

    @staticmethod
    def _global_name(name):
        return f"NORM_{name}"

    FIXED = 0
    CC = 1
    CCL = 2
    CCE = 3
    CCE_ECNONLY = 4


class SecurityMechanism(IntEnum):
    """Security mechanisms (as returned by ``socket.get(zmq.MECHANISM)``)

    .. versionadded:: 23
    """

    NULL = 0
    PLAIN = 1
    CURVE = 2
    GSSAPI = 3


class ReconnectStop(IntEnum):
    """Select behavior for socket.reconnect_stop

    .. versionadded:: 25
    """

    @staticmethod
    def _global_name(name):
        return f"RECONNECT_STOP_{name}"

    CONN_REFUSED = 0x1
    HANDSHAKE_FAILED = 0x2
    AFTER_DISCONNECT = 0x4


class Event(IntFlag):
    """Socket monitoring events

    .. versionadded:: 23
    """

    @staticmethod
    def _global_name(name):
        if name.startswith("PROTOCOL_ERROR_"):
            return name
        else:
            # add EVENT_ prefix
            return "EVENT_" + name

    PROTOCOL_ERROR_WS_UNSPECIFIED = 0x30000000
    PROTOCOL_ERROR_ZMTP_UNSPECIFIED = 0x10000000
    PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND = 0x10000001
    PROTOCOL_ERROR_ZMTP_INVALID_SEQUENCE = 0x10000002
    PROTOCOL_ERROR_ZMTP_KEY_EXCHANGE = 0x10000003
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_UNSPECIFIED = 0x10000011
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_MESSAGE = 0x10000012
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO = 0x10000013
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_INITIATE = 0x10000014
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR = 0x10000015
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_READY = 0x10000016
    PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_WELCOME = 0x10000017
    PROTOCOL_ERROR_ZMTP_INVALID_METADATA = 0x10000018

    PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC = 0x11000001
    PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH = 0x11000002
    PROTOCOL_ERROR_ZAP_UNSPECIFIED = 0x20000000
    PROTOCOL_ERROR_ZAP_MALFORMED_REPLY = 0x20000001
    PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID = 0x20000002
    PROTOCOL_ERROR_ZAP_BAD_VERSION = 0x20000003
    PROTOCOL_ERROR_ZAP_INVALID_STATUS_CODE = 0x20000004
    PROTOCOL_ERROR_ZAP_INVALID_METADATA = 0x20000005

    # define event types _after_ overlapping protocol error masks
    CONNECTED = 0x0001
    CONNECT_DELAYED = 0x0002
    CONNECT_RETRIED = 0x0004
    LISTENING = 0x0008
    BIND_FAILED = 0x0010
    ACCEPTED = 0x0020
    ACCEPT_FAILED = 0x0040
    CLOSED = 0x0080
    CLOSE_FAILED = 0x0100
    DISCONNECTED = 0x0200
    MONITOR_STOPPED = 0x0400

    HANDSHAKE_FAILED_NO_DETAIL = 0x0800
    HANDSHAKE_SUCCEEDED = 0x1000
    HANDSHAKE_FAILED_PROTOCOL = 0x2000
    HANDSHAKE_FAILED_AUTH = 0x4000

    ALL_V1 = 0xFFFF
    ALL = ALL_V1

    # DRAFT Socket monitoring events
    PIPES_STATS = 0x10000
    ALL_V2 = ALL_V1 | PIPES_STATS


class PollEvent(IntFlag):
    """Which events to poll for in poll methods

    .. versionadded:: 23
    """

    POLLIN = 1
    POLLOUT = 2
    POLLERR = 4
    POLLPRI = 8


class DeviceType(IntEnum):
    """Device type constants for zmq.device

    .. versionadded:: 23
    """

    STREAMER = 1
    FORWARDER = 2
    QUEUE = 3


# AUTOGENERATED_BELOW_HERE


IO_THREADS: int = ContextOption.IO_THREADS
MAX_SOCKETS: int = ContextOption.MAX_SOCKETS
SOCKET_LIMIT: int = ContextOption.SOCKET_LIMIT
THREAD_PRIORITY: int = ContextOption.THREAD_PRIORITY
THREAD_SCHED_POLICY: int = ContextOption.THREAD_SCHED_POLICY
MAX_MSGSZ: int = ContextOption.MAX_MSGSZ
MSG_T_SIZE: int = ContextOption.MSG_T_SIZE
THREAD_AFFINITY_CPU_ADD: int = ContextOption.THREAD_AFFINITY_CPU_ADD
THREAD_AFFINITY_CPU_REMOVE: int = ContextOption.THREAD_AFFINITY_CPU_REMOVE
THREAD_NAME_PREFIX: int = ContextOption.THREAD_NAME_PREFIX
STREAMER: int = DeviceType.STREAMER
FORWARDER: int = DeviceType.FORWARDER
QUEUE: int = DeviceType.QUEUE
EAGAIN: int = Errno.EAGAIN
EFAULT: int = Errno.EFAULT
EINVAL: int = Errno.EINVAL
ENOTSUP: int = Errno.ENOTSUP
EPROTONOSUPPORT: int = Errno.EPROTONOSUPPORT
ENOBUFS: int = Errno.ENOBUFS
ENETDOWN: int = Errno.ENETDOWN
EADDRINUSE: int = Errno.EADDRINUSE
EADDRNOTAVAIL: int = Errno.EADDRNOTAVAIL
ECONNREFUSED: int = Errno.ECONNREFUSED
EINPROGRESS: int = Errno.EINPROGRESS
ENOTSOCK: int = Errno.ENOTSOCK
EMSGSIZE: int = Errno.EMSGSIZE
EAFNOSUPPORT: int = Errno.EAFNOSUPPORT
ENETUNREACH: int = Errno.ENETUNREACH
ECONNABORTED: int = Errno.ECONNABORTED
ECONNRESET: int = Errno.ECONNRESET
ENOTCONN: int = Errno.ENOTCONN
ETIMEDOUT: int = Errno.ETIMEDOUT
EHOSTUNREACH: int = Errno.EHOSTUNREACH
ENETRESET: int = Errno.ENETRESET
EFSM: int = Errno.EFSM
ENOCOMPATPROTO: int = Errno.ENOCOMPATPROTO
ETERM: int = Errno.ETERM
EMTHREAD: int = Errno.EMTHREAD
PROTOCOL_ERROR_WS_UNSPECIFIED: int = Event.PROTOCOL_ERROR_WS_UNSPECIFIED
PROTOCOL_ERROR_ZMTP_UNSPECIFIED: int = Event.PROTOCOL_ERROR_ZMTP_UNSPECIFIED
PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND: int = (
    Event.PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND
)
Event.PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND +) +PROTOCOL_ERROR_ZMTP_INVALID_SEQUENCE: int = Event.PROTOCOL_ERROR_ZMTP_INVALID_SEQUENCE +PROTOCOL_ERROR_ZMTP_KEY_EXCHANGE: int = Event.PROTOCOL_ERROR_ZMTP_KEY_EXCHANGE +PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_UNSPECIFIED: int = ( + Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_UNSPECIFIED +) +PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_MESSAGE: int = ( + Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_MESSAGE +) +PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO: int = ( + Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO +) +PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_INITIATE: int = ( + Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_INITIATE +) +PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR: int = ( + Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR +) +PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_READY: int = ( + Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_READY +) +PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_WELCOME: int = ( + Event.PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_WELCOME +) +PROTOCOL_ERROR_ZMTP_INVALID_METADATA: int = Event.PROTOCOL_ERROR_ZMTP_INVALID_METADATA +PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC: int = Event.PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC +PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH: int = ( + Event.PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH +) +PROTOCOL_ERROR_ZAP_UNSPECIFIED: int = Event.PROTOCOL_ERROR_ZAP_UNSPECIFIED +PROTOCOL_ERROR_ZAP_MALFORMED_REPLY: int = Event.PROTOCOL_ERROR_ZAP_MALFORMED_REPLY +PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID: int = Event.PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID +PROTOCOL_ERROR_ZAP_BAD_VERSION: int = Event.PROTOCOL_ERROR_ZAP_BAD_VERSION +PROTOCOL_ERROR_ZAP_INVALID_STATUS_CODE: int = ( + Event.PROTOCOL_ERROR_ZAP_INVALID_STATUS_CODE +) +PROTOCOL_ERROR_ZAP_INVALID_METADATA: int = Event.PROTOCOL_ERROR_ZAP_INVALID_METADATA +EVENT_CONNECTED: int = Event.CONNECTED +EVENT_CONNECT_DELAYED: int = Event.CONNECT_DELAYED +EVENT_CONNECT_RETRIED: int = Event.CONNECT_RETRIED +EVENT_LISTENING: int = Event.LISTENING +EVENT_BIND_FAILED: int = 
Event.BIND_FAILED +EVENT_ACCEPTED: int = Event.ACCEPTED +EVENT_ACCEPT_FAILED: int = Event.ACCEPT_FAILED +EVENT_CLOSED: int = Event.CLOSED +EVENT_CLOSE_FAILED: int = Event.CLOSE_FAILED +EVENT_DISCONNECTED: int = Event.DISCONNECTED +EVENT_MONITOR_STOPPED: int = Event.MONITOR_STOPPED +EVENT_HANDSHAKE_FAILED_NO_DETAIL: int = Event.HANDSHAKE_FAILED_NO_DETAIL +EVENT_HANDSHAKE_SUCCEEDED: int = Event.HANDSHAKE_SUCCEEDED +EVENT_HANDSHAKE_FAILED_PROTOCOL: int = Event.HANDSHAKE_FAILED_PROTOCOL +EVENT_HANDSHAKE_FAILED_AUTH: int = Event.HANDSHAKE_FAILED_AUTH +EVENT_ALL_V1: int = Event.ALL_V1 +EVENT_ALL: int = Event.ALL +EVENT_PIPES_STATS: int = Event.PIPES_STATS +EVENT_ALL_V2: int = Event.ALL_V2 +DONTWAIT: int = Flag.DONTWAIT +SNDMORE: int = Flag.SNDMORE +NOBLOCK: int = Flag.NOBLOCK +MORE: int = MessageOption.MORE +SHARED: int = MessageOption.SHARED +SRCFD: int = MessageOption.SRCFD +NORM_FIXED: int = NormMode.FIXED +NORM_CC: int = NormMode.CC +NORM_CCL: int = NormMode.CCL +NORM_CCE: int = NormMode.CCE +NORM_CCE_ECNONLY: int = NormMode.CCE_ECNONLY +POLLIN: int = PollEvent.POLLIN +POLLOUT: int = PollEvent.POLLOUT +POLLERR: int = PollEvent.POLLERR +POLLPRI: int = PollEvent.POLLPRI +RECONNECT_STOP_CONN_REFUSED: int = ReconnectStop.CONN_REFUSED +RECONNECT_STOP_HANDSHAKE_FAILED: int = ReconnectStop.HANDSHAKE_FAILED +RECONNECT_STOP_AFTER_DISCONNECT: int = ReconnectStop.AFTER_DISCONNECT +NOTIFY_CONNECT: int = RouterNotify.CONNECT +NOTIFY_DISCONNECT: int = RouterNotify.DISCONNECT +NULL: int = SecurityMechanism.NULL +PLAIN: int = SecurityMechanism.PLAIN +CURVE: int = SecurityMechanism.CURVE +GSSAPI: int = SecurityMechanism.GSSAPI +HWM: int = SocketOption.HWM +AFFINITY: int = SocketOption.AFFINITY +ROUTING_ID: int = SocketOption.ROUTING_ID +SUBSCRIBE: int = SocketOption.SUBSCRIBE +UNSUBSCRIBE: int = SocketOption.UNSUBSCRIBE +RATE: int = SocketOption.RATE +RECOVERY_IVL: int = SocketOption.RECOVERY_IVL +SNDBUF: int = SocketOption.SNDBUF +RCVBUF: int = SocketOption.RCVBUF +RCVMORE: int = 
SocketOption.RCVMORE +FD: int = SocketOption.FD +EVENTS: int = SocketOption.EVENTS +TYPE: int = SocketOption.TYPE +LINGER: int = SocketOption.LINGER +RECONNECT_IVL: int = SocketOption.RECONNECT_IVL +BACKLOG: int = SocketOption.BACKLOG +RECONNECT_IVL_MAX: int = SocketOption.RECONNECT_IVL_MAX +MAXMSGSIZE: int = SocketOption.MAXMSGSIZE +SNDHWM: int = SocketOption.SNDHWM +RCVHWM: int = SocketOption.RCVHWM +MULTICAST_HOPS: int = SocketOption.MULTICAST_HOPS +RCVTIMEO: int = SocketOption.RCVTIMEO +SNDTIMEO: int = SocketOption.SNDTIMEO +LAST_ENDPOINT: int = SocketOption.LAST_ENDPOINT +ROUTER_MANDATORY: int = SocketOption.ROUTER_MANDATORY +TCP_KEEPALIVE: int = SocketOption.TCP_KEEPALIVE +TCP_KEEPALIVE_CNT: int = SocketOption.TCP_KEEPALIVE_CNT +TCP_KEEPALIVE_IDLE: int = SocketOption.TCP_KEEPALIVE_IDLE +TCP_KEEPALIVE_INTVL: int = SocketOption.TCP_KEEPALIVE_INTVL +IMMEDIATE: int = SocketOption.IMMEDIATE +XPUB_VERBOSE: int = SocketOption.XPUB_VERBOSE +ROUTER_RAW: int = SocketOption.ROUTER_RAW +IPV6: int = SocketOption.IPV6 +MECHANISM: int = SocketOption.MECHANISM +PLAIN_SERVER: int = SocketOption.PLAIN_SERVER +PLAIN_USERNAME: int = SocketOption.PLAIN_USERNAME +PLAIN_PASSWORD: int = SocketOption.PLAIN_PASSWORD +CURVE_SERVER: int = SocketOption.CURVE_SERVER +CURVE_PUBLICKEY: int = SocketOption.CURVE_PUBLICKEY +CURVE_SECRETKEY: int = SocketOption.CURVE_SECRETKEY +CURVE_SERVERKEY: int = SocketOption.CURVE_SERVERKEY +PROBE_ROUTER: int = SocketOption.PROBE_ROUTER +REQ_CORRELATE: int = SocketOption.REQ_CORRELATE +REQ_RELAXED: int = SocketOption.REQ_RELAXED +CONFLATE: int = SocketOption.CONFLATE +ZAP_DOMAIN: int = SocketOption.ZAP_DOMAIN +ROUTER_HANDOVER: int = SocketOption.ROUTER_HANDOVER +TOS: int = SocketOption.TOS +CONNECT_ROUTING_ID: int = SocketOption.CONNECT_ROUTING_ID +GSSAPI_SERVER: int = SocketOption.GSSAPI_SERVER +GSSAPI_PRINCIPAL: int = SocketOption.GSSAPI_PRINCIPAL +GSSAPI_SERVICE_PRINCIPAL: int = SocketOption.GSSAPI_SERVICE_PRINCIPAL +GSSAPI_PLAINTEXT: int = 
SocketOption.GSSAPI_PLAINTEXT +HANDSHAKE_IVL: int = SocketOption.HANDSHAKE_IVL +SOCKS_PROXY: int = SocketOption.SOCKS_PROXY +XPUB_NODROP: int = SocketOption.XPUB_NODROP +BLOCKY: int = SocketOption.BLOCKY +XPUB_MANUAL: int = SocketOption.XPUB_MANUAL +XPUB_WELCOME_MSG: int = SocketOption.XPUB_WELCOME_MSG +STREAM_NOTIFY: int = SocketOption.STREAM_NOTIFY +INVERT_MATCHING: int = SocketOption.INVERT_MATCHING +HEARTBEAT_IVL: int = SocketOption.HEARTBEAT_IVL +HEARTBEAT_TTL: int = SocketOption.HEARTBEAT_TTL +HEARTBEAT_TIMEOUT: int = SocketOption.HEARTBEAT_TIMEOUT +XPUB_VERBOSER: int = SocketOption.XPUB_VERBOSER +CONNECT_TIMEOUT: int = SocketOption.CONNECT_TIMEOUT +TCP_MAXRT: int = SocketOption.TCP_MAXRT +THREAD_SAFE: int = SocketOption.THREAD_SAFE +MULTICAST_MAXTPDU: int = SocketOption.MULTICAST_MAXTPDU +VMCI_BUFFER_SIZE: int = SocketOption.VMCI_BUFFER_SIZE +VMCI_BUFFER_MIN_SIZE: int = SocketOption.VMCI_BUFFER_MIN_SIZE +VMCI_BUFFER_MAX_SIZE: int = SocketOption.VMCI_BUFFER_MAX_SIZE +VMCI_CONNECT_TIMEOUT: int = SocketOption.VMCI_CONNECT_TIMEOUT +USE_FD: int = SocketOption.USE_FD +GSSAPI_PRINCIPAL_NAMETYPE: int = SocketOption.GSSAPI_PRINCIPAL_NAMETYPE +GSSAPI_SERVICE_PRINCIPAL_NAMETYPE: int = SocketOption.GSSAPI_SERVICE_PRINCIPAL_NAMETYPE +BINDTODEVICE: int = SocketOption.BINDTODEVICE +IDENTITY: int = SocketOption.IDENTITY +CONNECT_RID: int = SocketOption.CONNECT_RID +TCP_ACCEPT_FILTER: int = SocketOption.TCP_ACCEPT_FILTER +IPC_FILTER_PID: int = SocketOption.IPC_FILTER_PID +IPC_FILTER_UID: int = SocketOption.IPC_FILTER_UID +IPC_FILTER_GID: int = SocketOption.IPC_FILTER_GID +IPV4ONLY: int = SocketOption.IPV4ONLY +DELAY_ATTACH_ON_CONNECT: int = SocketOption.DELAY_ATTACH_ON_CONNECT +FAIL_UNROUTABLE: int = SocketOption.FAIL_UNROUTABLE +ROUTER_BEHAVIOR: int = SocketOption.ROUTER_BEHAVIOR +ZAP_ENFORCE_DOMAIN: int = SocketOption.ZAP_ENFORCE_DOMAIN +LOOPBACK_FASTPATH: int = SocketOption.LOOPBACK_FASTPATH +METADATA: int = SocketOption.METADATA +MULTICAST_LOOP: int = 
SocketOption.MULTICAST_LOOP +ROUTER_NOTIFY: int = SocketOption.ROUTER_NOTIFY +XPUB_MANUAL_LAST_VALUE: int = SocketOption.XPUB_MANUAL_LAST_VALUE +SOCKS_USERNAME: int = SocketOption.SOCKS_USERNAME +SOCKS_PASSWORD: int = SocketOption.SOCKS_PASSWORD +IN_BATCH_SIZE: int = SocketOption.IN_BATCH_SIZE +OUT_BATCH_SIZE: int = SocketOption.OUT_BATCH_SIZE +WSS_KEY_PEM: int = SocketOption.WSS_KEY_PEM +WSS_CERT_PEM: int = SocketOption.WSS_CERT_PEM +WSS_TRUST_PEM: int = SocketOption.WSS_TRUST_PEM +WSS_HOSTNAME: int = SocketOption.WSS_HOSTNAME +WSS_TRUST_SYSTEM: int = SocketOption.WSS_TRUST_SYSTEM +ONLY_FIRST_SUBSCRIBE: int = SocketOption.ONLY_FIRST_SUBSCRIBE +RECONNECT_STOP: int = SocketOption.RECONNECT_STOP +HELLO_MSG: int = SocketOption.HELLO_MSG +DISCONNECT_MSG: int = SocketOption.DISCONNECT_MSG +PRIORITY: int = SocketOption.PRIORITY +BUSY_POLL: int = SocketOption.BUSY_POLL +HICCUP_MSG: int = SocketOption.HICCUP_MSG +XSUB_VERBOSE_UNSUBSCRIBE: int = SocketOption.XSUB_VERBOSE_UNSUBSCRIBE +TOPICS_COUNT: int = SocketOption.TOPICS_COUNT +NORM_MODE: int = SocketOption.NORM_MODE +NORM_UNICAST_NACK: int = SocketOption.NORM_UNICAST_NACK +NORM_BUFFER_SIZE: int = SocketOption.NORM_BUFFER_SIZE +NORM_SEGMENT_SIZE: int = SocketOption.NORM_SEGMENT_SIZE +NORM_BLOCK_SIZE: int = SocketOption.NORM_BLOCK_SIZE +NORM_NUM_PARITY: int = SocketOption.NORM_NUM_PARITY +NORM_NUM_AUTOPARITY: int = SocketOption.NORM_NUM_AUTOPARITY +NORM_PUSH: int = SocketOption.NORM_PUSH +PAIR: int = SocketType.PAIR +PUB: int = SocketType.PUB +SUB: int = SocketType.SUB +REQ: int = SocketType.REQ +REP: int = SocketType.REP +DEALER: int = SocketType.DEALER +ROUTER: int = SocketType.ROUTER +PULL: int = SocketType.PULL +PUSH: int = SocketType.PUSH +XPUB: int = SocketType.XPUB +XSUB: int = SocketType.XSUB +STREAM: int = SocketType.STREAM +XREQ: int = SocketType.XREQ +XREP: int = SocketType.XREP +SERVER: int = SocketType.SERVER +CLIENT: int = SocketType.CLIENT +RADIO: int = SocketType.RADIO +DISH: int = SocketType.DISH +GATHER: 
int = SocketType.GATHER +SCATTER: int = SocketType.SCATTER +DGRAM: int = SocketType.DGRAM +PEER: int = SocketType.PEER +CHANNEL: int = SocketType.CHANNEL + +__all__: list[str] = [ + "ContextOption", + "IO_THREADS", + "MAX_SOCKETS", + "SOCKET_LIMIT", + "THREAD_PRIORITY", + "THREAD_SCHED_POLICY", + "MAX_MSGSZ", + "MSG_T_SIZE", + "THREAD_AFFINITY_CPU_ADD", + "THREAD_AFFINITY_CPU_REMOVE", + "THREAD_NAME_PREFIX", + "DeviceType", + "STREAMER", + "FORWARDER", + "QUEUE", + "Enum", + "Errno", + "EAGAIN", + "EFAULT", + "EINVAL", + "ENOTSUP", + "EPROTONOSUPPORT", + "ENOBUFS", + "ENETDOWN", + "EADDRINUSE", + "EADDRNOTAVAIL", + "ECONNREFUSED", + "EINPROGRESS", + "ENOTSOCK", + "EMSGSIZE", + "EAFNOSUPPORT", + "ENETUNREACH", + "ECONNABORTED", + "ECONNRESET", + "ENOTCONN", + "ETIMEDOUT", + "EHOSTUNREACH", + "ENETRESET", + "EFSM", + "ENOCOMPATPROTO", + "ETERM", + "EMTHREAD", + "Event", + "PROTOCOL_ERROR_WS_UNSPECIFIED", + "PROTOCOL_ERROR_ZMTP_UNSPECIFIED", + "PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND", + "PROTOCOL_ERROR_ZMTP_INVALID_SEQUENCE", + "PROTOCOL_ERROR_ZMTP_KEY_EXCHANGE", + "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_UNSPECIFIED", + "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_MESSAGE", + "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO", + "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_INITIATE", + "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR", + "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_READY", + "PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_WELCOME", + "PROTOCOL_ERROR_ZMTP_INVALID_METADATA", + "PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC", + "PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH", + "PROTOCOL_ERROR_ZAP_UNSPECIFIED", + "PROTOCOL_ERROR_ZAP_MALFORMED_REPLY", + "PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID", + "PROTOCOL_ERROR_ZAP_BAD_VERSION", + "PROTOCOL_ERROR_ZAP_INVALID_STATUS_CODE", + "PROTOCOL_ERROR_ZAP_INVALID_METADATA", + "EVENT_CONNECTED", + "EVENT_CONNECT_DELAYED", + "EVENT_CONNECT_RETRIED", + "EVENT_LISTENING", + "EVENT_BIND_FAILED", + "EVENT_ACCEPTED", + "EVENT_ACCEPT_FAILED", + "EVENT_CLOSED", + "EVENT_CLOSE_FAILED", 
+ "EVENT_DISCONNECTED", + "EVENT_MONITOR_STOPPED", + "EVENT_HANDSHAKE_FAILED_NO_DETAIL", + "EVENT_HANDSHAKE_SUCCEEDED", + "EVENT_HANDSHAKE_FAILED_PROTOCOL", + "EVENT_HANDSHAKE_FAILED_AUTH", + "EVENT_ALL_V1", + "EVENT_ALL", + "EVENT_PIPES_STATS", + "EVENT_ALL_V2", + "Flag", + "DONTWAIT", + "SNDMORE", + "NOBLOCK", + "IntEnum", + "IntFlag", + "MessageOption", + "MORE", + "SHARED", + "SRCFD", + "NormMode", + "NORM_FIXED", + "NORM_CC", + "NORM_CCL", + "NORM_CCE", + "NORM_CCE_ECNONLY", + "PollEvent", + "POLLIN", + "POLLOUT", + "POLLERR", + "POLLPRI", + "ReconnectStop", + "RECONNECT_STOP_CONN_REFUSED", + "RECONNECT_STOP_HANDSHAKE_FAILED", + "RECONNECT_STOP_AFTER_DISCONNECT", + "RouterNotify", + "NOTIFY_CONNECT", + "NOTIFY_DISCONNECT", + "SecurityMechanism", + "NULL", + "PLAIN", + "CURVE", + "GSSAPI", + "SocketOption", + "HWM", + "AFFINITY", + "ROUTING_ID", + "SUBSCRIBE", + "UNSUBSCRIBE", + "RATE", + "RECOVERY_IVL", + "SNDBUF", + "RCVBUF", + "RCVMORE", + "FD", + "EVENTS", + "TYPE", + "LINGER", + "RECONNECT_IVL", + "BACKLOG", + "RECONNECT_IVL_MAX", + "MAXMSGSIZE", + "SNDHWM", + "RCVHWM", + "MULTICAST_HOPS", + "RCVTIMEO", + "SNDTIMEO", + "LAST_ENDPOINT", + "ROUTER_MANDATORY", + "TCP_KEEPALIVE", + "TCP_KEEPALIVE_CNT", + "TCP_KEEPALIVE_IDLE", + "TCP_KEEPALIVE_INTVL", + "IMMEDIATE", + "XPUB_VERBOSE", + "ROUTER_RAW", + "IPV6", + "MECHANISM", + "PLAIN_SERVER", + "PLAIN_USERNAME", + "PLAIN_PASSWORD", + "CURVE_SERVER", + "CURVE_PUBLICKEY", + "CURVE_SECRETKEY", + "CURVE_SERVERKEY", + "PROBE_ROUTER", + "REQ_CORRELATE", + "REQ_RELAXED", + "CONFLATE", + "ZAP_DOMAIN", + "ROUTER_HANDOVER", + "TOS", + "CONNECT_ROUTING_ID", + "GSSAPI_SERVER", + "GSSAPI_PRINCIPAL", + "GSSAPI_SERVICE_PRINCIPAL", + "GSSAPI_PLAINTEXT", + "HANDSHAKE_IVL", + "SOCKS_PROXY", + "XPUB_NODROP", + "BLOCKY", + "XPUB_MANUAL", + "XPUB_WELCOME_MSG", + "STREAM_NOTIFY", + "INVERT_MATCHING", + "HEARTBEAT_IVL", + "HEARTBEAT_TTL", + "HEARTBEAT_TIMEOUT", + "XPUB_VERBOSER", + "CONNECT_TIMEOUT", + "TCP_MAXRT", + "THREAD_SAFE", + 
"MULTICAST_MAXTPDU", + "VMCI_BUFFER_SIZE", + "VMCI_BUFFER_MIN_SIZE", + "VMCI_BUFFER_MAX_SIZE", + "VMCI_CONNECT_TIMEOUT", + "USE_FD", + "GSSAPI_PRINCIPAL_NAMETYPE", + "GSSAPI_SERVICE_PRINCIPAL_NAMETYPE", + "BINDTODEVICE", + "IDENTITY", + "CONNECT_RID", + "TCP_ACCEPT_FILTER", + "IPC_FILTER_PID", + "IPC_FILTER_UID", + "IPC_FILTER_GID", + "IPV4ONLY", + "DELAY_ATTACH_ON_CONNECT", + "FAIL_UNROUTABLE", + "ROUTER_BEHAVIOR", + "ZAP_ENFORCE_DOMAIN", + "LOOPBACK_FASTPATH", + "METADATA", + "MULTICAST_LOOP", + "ROUTER_NOTIFY", + "XPUB_MANUAL_LAST_VALUE", + "SOCKS_USERNAME", + "SOCKS_PASSWORD", + "IN_BATCH_SIZE", + "OUT_BATCH_SIZE", + "WSS_KEY_PEM", + "WSS_CERT_PEM", + "WSS_TRUST_PEM", + "WSS_HOSTNAME", + "WSS_TRUST_SYSTEM", + "ONLY_FIRST_SUBSCRIBE", + "RECONNECT_STOP", + "HELLO_MSG", + "DISCONNECT_MSG", + "PRIORITY", + "BUSY_POLL", + "HICCUP_MSG", + "XSUB_VERBOSE_UNSUBSCRIBE", + "TOPICS_COUNT", + "NORM_MODE", + "NORM_UNICAST_NACK", + "NORM_BUFFER_SIZE", + "NORM_SEGMENT_SIZE", + "NORM_BLOCK_SIZE", + "NORM_NUM_PARITY", + "NORM_NUM_AUTOPARITY", + "NORM_PUSH", + "SocketType", + "PAIR", + "PUB", + "SUB", + "REQ", + "REP", + "DEALER", + "ROUTER", + "PULL", + "PUSH", + "XPUB", + "XSUB", + "STREAM", + "XREQ", + "XREP", + "SERVER", + "CLIENT", + "RADIO", + "DISH", + "GATHER", + "SCATTER", + "DGRAM", + "PEER", + "CHANNEL", +] diff --git a/.venv/lib/python3.11/site-packages/zmq/devices/__init__.py b/.venv/lib/python3.11/site-packages/zmq/devices/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2236d024d3c9276de92550241cd7cd4cdebd1039 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/devices/__init__.py @@ -0,0 +1,28 @@ +"""0MQ Device classes for running in background threads or processes.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
+ +from zmq import device +from zmq.devices import ( + basedevice, + monitoredqueue, + monitoredqueuedevice, + proxydevice, + proxysteerabledevice, +) +from zmq.devices.basedevice import * +from zmq.devices.monitoredqueue import * +from zmq.devices.monitoredqueuedevice import * +from zmq.devices.proxydevice import * +from zmq.devices.proxysteerabledevice import * + +__all__ = ['device'] +for submod in ( + basedevice, + proxydevice, + proxysteerabledevice, + monitoredqueue, + monitoredqueuedevice, +): + __all__.extend(submod.__all__) # type: ignore diff --git a/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30b24fd183bec43a700bc1fda840aab786bda1f4 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/basedevice.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/basedevice.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1d690854168509a00089d62c0911fd01dc13923 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/basedevice.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/monitoredqueue.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/monitoredqueue.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..207d43264dacf716f430e4011e8c7ce5595c0acf Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/monitoredqueue.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/monitoredqueuedevice.cpython-311.pyc 
b/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/monitoredqueuedevice.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bd0f74a80df8a059325bb3dfb461659c5c56034 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/monitoredqueuedevice.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/proxydevice.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/proxydevice.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..060fc6b6e872ab2cbaa58f42b16817ac8855523d Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/proxydevice.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/proxysteerabledevice.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/proxysteerabledevice.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..baa1aa2c5ee65458e51bbadddea006a02394c805 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/devices/__pycache__/proxysteerabledevice.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/devices/basedevice.py b/.venv/lib/python3.11/site-packages/zmq/devices/basedevice.py new file mode 100644 index 0000000000000000000000000000000000000000..5591d844aa8acb461b1b7e96e257ea795750609c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/devices/basedevice.py @@ -0,0 +1,310 @@ +"""Classes for running 0MQ Devices in the background.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
+ +import time +from multiprocessing import Process +from threading import Thread +from typing import Any, Callable, List, Optional, Tuple + +import zmq +from zmq import ENOTSOCK, ETERM, PUSH, QUEUE, Context, ZMQBindError, ZMQError, device + + +class Device: + """A 0MQ Device to be run in the background. + + You do not pass Socket instances to this, but rather Socket types:: + + Device(device_type, in_socket_type, out_socket_type) + + For instance:: + + dev = Device(zmq.QUEUE, zmq.DEALER, zmq.ROUTER) + + Similar to zmq.device, but socket types instead of sockets themselves are + passed, and the sockets are created in the work thread, to avoid issues + with thread safety. As a result, additional bind_{in|out} and + connect_{in|out} methods and setsockopt_{in|out} allow users to specify + connections for the sockets. + + Parameters + ---------- + device_type : int + The 0MQ Device type + {in|out}_type : int + zmq socket types, to be passed later to context.socket(). e.g. + zmq.PUB, zmq.SUB, zmq.REQ. If out_type is < 0, then in_socket is used + for both in_socket and out_socket. + + Methods + ------- + bind_{in_out}(iface) + passthrough for ``{in|out}_socket.bind(iface)``, to be called in the thread + connect_{in_out}(iface) + passthrough for ``{in|out}_socket.connect(iface)``, to be called in the + thread + setsockopt_{in_out}(opt,value) + passthrough for ``{in|out}_socket.setsockopt(opt, value)``, to be called in + the thread + + Attributes + ---------- + daemon : bool + sets whether the thread should be run as a daemon + Default is true, because if it is false, the thread will not + exit unless it is killed + context_factory : callable + This is a class attribute. + Function for creating the Context. This will be Context.instance + in ThreadDevices, and Context in ProcessDevices. The only reason + it is not instance() in ProcessDevices is that there may be a stale + Context instance already initialized, and the forked environment + should *never* try to use it. 
+ """ + + context_factory: Callable[[], zmq.Context] = Context.instance + """Callable that returns a context. Typically either Context.instance or Context, + depending on whether the device should share the global instance or not. + """ + + daemon: bool + device_type: int + in_type: int + out_type: int + + _in_binds: List[str] + _in_connects: List[str] + _in_sockopts: List[Tuple[int, Any]] + _out_binds: List[str] + _out_connects: List[str] + _out_sockopts: List[Tuple[int, Any]] + _random_addrs: List[str] + _sockets: List[zmq.Socket] + + def __init__( + self, + device_type: int = QUEUE, + in_type: Optional[int] = None, + out_type: Optional[int] = None, + ) -> None: + self.device_type = device_type + if in_type is None: + raise TypeError("in_type must be specified") + if out_type is None: + raise TypeError("out_type must be specified") + self.in_type = in_type + self.out_type = out_type + self._in_binds = [] + self._in_connects = [] + self._in_sockopts = [] + self._out_binds = [] + self._out_connects = [] + self._out_sockopts = [] + self._random_addrs = [] + self.daemon = True + self.done = False + self._sockets = [] + + def bind_in(self, addr: str) -> None: + """Enqueue ZMQ address for binding on in_socket. + + See zmq.Socket.bind for details. + """ + self._in_binds.append(addr) + + def bind_in_to_random_port(self, addr: str, *args, **kwargs) -> int: + """Enqueue a random port on the given interface for binding on + in_socket. + + See zmq.Socket.bind_to_random_port for details. + + .. versionadded:: 18.0 + """ + port = self._reserve_random_port(addr, *args, **kwargs) + + self.bind_in(f'{addr}:{port}') + + return port + + def connect_in(self, addr: str) -> None: + """Enqueue ZMQ address for connecting on in_socket. + + See zmq.Socket.connect for details. + """ + self._in_connects.append(addr) + + def setsockopt_in(self, opt: int, value: Any) -> None: + """Enqueue setsockopt(opt, value) for in_socket + + See zmq.Socket.setsockopt for details. 
+ """ + self._in_sockopts.append((opt, value)) + + def bind_out(self, addr: str) -> None: + """Enqueue ZMQ address for binding on out_socket. + + See zmq.Socket.bind for details. + """ + self._out_binds.append(addr) + + def bind_out_to_random_port(self, addr: str, *args, **kwargs) -> int: + """Enqueue a random port on the given interface for binding on + out_socket. + + See zmq.Socket.bind_to_random_port for details. + + .. versionadded:: 18.0 + """ + port = self._reserve_random_port(addr, *args, **kwargs) + + self.bind_out(f'{addr}:{port}') + + return port + + def connect_out(self, addr: str): + """Enqueue ZMQ address for connecting on out_socket. + + See zmq.Socket.connect for details. + """ + self._out_connects.append(addr) + + def setsockopt_out(self, opt: int, value: Any): + """Enqueue setsockopt(opt, value) for out_socket + + See zmq.Socket.setsockopt for details. + """ + self._out_sockopts.append((opt, value)) + + def _reserve_random_port(self, addr: str, *args, **kwargs) -> int: + with Context() as ctx: + with ctx.socket(PUSH) as binder: + for i in range(5): + port = binder.bind_to_random_port(addr, *args, **kwargs) + + new_addr = f'{addr}:{port}' + + if new_addr in self._random_addrs: + continue + else: + break + else: + raise ZMQBindError("Could not reserve random port.") + + self._random_addrs.append(new_addr) + + return port + + def _setup_sockets(self) -> Tuple[zmq.Socket, zmq.Socket]: + ctx: zmq.Context[zmq.Socket] = self.context_factory() # type: ignore + self._context = ctx + + # create the sockets + ins = ctx.socket(self.in_type) + self._sockets.append(ins) + if self.out_type < 0: + outs = ins + else: + outs = ctx.socket(self.out_type) + self._sockets.append(outs) + + # set sockopts (must be done first, in case of zmq.IDENTITY) + for opt, value in self._in_sockopts: + ins.setsockopt(opt, value) + for opt, value in self._out_sockopts: + outs.setsockopt(opt, value) + + for iface in self._in_binds: + ins.bind(iface) + for iface in self._out_binds: + 
outs.bind(iface) + + for iface in self._in_connects: + ins.connect(iface) + for iface in self._out_connects: + outs.connect(iface) + + return ins, outs + + def run_device(self) -> None: + """The runner method. + + Do not call me directly, instead call ``self.start()``, just like a Thread. + """ + ins, outs = self._setup_sockets() + device(self.device_type, ins, outs) + + def _close_sockets(self): + """Cleanup sockets we created""" + for s in self._sockets: + if s and not s.closed: + s.close() + + def run(self) -> None: + """wrap run_device in try/catch ETERM""" + try: + self.run_device() + except ZMQError as e: + if e.errno in {ETERM, ENOTSOCK}: + # silence TERM, ENOTSOCK errors, because this should be a clean shutdown + pass + else: + raise + finally: + self.done = True + self._close_sockets() + + def start(self) -> None: + """Start the device. Override me in subclass for other launchers.""" + return self.run() + + def join(self, timeout: Optional[float] = None) -> None: + """wait for me to finish, like Thread.join. + + Reimplemented appropriately by subclasses.""" + tic = time.monotonic() + toc = tic + while not self.done and not (timeout is not None and toc - tic > timeout): + time.sleep(0.001) + toc = time.monotonic() + + +class BackgroundDevice(Device): + """Base class for launching Devices in background processes and threads.""" + + launcher: Any = None + _launch_class: Any = None + + def start(self) -> None: + self.launcher = self._launch_class(target=self.run) + self.launcher.daemon = self.daemon + return self.launcher.start() + + def join(self, timeout: Optional[float] = None) -> None: + return self.launcher.join(timeout=timeout) + + +class ThreadDevice(BackgroundDevice): + """A Device that will be run in a background Thread. + + See Device for details. + """ + + _launch_class = Thread + + +class ProcessDevice(BackgroundDevice): + """A Device that will be run in a background Process. + + See Device for details. 
+ """ + + _launch_class = Process + context_factory = Context + """Callable that returns a context. Typically either Context.instance or Context, + depending on whether the device should share the global instance or not. + """ + + +__all__ = ['Device', 'ThreadDevice', 'ProcessDevice'] diff --git a/.venv/lib/python3.11/site-packages/zmq/devices/monitoredqueue.py b/.venv/lib/python3.11/site-packages/zmq/devices/monitoredqueue.py new file mode 100644 index 0000000000000000000000000000000000000000..f590457a8696979889fe3b5f4b7604f6149d69b7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/devices/monitoredqueue.py @@ -0,0 +1,51 @@ +"""pure Python monitored_queue function + +For use when Cython extension is unavailable (PyPy). + +Authors +------- +* MinRK +""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +from typing import Callable + +import zmq +from zmq.backend import monitored_queue as _backend_mq + + +def _relay(ins, outs, sides, prefix, swap_ids): + msg = ins.recv_multipart() + if swap_ids: + msg[:2] = msg[:2][::-1] + outs.send_multipart(msg) + sides.send_multipart([prefix] + msg) + + +def _monitored_queue( + in_socket, out_socket, mon_socket, in_prefix=b'in', out_prefix=b'out' +): + swap_ids = in_socket.type == zmq.ROUTER and out_socket.type == zmq.ROUTER + + poller = zmq.Poller() + poller.register(in_socket, zmq.POLLIN) + poller.register(out_socket, zmq.POLLIN) + while True: + events = dict(poller.poll()) + if in_socket in events: + _relay(in_socket, out_socket, mon_socket, in_prefix, swap_ids) + if out_socket in events: + _relay(out_socket, in_socket, mon_socket, out_prefix, swap_ids) + + +monitored_queue: Callable +if _backend_mq is not None: + monitored_queue = _backend_mq # type: ignore +else: + # backend has no monitored_queue + monitored_queue = _monitored_queue + + +__all__ = ['monitored_queue'] diff --git a/.venv/lib/python3.11/site-packages/zmq/devices/monitoredqueuedevice.py 
b/.venv/lib/python3.11/site-packages/zmq/devices/monitoredqueuedevice.py new file mode 100644 index 0000000000000000000000000000000000000000..7bcc5629964e3c1fb19cf042c4ec4e16444ac8aa --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/devices/monitoredqueuedevice.py @@ -0,0 +1,60 @@ +"""MonitoredQueue classes and functions.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +from zmq import PUB +from zmq.devices.monitoredqueue import monitored_queue +from zmq.devices.proxydevice import ProcessProxy, Proxy, ProxyBase, ThreadProxy + + +class MonitoredQueueBase(ProxyBase): + """Base class for overriding methods.""" + + _in_prefix = b'' + _out_prefix = b'' + + def __init__( + self, in_type, out_type, mon_type=PUB, in_prefix=b'in', out_prefix=b'out' + ): + ProxyBase.__init__(self, in_type=in_type, out_type=out_type, mon_type=mon_type) + + self._in_prefix = in_prefix + self._out_prefix = out_prefix + + def run_device(self): + ins, outs, mons = self._setup_sockets() + monitored_queue(ins, outs, mons, self._in_prefix, self._out_prefix) + + +class MonitoredQueue(MonitoredQueueBase, Proxy): + """Class for running monitored_queue in the background. + + See zmq.devices.Device for most of the spec. MonitoredQueue differs from Proxy, + only in that it adds a ``prefix`` to messages sent on the monitor socket, + with a different prefix for each direction. + + MQ also supports ROUTER on both sides, which zmq.proxy does not. + + If a message arrives on `in_sock`, it will be prefixed with `in_prefix` on the monitor socket. + If it arrives on out_sock, it will be prefixed with `out_prefix`. + + A PUB socket is the most logical choice for the mon_socket, but it is not required. + """ + + +class ThreadMonitoredQueue(MonitoredQueueBase, ThreadProxy): + """Run zmq.monitored_queue in a background thread. + + See MonitoredQueue and Proxy for details. 
+ """ + + +class ProcessMonitoredQueue(MonitoredQueueBase, ProcessProxy): + """Run zmq.monitored_queue in a separate process. + + See MonitoredQueue and Proxy for details. + """ + + +__all__ = ['MonitoredQueue', 'ThreadMonitoredQueue', 'ProcessMonitoredQueue'] diff --git a/.venv/lib/python3.11/site-packages/zmq/devices/proxydevice.py b/.venv/lib/python3.11/site-packages/zmq/devices/proxydevice.py new file mode 100644 index 0000000000000000000000000000000000000000..f2af06793c27bbf4c9a9c33a377e2acd6ded5c09 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/devices/proxydevice.py @@ -0,0 +1,104 @@ +"""Proxy classes and functions.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import zmq +from zmq.devices.basedevice import Device, ProcessDevice, ThreadDevice + + +class ProxyBase: + """Base class for overriding methods.""" + + def __init__(self, in_type, out_type, mon_type=zmq.PUB): + Device.__init__(self, in_type=in_type, out_type=out_type) + self.mon_type = mon_type + self._mon_binds = [] + self._mon_connects = [] + self._mon_sockopts = [] + + def bind_mon(self, addr): + """Enqueue ZMQ address for binding on mon_socket. + + See zmq.Socket.bind for details. + """ + self._mon_binds.append(addr) + + def bind_mon_to_random_port(self, addr, *args, **kwargs): + """Enqueue a random port on the given interface for binding on + mon_socket. + + See zmq.Socket.bind_to_random_port for details. + + .. versionadded:: 18.0 + """ + port = self._reserve_random_port(addr, *args, **kwargs) + + self.bind_mon(f'{addr}:{port}') + + return port + + def connect_mon(self, addr): + """Enqueue ZMQ address for connecting on mon_socket. + + See zmq.Socket.connect for details. + """ + self._mon_connects.append(addr) + + def setsockopt_mon(self, opt, value): + """Enqueue setsockopt(opt, value) for mon_socket + + See zmq.Socket.setsockopt for details. 
+ """ + self._mon_sockopts.append((opt, value)) + + def _setup_sockets(self): + ins, outs = Device._setup_sockets(self) + ctx = self._context + mons = ctx.socket(self.mon_type) + self._sockets.append(mons) + + # set sockopts (must be done first, in case of zmq.IDENTITY) + for opt, value in self._mon_sockopts: + mons.setsockopt(opt, value) + + for iface in self._mon_binds: + mons.bind(iface) + + for iface in self._mon_connects: + mons.connect(iface) + + return ins, outs, mons + + def run_device(self): + ins, outs, mons = self._setup_sockets() + zmq.proxy(ins, outs, mons) + + +class Proxy(ProxyBase, Device): + """Threadsafe Proxy object. + + See zmq.devices.Device for most of the spec. This subclass adds a + _mon version of each _{in|out} method, for configuring the + monitor socket. + + A Proxy is a 3-socket ZMQ Device that functions just like a + QUEUE, except each message is also sent out on the monitor socket. + + A PUB socket is the most logical choice for the mon_socket, but it is not required. + """ + + +class ThreadProxy(ProxyBase, ThreadDevice): + """Proxy in a Thread. See Proxy for more.""" + + +class ProcessProxy(ProxyBase, ProcessDevice): + """Proxy in a Process. See Proxy for more.""" + + +__all__ = [ + 'Proxy', + 'ThreadProxy', + 'ProcessProxy', +] diff --git a/.venv/lib/python3.11/site-packages/zmq/devices/proxysteerabledevice.py b/.venv/lib/python3.11/site-packages/zmq/devices/proxysteerabledevice.py new file mode 100644 index 0000000000000000000000000000000000000000..256a1e0498c907791da79935c7ed0f35faf90ce0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/devices/proxysteerabledevice.py @@ -0,0 +1,106 @@ +"""Classes for running a steerable ZMQ proxy""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 

import zmq
from zmq.devices.proxydevice import ProcessProxy, Proxy, ThreadProxy


class ProxySteerableBase:
    """Base class providing the control-socket API for steerable proxies.

    Adds a fourth, "ctrl" socket on top of Proxy's in/out/mon trio.
    Configuration for it is queued exactly like the other sockets and
    applied when the device starts.
    """

    def __init__(self, in_type, out_type, mon_type=zmq.PUB, ctrl_type=None):
        super().__init__(in_type=in_type, out_type=out_type, mon_type=mon_type)
        self.ctrl_type = ctrl_type
        self._ctrl_binds = []
        self._ctrl_connects = []
        self._ctrl_sockopts = []

    def bind_ctrl(self, addr):
        """Enqueue a ZMQ address to bind ctrl_socket to.

        See zmq.Socket.bind for details.
        """
        self._ctrl_binds.append(addr)

    def bind_ctrl_to_random_port(self, addr, *args, **kwargs):
        """Reserve a random port on the given interface and enqueue it for
        binding on ctrl_socket.

        See zmq.Socket.bind_to_random_port for details.
        """
        chosen_port = self._reserve_random_port(addr, *args, **kwargs)
        self.bind_ctrl(f'{addr}:{chosen_port}')
        return chosen_port

    def connect_ctrl(self, addr):
        """Enqueue a ZMQ address to connect ctrl_socket to.

        See zmq.Socket.connect for details.
        """
        self._ctrl_connects.append(addr)

    def setsockopt_ctrl(self, opt, value):
        """Enqueue a setsockopt(opt, value) call for ctrl_socket.

        See zmq.Socket.setsockopt for details.
        """
        self._ctrl_sockopts.append((opt, value))

    def _setup_sockets(self):
        # Build in/out/mon via the Proxy machinery, then add the control socket.
        ins, outs, mons = super()._setup_sockets()
        ctrl_sock = self._context.socket(self.ctrl_type)
        self._sockets.append(ctrl_sock)

        # sockopts first, then binds, then connects — same order as the
        # other sockets
        for option, val in self._ctrl_sockopts:
            ctrl_sock.setsockopt(option, val)

        for endpoint in self._ctrl_binds:
            ctrl_sock.bind(endpoint)

        for endpoint in self._ctrl_connects:
            ctrl_sock.connect(endpoint)

        return ins, outs, mons, ctrl_sock

    def run_device(self):
        """Run zmq.proxy_steerable with the four configured sockets."""
        ins, outs, mons, ctrls = self._setup_sockets()
        zmq.proxy_steerable(ins, outs, mons, ctrls)


class ProxySteerable(ProxySteerableBase, Proxy):
    """Class for running a steerable proxy in the background.

    See zmq.devices.Proxy for most of the spec.  If the control socket is
    not NULL, the proxy supports control flow, provided by the socket.

    If PAUSE is received on this socket, the proxy suspends its activities.
    If RESUME is received, it goes on.  If TERMINATE is received, it
    terminates smoothly.  If the control socket is NULL, the proxy behaves
    exactly as if zmq.devices.Proxy had been used.

    This subclass adds a _ctrl variant of each _{in|out} method, for
    configuring the control socket.

    .. versionadded:: libzmq-4.1
    .. versionadded:: 18.0
    """


class ThreadProxySteerable(ProxySteerableBase, ThreadProxy):
    """ProxySteerable in a Thread. See ProxySteerable for details."""


class ProcessProxySteerable(ProxySteerableBase, ProcessProxy):
    """ProxySteerable in a Process. See ProxySteerable for details."""


__all__ = [
    'ProxySteerable',
    'ThreadProxySteerable',
    'ProcessProxySteerable',
]
+ +Note that the default formats for the PUBHandler object selectively include +the log level in the message. This creates redundancy in this script as it +always prints the topic of the message, which includes the log level. +Consider overriding the default formats with PUBHandler.setFormat() to +avoid this issue. + +""" + +# encoding: utf-8 + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import argparse +from datetime import datetime +from typing import Dict + +import zmq + +parser = argparse.ArgumentParser('ZMQ Log Watcher') +parser.add_argument('zmq_pub_url', type=str, help='URL to a ZMQ publisher socket.') +parser.add_argument( + '-t', + '--topic', + type=str, + default='', + help='Only receive messages that start with this topic.', +) +parser.add_argument( + '--timestamp', action='store_true', help='Append local time to the log messages.' +) +parser.add_argument( + '--separator', + type=str, + default=' | ', + help='String to print between topic and message.', +) +parser.add_argument( + '--dateformat', + type=str, + default='%Y-%d-%m %H:%M', + help='Set alternative date format for use with --timestamp.', +) +parser.add_argument( + '--align', + action='store_true', + default=False, + help='Try to align messages by the width of their topics.', +) +parser.add_argument( + '--color', + action='store_true', + default=False, + help='Color the output based on the error level. 
Requires the colorama module.', +) +args = parser.parse_args() + + +if args.color: + import colorama + + colorama.init() + colors = { + 'DEBUG': colorama.Fore.LIGHTCYAN_EX, + 'INFO': colorama.Fore.LIGHTWHITE_EX, + 'WARNING': colorama.Fore.YELLOW, + 'ERROR': colorama.Fore.LIGHTRED_EX, + 'CRITICAL': colorama.Fore.LIGHTRED_EX, + '__RESET__': colorama.Fore.RESET, + } +else: + colors = {} + + +ctx = zmq.Context() +sub = ctx.socket(zmq.SUB) +sub.subscribe(args.topic.encode("utf8")) +sub.connect(args.zmq_pub_url) + +topic_widths: Dict[int, int] = {} + +while True: + try: + if sub.poll(10, zmq.POLLIN): + topic, msg = sub.recv_multipart() + topics = topic.decode('utf8').strip().split('.') + + if args.align: + topics.extend(' ' for extra in range(len(topics), len(topic_widths))) + aligned_parts = [] + for key, part in enumerate(topics): + topic_widths[key] = max(len(part), topic_widths.get(key, 0)) + fmt = ''.join(('{:<', str(topic_widths[key]), '}')) + aligned_parts.append(fmt.format(part)) + + if len(topics) == 1: + level = topics[0] + else: + level = topics[1] + + fields = { + 'msg': msg.decode('utf8').strip(), + 'ts': ( + datetime.now().strftime(args.dateformat) + ' ' + if args.timestamp + else '' + ), + 'aligned': ( + '.'.join(aligned_parts) + if args.align + else topic.decode('utf8').strip() + ), + 'color': colors.get(level, ''), + 'color_rst': colors.get('__RESET__', ''), + 'sep': args.separator, + } + print('{ts}{color}{aligned}{sep}{msg}{color_rst}'.format(**fields)) + except KeyboardInterrupt: + break + +sub.disconnect(args.zmq_pub_url) +if args.color: + print(colorama.Fore.RESET) diff --git a/.venv/lib/python3.11/site-packages/zmq/log/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/log/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43dfe160e5cc76685885d88d94d49a74f6d17b35 Binary files /dev/null and 
b/.venv/lib/python3.11/site-packages/zmq/log/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/log/__pycache__/__main__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/log/__pycache__/__main__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..468dca59c944969b9d4e4367cc9549dec0d9e176 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/log/__pycache__/__main__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/log/__pycache__/handlers.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/log/__pycache__/handlers.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66637d2fef684b60d867de81a9bc978cd4dec4d8 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/log/__pycache__/handlers.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/log/handlers.py b/.venv/lib/python3.11/site-packages/zmq/log/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..8138d2eed96c676261688f74ba9fe91dbc29d9c0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/log/handlers.py @@ -0,0 +1,232 @@ +"""pyzmq logging handlers. + +This mainly defines the PUBHandler object for publishing logging messages over +a zmq.PUB socket. 
+ +The PUBHandler can be used with the regular logging module, as in:: + + >>> import logging + >>> handler = PUBHandler('tcp://127.0.0.1:12345') + >>> handler.root_topic = 'foo' + >>> logger = logging.getLogger('foobar') + >>> logger.setLevel(logging.DEBUG) + >>> logger.addHandler(handler) + +Or using ``dictConfig``, as in:: + + >>> from logging.config import dictConfig + >>> socket = Context.instance().socket(PUB) + >>> socket.connect('tcp://127.0.0.1:12345') + >>> dictConfig({ + >>> 'version': 1, + >>> 'handlers': { + >>> 'zmq': { + >>> 'class': 'zmq.log.handlers.PUBHandler', + >>> 'level': logging.DEBUG, + >>> 'root_topic': 'foo', + >>> 'interface_or_socket': socket + >>> } + >>> }, + >>> 'root': { + >>> 'level': 'DEBUG', + >>> 'handlers': ['zmq'], + >>> } + >>> }) + + +After this point, all messages logged by ``logger`` will be published on the +PUB socket. + +Code adapted from StarCluster: + + https://github.com/jtriley/StarCluster/blob/StarCluster-0.91/starcluster/logger.py +""" + +from __future__ import annotations + +import logging +from copy import copy + +import zmq + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +TOPIC_DELIM = "::" # delimiter for splitting topics on the receiving end. + + +class PUBHandler(logging.Handler): + """A basic logging handler that emits log messages through a PUB socket. + + Takes a PUB socket already bound to interfaces or an interface to bind to. + + Example:: + + sock = context.socket(zmq.PUB) + sock.bind('inproc://log') + handler = PUBHandler(sock) + + Or:: + + handler = PUBHandler('inproc://loc') + + These are equivalent. 
+ + Log messages handled by this handler are broadcast with ZMQ topics + ``this.root_topic`` comes first, followed by the log level + (DEBUG,INFO,etc.), followed by any additional subtopics specified in the + message by: log.debug("subtopic.subsub::the real message") + """ + + ctx: zmq.Context + socket: zmq.Socket + + def __init__( + self, + interface_or_socket: str | zmq.Socket, + context: zmq.Context | None = None, + root_topic: str = '', + ) -> None: + logging.Handler.__init__(self) + self.root_topic = root_topic + self.formatters = { + logging.DEBUG: logging.Formatter( + "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n" + ), + logging.INFO: logging.Formatter("%(message)s\n"), + logging.WARN: logging.Formatter( + "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n" + ), + logging.ERROR: logging.Formatter( + "%(levelname)s %(filename)s:%(lineno)d - %(message)s - %(exc_info)s\n" + ), + logging.CRITICAL: logging.Formatter( + "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n" + ), + } + if isinstance(interface_or_socket, zmq.Socket): + self.socket = interface_or_socket + self.ctx = self.socket.context + else: + self.ctx = context or zmq.Context() + self.socket = self.ctx.socket(zmq.PUB) + self.socket.bind(interface_or_socket) + + @property + def root_topic(self) -> str: + return self._root_topic + + @root_topic.setter + def root_topic(self, value: str): + self.setRootTopic(value) + + def setRootTopic(self, root_topic: str): + """Set the root topic for this handler. + + This value is prepended to all messages published by this handler, and it + defaults to the empty string ''. When you subscribe to this socket, you must + set your subscription to an empty string, or to at least the first letter of + the binary representation of this string to ensure you receive any messages + from this handler. + + If you use the default empty string root topic, messages will begin with + the binary representation of the log level string (INFO, WARN, etc.). 
+ Note that ZMQ SUB sockets can have multiple subscriptions. + """ + if isinstance(root_topic, bytes): + root_topic = root_topic.decode("utf8") + self._root_topic = root_topic + + def setFormatter(self, fmt, level=logging.NOTSET): + """Set the Formatter for this handler. + + If no level is provided, the same format is used for all levels. This + will overwrite all selective formatters set in the object constructor. + """ + if level == logging.NOTSET: + for fmt_level in self.formatters.keys(): + self.formatters[fmt_level] = fmt + else: + self.formatters[level] = fmt + + def format(self, record): + """Format a record.""" + return self.formatters[record.levelno].format(record) + + def emit(self, record): + """Emit a log message on my socket.""" + + # LogRecord.getMessage explicitly allows msg to be anything _castable_ to a str + try: + topic, msg = str(record.msg).split(TOPIC_DELIM, 1) + except ValueError: + topic = "" + else: + # copy to avoid mutating LogRecord in-place + record = copy(record) + record.msg = msg + + try: + bmsg = self.format(record).encode("utf8") + except Exception: + self.handleError(record) + return + + topic_list = [] + + if self.root_topic: + topic_list.append(self.root_topic) + + topic_list.append(record.levelname) + + if topic: + topic_list.append(topic) + + btopic = '.'.join(topic_list).encode("utf8", "replace") + + self.socket.send_multipart([btopic, bmsg]) + + +class TopicLogger(logging.Logger): + """A simple wrapper that takes an additional argument to log methods. + + All the regular methods exist, but instead of one msg argument, two + arguments: topic, msg are passed. + + That is:: + + logger.debug('msg') + + Would become:: + + logger.debug('topic.sub', 'msg') + """ + + def log(self, level, topic, msg, *args, **kwargs): + """Log 'msg % args' with level and topic. 
+ + To pass exception information, use the keyword argument exc_info + with a True value:: + + logger.log(level, "zmq.fun", "We have a %s", + "mysterious problem", exc_info=1) + """ + logging.Logger.log(self, level, f'{topic}{TOPIC_DELIM}{msg}', *args, **kwargs) + + +# Generate the methods of TopicLogger, since they are just adding a +# topic prefix to a message. +for name in "debug warn warning error critical fatal".split(): + try: + meth = getattr(logging.Logger, name) + except AttributeError: + # some methods are missing, e.g. Logger.warn was removed from Python 3.13 + continue + setattr( + TopicLogger, + name, + lambda self, level, topic, msg, *args, **kwargs: meth( + self, level, topic + TOPIC_DELIM + msg, *args, **kwargs + ), + ) diff --git a/.venv/lib/python3.11/site-packages/zmq/ssh/__init__.py b/.venv/lib/python3.11/site-packages/zmq/ssh/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..57f09568223c48babf9c9d7745218f7e33290eaa --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/ssh/__init__.py @@ -0,0 +1 @@ +from zmq.ssh.tunnel import * diff --git a/.venv/lib/python3.11/site-packages/zmq/ssh/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/ssh/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c696a34664596c446ad65a63139aca6b54a402f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/ssh/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/ssh/__pycache__/forward.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/ssh/__pycache__/forward.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..412fa1d4b7a39641461b93c8d730d7a998468096 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/ssh/__pycache__/forward.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/ssh/__pycache__/tunnel.cpython-311.pyc 
b/.venv/lib/python3.11/site-packages/zmq/ssh/__pycache__/tunnel.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abcbd735997c9b70be64bdeef5a7de256782d803 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/ssh/__pycache__/tunnel.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/ssh/forward.py b/.venv/lib/python3.11/site-packages/zmq/ssh/forward.py new file mode 100644 index 0000000000000000000000000000000000000000..2115137ae847e3c5cfaf059a7b11c3cfadca1313 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/ssh/forward.py @@ -0,0 +1,96 @@ +# +# This file is adapted from a paramiko demo, and thus licensed under LGPL 2.1. +# Original Copyright (C) 2003-2007 Robey Pointer +# Edits Copyright (C) 2010 The IPython Team +# +# Paramiko is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR +# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Paramiko; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA. + +""" +Sample script showing how to do local port forwarding over paramiko. + +This script connects to the requested SSH server and sets up local port +forwarding (the openssh -L option) from a local port through a tunneled +connection to a destination reachable from the SSH server machine. 
+""" + +import logging +import select +import socketserver + +logger = logging.getLogger('ssh') + + +class ForwardServer(socketserver.ThreadingTCPServer): + daemon_threads = True + allow_reuse_address = True + + +class Handler(socketserver.BaseRequestHandler): + def handle(self): + try: + chan = self.ssh_transport.open_channel( + 'direct-tcpip', + (self.chain_host, self.chain_port), + self.request.getpeername(), + ) + except Exception as e: + logger.debug( + 'Incoming request to %s:%d failed: %r', + self.chain_host, + self.chain_port, + e, + ) + return + if chan is None: + logger.debug( + 'Incoming request to %s:%d was rejected by the SSH server.', + self.chain_host, + self.chain_port, + ) + return + + logger.debug( + f'Connected! Tunnel open {self.request.getpeername()!r} -> {chan.getpeername()!r} -> {(self.chain_host, self.chain_port)!r}' + ) + while True: + r, w, x = select.select([self.request, chan], [], []) + if self.request in r: + data = self.request.recv(1024) + if len(data) == 0: + break + chan.send(data) + if chan in r: + data = chan.recv(1024) + if len(data) == 0: + break + self.request.send(data) + chan.close() + self.request.close() + logger.debug('Tunnel closed ') + + +def forward_tunnel(local_port, remote_host, remote_port, transport): + # this is a little convoluted, but lets me configure things for the Handler + # object. (SocketServer doesn't give Handlers any way to access the outer + # server normally.) 
+ class SubHander(Handler): + chain_host = remote_host + chain_port = remote_port + ssh_transport = transport + + ForwardServer(('127.0.0.1', local_port), SubHander).serve_forever() + + +__all__ = ['forward_tunnel'] diff --git a/.venv/lib/python3.11/site-packages/zmq/ssh/tunnel.py b/.venv/lib/python3.11/site-packages/zmq/ssh/tunnel.py new file mode 100644 index 0000000000000000000000000000000000000000..0e9c88e8a1acd88ad3aca4495d9632591cb7d336 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/ssh/tunnel.py @@ -0,0 +1,430 @@ +"""Basic ssh tunnel utilities, and convenience functions for tunneling +zeromq connections. +""" + +# Copyright (C) 2010-2011 IPython Development Team +# Copyright (C) 2011- PyZMQ Developers +# +# Redistributed from IPython under the terms of the BSD License. + +import atexit +import os +import re +import signal +import socket +import sys +import warnings +from getpass import getpass, getuser +from multiprocessing import Process + +try: + with warnings.catch_warnings(): + warnings.simplefilter('ignore', DeprecationWarning) + import paramiko + + SSHException = paramiko.ssh_exception.SSHException +except ImportError: + paramiko = None # type: ignore + + class SSHException(Exception): # type: ignore + pass + +else: + from .forward import forward_tunnel + +try: + import pexpect +except ImportError: + pexpect = None + + +class MaxRetryExceeded(Exception): + pass + + +def select_random_ports(n): + """Select and return n random ports that are available.""" + ports = [] + sockets = [] + for i in range(n): + sock = socket.socket() + sock.bind(('', 0)) + ports.append(sock.getsockname()[1]) + sockets.append(sock) + for sock in sockets: + sock.close() + return ports + + +# ----------------------------------------------------------------------------- +# Check for passwordless login +# ----------------------------------------------------------------------------- +_password_pat = re.compile(rb'pass(word|phrase)', re.IGNORECASE) + + +def 
def try_passwordless_ssh(server, keyfile, paramiko=None):
    """Attempt to make an ssh connection without a password.
    This is mainly used for requiring password input only once
    when many tunnels may be connected to the same server.

    If paramiko is None, the default for the platform is chosen.
    """
    # NOTE: the `paramiko` parameter deliberately shadows the module-level
    # import; here it is a tri-state flag (None/True/False) selecting the
    # backend, defaulting to paramiko on Windows and OpenSSH elsewhere.
    if paramiko is None:
        paramiko = sys.platform == 'win32'
    if not paramiko:
        f = _try_passwordless_openssh
    else:
        f = _try_passwordless_paramiko
    return f(server, keyfile)


def _try_passwordless_openssh(server, keyfile):
    """Try passwordless login with shell ssh command.

    Returns True if `ssh <server> exit` completes without prompting,
    False if a password/passphrase prompt appears.
    """
    if pexpect is None:
        raise ImportError("pexpect unavailable, use paramiko")
    cmd = 'ssh -f ' + server
    if keyfile:
        cmd += ' -i ' + keyfile
    cmd += ' exit'

    # pop SSH_ASKPASS from env so any prompt appears on the pty we watch
    env = os.environ.copy()
    env.pop('SSH_ASKPASS', None)

    ssh_newkey = 'Are you sure you want to continue connecting'
    p = pexpect.spawn(cmd, env=env)

    MAX_RETRY = 10

    # Poll in 0.1s slices: EOF means ssh exited without prompting
    # (passwordless login works); a password prompt means it does not.
    for _ in range(MAX_RETRY):
        try:
            i = p.expect([ssh_newkey, _password_pat], timeout=0.1)
            if i == 0:
                raise SSHException(
                    'The authenticity of the host can\'t be established.'
                )
        except pexpect.TIMEOUT:
            continue
        except pexpect.EOF:
            return True
        else:
            return False

    raise MaxRetryExceeded(f"Failed after {MAX_RETRY} attempts")


def _try_passwordless_paramiko(server, keyfile):
    """Try passwordless login with paramiko.

    Returns True if a key-based connection succeeds, False on
    AuthenticationException.
    """
    if paramiko is None:
        msg = "Paramiko unavailable, "
        if sys.platform == 'win32':
            msg += "Paramiko is required for ssh tunneled connections on Windows."
        else:
            msg += "use OpenSSH."
        raise ImportError(msg)
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    known_hosts = os.path.expanduser("~/.ssh/known_hosts")
    try:
        client.load_host_keys(known_hosts)
    except FileNotFoundError:
        # no known_hosts file is fine; rely on the policy below
        pass

    # allow e.g. PYZMQ_PARAMIKO_HOST_KEY_POLICY=AutoAdd to pick a
    # paramiko MissingHostKeyPolicy class by name
    policy_name = os.environ.get("PYZMQ_PARAMIKO_HOST_KEY_POLICY", None)
    if policy_name:
        policy = getattr(paramiko, f"{policy_name}Policy")
        client.set_missing_host_key_policy(policy())
    try:
        client.connect(
            server, port, username=username, key_filename=keyfile, look_for_keys=True
        )
    except paramiko.AuthenticationException:
        return False
    else:
        client.close()
        return True


def tunnel_connection(
    socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60
):
    """Connect a socket to an address via an ssh tunnel.

    This is a wrapper for socket.connect(addr), when addr is not accessible
    from the local machine. It simply creates an ssh tunnel using the remaining args,
    and calls socket.connect('tcp://localhost:lport') where lport is the randomly
    selected local port of the tunnel.

    """
    new_url, tunnel = open_tunnel(
        addr,
        server,
        keyfile=keyfile,
        password=password,
        paramiko=paramiko,
        timeout=timeout,
    )
    socket.connect(new_url)
    return tunnel


def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
    """Open a tunneled connection from a 0MQ url.

    For use inside tunnel_connection.

    Returns
    -------

    (url, tunnel) : (str, object)
        The 0MQ url that has been forwarded, and the tunnel object
    """

    lport = select_random_ports(1)[0]
    # split e.g. 'tcp://host:port' into transport / host / port
    transport, addr = addr.split('://')
    ip, rport = addr.split(':')
    rport = int(rport)
    # `paramiko` here is the backend-selection flag (see try_passwordless_ssh)
    if paramiko is None:
        paramiko = sys.platform == 'win32'
    if paramiko:
        tunnelf = paramiko_tunnel
    else:
        tunnelf = openssh_tunnel

    tunnel = tunnelf(
        lport,
        rport,
        server,
        remoteip=ip,
        keyfile=keyfile,
        password=password,
        timeout=timeout,
    )
    return f'tcp://127.0.0.1:{lport}', tunnel


def openssh_tunnel(
    lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60
):
    """Create an ssh tunnel using command-line ssh that connects port lport
    on this machine to localhost:rport on server.  The tunnel
    will automatically close when not in use, remaining open
    for a minimum of timeout seconds for an initial connection.

    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
    as seen from `server`.

    keyfile and password may be specified, but ssh config is checked for defaults.

    Parameters
    ----------

    lport : int
        local port for connecting to the tunnel from this machine.
    rport : int
        port on the remote machine to connect to.
    server : str
        The ssh server to connect to. The full ssh server string will be parsed.
        user@server:port
    remoteip : str [Default: 127.0.0.1]
        The remote ip, specifying the destination of the tunnel.
        Default is localhost, which means that the tunnel would redirect
        localhost:lport on this machine to localhost:rport on the *server*.

    keyfile : str; path to private key file
        This specifies a key to be used in ssh login, default None.
        Regular default ssh keys will be used without specifying this argument.
    password : str;
        Your ssh password to the ssh server. Note that if this is left None,
        you will be prompted for it if passwordless key based login is unavailable.
    timeout : int [default: 60]
        The time (in seconds) after which no activity will result in the tunnel
        closing.  This prevents orphaned tunnels from running forever.
    """
    if pexpect is None:
        raise ImportError("pexpect unavailable, use paramiko_tunnel")
    ssh = "ssh "
    if keyfile:
        ssh += "-i " + keyfile

    if ':' in server:
        server, port = server.split(':')
        ssh += f" -p {port}"

    # If an ssh ControlMaster is already running for this server, reuse it:
    # ask it to add the forwarding, and register the matching cancel at exit.
    cmd = f"{ssh} -O check {server}"
    (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
    if not exitstatus:
        # parse the master's pid out of "Master running (pid=NNN)"
        pid = int(output[output.find(b"(pid=") + 5 : output.find(b")")])
        cmd = f"{ssh} -O forward -L 127.0.0.1:{lport}:{remoteip}:{rport} {server}"
        (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
        if not exitstatus:
            atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1))
            return pid
    # No master: spawn a standalone background tunnel that exits after
    # `timeout` seconds of the remote `sleep` finishing.
    cmd = f"{ssh} -f -S none -L 127.0.0.1:{lport}:{remoteip}:{rport} {server} sleep {timeout}"

    # pop SSH_ASKPASS from env so prompts appear on the pty we watch
    env = os.environ.copy()
    env.pop('SSH_ASKPASS', None)

    ssh_newkey = 'Are you sure you want to continue connecting'
    tunnel = pexpect.spawn(cmd, env=env)
    failed = False
    MAX_RETRY = 10
    # Poll for prompts; answer password prompts until ssh backgrounds itself
    # (EOF with exit status 0) or the retry budget runs out.
    for _ in range(MAX_RETRY):
        try:
            i = tunnel.expect([ssh_newkey, _password_pat], timeout=0.1)
            if i == 0:
                raise SSHException(
                    'The authenticity of the host can\'t be established.'
                )
        except pexpect.TIMEOUT:
            continue
        except pexpect.EOF:
            if tunnel.exitstatus:
                print(tunnel.exitstatus)
                print(tunnel.before)
                print(tunnel.after)
                raise RuntimeError(f"tunnel '{cmd}' failed to start")
            else:
                return tunnel.pid
        else:
            if failed:
                print("Password rejected, try again")
                password = None
            if password is None:
                password = getpass(f"{server}'s password: ")
            tunnel.sendline(password)
            failed = True
    raise MaxRetryExceeded(f"Failed after {MAX_RETRY} attempts")


def _stop_tunnel(cmd):
    # atexit hook: ask the ssh ControlMaster to cancel a forwarding
    pexpect.run(cmd)


def _split_server(server):
    """Split a '[user@]host[:port]' string into (user, host, port)."""
    if '@' in server:
        username, server = server.split('@', 1)
    else:
        username = getuser()
    if ':' in server:
        server, port = server.split(':')
        port = int(port)
    else:
        port = 22
    return username, server, port


def paramiko_tunnel(
    lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60
):
    """launch a tunner with paramiko in a subprocess. This should only be used
    when shell ssh is unavailable (e.g. Windows).

    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
    as seen from `server`.

    If you are familiar with ssh tunnels, this creates the tunnel:

    ssh server -L localhost:lport:remoteip:rport

    keyfile and password may be specified, but ssh config is checked for defaults.


    Parameters
    ----------

    lport : int
        local port for connecting to the tunnel from this machine.
    rport : int
        port on the remote machine to connect to.
    server : str
        The ssh server to connect to. The full ssh server string will be parsed.
        user@server:port
    remoteip : str [Default: 127.0.0.1]
        The remote ip, specifying the destination of the tunnel.
        Default is localhost, which means that the tunnel would redirect
        localhost:lport on this machine to localhost:rport on the *server*.

    keyfile : str; path to private key file
        This specifies a key to be used in ssh login, default None.
        Regular default ssh keys will be used without specifying this argument.
    password : str;
        Your ssh password to the ssh server. Note that if this is left None,
        you will be prompted for it if passwordless key based login is unavailable.
    timeout : int [default: 60]
        The time (in seconds) after which no activity will result in the tunnel
        closing.  This prevents orphaned tunnels from running forever.

    """
    if paramiko is None:
        raise ImportError("Paramiko not available")

    # Prompt for the password up front (in the parent process) if key-based
    # login does not work, so the child never needs a tty.
    if password is None:
        if not _try_passwordless_paramiko(server, keyfile):
            password = getpass(f"{server}'s password: ")

    p = Process(
        target=_paramiko_tunnel,
        args=(lport, rport, server, remoteip),
        kwargs=dict(keyfile=keyfile, password=password),
    )
    p.daemon = True
    p.start()
    return p


def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
    """Function for actually starting a paramiko tunnel, to be passed
    to multiprocessing.Process(target=this), and not called directly.
    """
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.WarningPolicy())

    try:
        client.connect(
            server,
            port,
            username=username,
            key_filename=keyfile,
            look_for_keys=True,
            password=password,
        )
    #    except paramiko.AuthenticationException:
    #        if password is None:
    #            password = getpass("%s@%s's password: "%(username, server))
    #            client.connect(server, port, username=username, password=password)
    #        else:
    #            raise
    except Exception as e:
        print(f'*** Failed to connect to {server}:{port}: {e!r}')
        sys.exit(1)

    # Don't let SIGINT kill the tunnel subprocess; the parent handles it
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    try:
        forward_tunnel(lport, remoteip, rport, client.get_transport())
    except KeyboardInterrupt:
        print('SIGINT: Port forwarding stopped cleanly')
        sys.exit(0)
    except Exception as e:
        print(f"Port forwarding stopped uncleanly: {e}")
        sys.exit(255)


# On Windows, shell ssh is typically unavailable: default to paramiko there.
if sys.platform == 'win32':
    ssh_tunnel = paramiko_tunnel
else:
    ssh_tunnel = openssh_tunnel


__all__ = [
    'tunnel_connection',
    'ssh_tunnel',
    'openssh_tunnel',
    'paramiko_tunnel',
    'try_passwordless_ssh',
]
+ +from zmq import error +from zmq.sugar import context, frame, poll, socket, tracker, version + +__all__ = [] +for submod in (context, error, frame, poll, socket, tracker, version): + __all__.extend(submod.__all__) + +from zmq.error import * # noqa +from zmq.sugar.context import * # noqa +from zmq.sugar.frame import * # noqa +from zmq.sugar.poll import * # noqa +from zmq.sugar.socket import * # noqa + +# deprecated: +from zmq.sugar.stopwatch import Stopwatch # noqa +from zmq.sugar.tracker import * # noqa +from zmq.sugar.version import * # noqa + +__all__.append('Stopwatch') diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/__init__.pyi b/.venv/lib/python3.11/site-packages/zmq/sugar/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..732f605017a063e9b66403921623a468f1ea0abe --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/sugar/__init__.pyi @@ -0,0 +1,10 @@ +from zmq.error import * + +from . import constants as constants +from .constants import * +from .context import * +from .frame import * +from .poll import * +from .socket import * +from .tracker import * +from .version import * diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9feeb30198ff2558b739aadd6f9fb663a1d0ae73 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/attrsettr.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/attrsettr.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..feec2a032820462ff4f2f020bad4f2e55b565451 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/attrsettr.cpython-311.pyc differ diff --git 
a/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/context.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/context.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d2cc1c7b6ee98270144294687be7c20628bb066 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/context.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/frame.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/frame.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5b35435504ea50992c6944209cf4a638fcdacb8 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/frame.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/poll.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/poll.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a717b2f05be721e1fa46fe4da72691c1abcafdf3 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/poll.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/socket.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/socket.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44af21732c527c5e7894f978b5d04f38017ba6fc Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/socket.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/stopwatch.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/stopwatch.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d15ca96ecb9ad9c6b3d242cbb9f69a6b4c8282ca Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/stopwatch.cpython-311.pyc differ 
diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/tracker.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/tracker.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17bae0f891a79c44e918c26011168c036de9d390 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/tracker.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/version.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/version.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34e65deaa5714a09151ab914e81bdd179942a345 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/sugar/__pycache__/version.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/attrsettr.py b/.venv/lib/python3.11/site-packages/zmq/sugar/attrsettr.py new file mode 100644 index 0000000000000000000000000000000000000000..844fce606e53def45930d2e3a19873b4e8b4aeac --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/sugar/attrsettr.py @@ -0,0 +1,79 @@ +"""Mixin for mapping set/getattr to self.set/get""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. +from __future__ import annotations + +import errno +from typing import TypeVar, Union + +from .. 
import constants + +T = TypeVar("T") +OptValT = Union[str, bytes, int] + + +class AttributeSetter: + def __setattr__(self, key: str, value: OptValT) -> None: + """set zmq options by attribute""" + + if key in self.__dict__: + object.__setattr__(self, key, value) + return + # regular setattr only allowed for class-defined attributes + for cls in self.__class__.mro(): + if key in cls.__dict__ or key in getattr(cls, "__annotations__", {}): + object.__setattr__(self, key, value) + return + + upper_key = key.upper() + try: + opt = getattr(constants, upper_key) + except AttributeError: + raise AttributeError( + f"{self.__class__.__name__} has no such option: {upper_key}" + ) + else: + self._set_attr_opt(upper_key, opt, value) + + def _set_attr_opt(self, name: str, opt: int, value: OptValT) -> None: + """override if setattr should do something other than call self.set""" + self.set(opt, value) + + def __getattr__(self, key: str) -> OptValT: + """get zmq options by attribute""" + upper_key = key.upper() + try: + opt = getattr(constants, upper_key) + except AttributeError: + raise AttributeError( + f"{self.__class__.__name__} has no such option: {upper_key}" + ) from None + else: + from zmq import ZMQError + + try: + return self._get_attr_opt(upper_key, opt) + except ZMQError as e: + # EINVAL will be raised on access for write-only attributes. 
+ # Turn that into an AttributeError + # necessary for mocking + if e.errno in {errno.EINVAL, errno.EFAULT}: + raise AttributeError(f"{key} attribute is write-only") + else: + raise + + def _get_attr_opt(self, name, opt) -> OptValT: + """override if getattr should do something other than call self.get""" + return self.get(opt) + + def get(self, opt: int) -> OptValT: + """Override in subclass""" + raise NotImplementedError("override in subclass") + + def set(self, opt: int, val: OptValT) -> None: + """Override in subclass""" + raise NotImplementedError("override in subclass") + + +__all__ = ['AttributeSetter'] diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/context.py b/.venv/lib/python3.11/site-packages/zmq/sugar/context.py new file mode 100644 index 0000000000000000000000000000000000000000..a83e4cc881e532bef2dee01edacfb34fac3212eb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/sugar/context.py @@ -0,0 +1,422 @@ +"""Python bindings for 0MQ.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
+ +from __future__ import annotations + +import atexit +import os +from threading import Lock +from typing import Any, Callable, Generic, TypeVar, overload +from warnings import warn +from weakref import WeakSet + +import zmq +from zmq._typing import TypeAlias +from zmq.backend import Context as ContextBase +from zmq.constants import ContextOption, Errno, SocketOption +from zmq.error import ZMQError +from zmq.utils.interop import cast_int_addr + +from .attrsettr import AttributeSetter, OptValT +from .socket import Socket, SyncSocket + +# notice when exiting, to avoid triggering term on exit +_exiting = False + + +def _notice_atexit() -> None: + global _exiting + _exiting = True + + +atexit.register(_notice_atexit) + +_ContextType = TypeVar('_ContextType', bound='Context') +_SocketType = TypeVar('_SocketType', bound='Socket', covariant=True) + + +class Context(ContextBase, AttributeSetter, Generic[_SocketType]): + """Create a zmq Context + + A zmq Context creates sockets via its ``ctx.socket`` method. + + .. versionchanged:: 24 + + When using a Context as a context manager (``with zmq.Context()``), + or deleting a context without closing it first, + ``ctx.destroy()`` is called, + closing any leftover sockets, + instead of `ctx.term()` which requires sockets to be closed first. + + This prevents hangs caused by `ctx.term()` if sockets are left open, + but means that unclean destruction of contexts + (with sockets left open) is not safe + if sockets are managed in other threads. + + .. versionadded:: 25 + + Contexts can now be shadowed by passing another Context. 
+ This helps in creating an async copy of a sync context or vice versa:: + + ctx = zmq.Context(async_ctx) + + Which previously had to be:: + + ctx = zmq.Context.shadow(async_ctx.underlying) + """ + + sockopts: dict[int, Any] + _instance: Any = None + _instance_lock = Lock() + _instance_pid: int | None = None + _shadow = False + _shadow_obj = None + _warn_destroy_close = False + _sockets: WeakSet + # mypy doesn't like a default value here + _socket_class: type[_SocketType] = Socket # type: ignore + + @overload + def __init__(self: SyncContext, io_threads: int = 1): ... + + @overload + def __init__(self: SyncContext, io_threads: Context): + # this should be positional-only, but that requires 3.8 + ... + + @overload + def __init__(self: SyncContext, *, shadow: Context | int): ... + + def __init__( + self: SyncContext, + io_threads: int | Context = 1, + shadow: Context | int = 0, + ) -> None: + if isinstance(io_threads, Context): + # allow positional shadow `zmq.Context(zmq.asyncio.Context())` + # this s + shadow = io_threads + io_threads = 1 + + shadow_address: int = 0 + if shadow: + self._shadow = True + # hold a reference to the shadow object + self._shadow_obj = shadow + if not isinstance(shadow, int): + try: + shadow = shadow.underlying + except AttributeError: + pass + shadow_address = cast_int_addr(shadow) + else: + self._shadow = False + super().__init__(io_threads=io_threads, shadow=shadow_address) + self.sockopts = {} + self._sockets = WeakSet() + + def __del__(self) -> None: + """Deleting a Context without closing it destroys it and all sockets. + + .. versionchanged:: 24 + Switch from threadsafe `term()` which hangs in the event of open sockets + to less safe `destroy()` which + warns about any leftover sockets and closes them. + """ + + # Calling locals() here conceals issue #1167 on Windows CPython 3.5.4. 
+ locals() + + if not self._shadow and not _exiting and not self.closed: + self._warn_destroy_close = True + if warn is not None and getattr(self, "_sockets", None) is not None: + # warn can be None during process teardown + warn( + f"Unclosed context {self}", + ResourceWarning, + stacklevel=2, + source=self, + ) + self.destroy() + + _repr_cls = "zmq.Context" + + def __repr__(self) -> str: + cls = self.__class__ + # look up _repr_cls on exact class, not inherited + _repr_cls = cls.__dict__.get("_repr_cls", None) + if _repr_cls is None: + _repr_cls = f"{cls.__module__}.{cls.__name__}" + + closed = ' closed' if self.closed else '' + if getattr(self, "_sockets", None): + n_sockets = len(self._sockets) + s = 's' if n_sockets > 1 else '' + sockets = f"{n_sockets} socket{s}" + else: + sockets = "" + return f"<{_repr_cls}({sockets}) at {hex(id(self))}{closed}>" + + def __enter__(self: _ContextType) -> _ContextType: + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + # warn about any leftover sockets before closing them + self._warn_destroy_close = True + self.destroy() + + def __copy__(self: _ContextType, memo: Any = None) -> _ContextType: + """Copying a Context creates a shadow copy""" + return self.__class__.shadow(self.underlying) + + __deepcopy__ = __copy__ + + @classmethod + def shadow(cls: type[_ContextType], address: int | zmq.Context) -> _ContextType: + """Shadow an existing libzmq context + + address is a zmq.Context or an integer (or FFI pointer) + representing the address of the libzmq context. + + .. versionadded:: 14.1 + + .. versionadded:: 25 + Support for shadowing `zmq.Context` objects, + instead of just integer addresses. + """ + return cls(shadow=address) + + @classmethod + def shadow_pyczmq(cls: type[_ContextType], ctx: Any) -> _ContextType: + """Shadow an existing pyczmq context + + ctx is the FFI `zctx_t *` pointer + + .. 
versionadded:: 14.1 + """ + from pyczmq import zctx # type: ignore + + from zmq.utils.interop import cast_int_addr + + underlying = zctx.underlying(ctx) + address = cast_int_addr(underlying) + return cls(shadow=address) + + # static method copied from tornado IOLoop.instance + @classmethod + def instance(cls: type[_ContextType], io_threads: int = 1) -> _ContextType: + """Returns a global Context instance. + + Most single-process applications have a single, global Context. + Use this method instead of passing around Context instances + throughout your code. + + A common pattern for classes that depend on Contexts is to use + a default argument to enable programs with multiple Contexts + but not require the argument for simpler applications:: + + class MyClass(object): + def __init__(self, context=None): + self.context = context or Context.instance() + + .. versionchanged:: 18.1 + + When called in a subprocess after forking, + a new global instance is created instead of inheriting + a Context that won't work from the parent process. + """ + if ( + cls._instance is None + or cls._instance_pid != os.getpid() + or cls._instance.closed + ): + with cls._instance_lock: + if ( + cls._instance is None + or cls._instance_pid != os.getpid() + or cls._instance.closed + ): + cls._instance = cls(io_threads=io_threads) + cls._instance_pid = os.getpid() + return cls._instance + + def term(self) -> None: + """Close or terminate the context. + + Context termination is performed in the following steps: + + - Any blocking operations currently in progress on sockets open within context shall + raise :class:`zmq.ContextTerminated`. + With the exception of socket.close(), any further operations on sockets open within this context + shall raise :class:`zmq.ContextTerminated`. + - After interrupting all blocking calls, term shall block until the following conditions are satisfied: + - All sockets open within context have been closed. 
+ - For each socket within context, all messages sent on the socket have either been + physically transferred to a network peer, + or the socket's linger period set with the zmq.LINGER socket option has expired. + + For further details regarding socket linger behaviour refer to libzmq documentation for ZMQ_LINGER. + + This can be called to close the context by hand. If this is not called, + the context will automatically be closed when it is garbage collected, + in which case you may see a ResourceWarning about the unclosed context. + """ + super().term() + + # ------------------------------------------------------------------------- + # Hooks for ctxopt completion + # ------------------------------------------------------------------------- + + def __dir__(self) -> list[str]: + keys = dir(self.__class__) + keys.extend(ContextOption.__members__) + return keys + + # ------------------------------------------------------------------------- + # Creating Sockets + # ------------------------------------------------------------------------- + + def _add_socket(self, socket: Any) -> None: + """Add a weakref to a socket for Context.destroy / reference counting""" + self._sockets.add(socket) + + def _rm_socket(self, socket: Any) -> None: + """Remove a socket for Context.destroy / reference counting""" + # allow _sockets to be None in case of process teardown + if getattr(self, "_sockets", None) is not None: + self._sockets.discard(socket) + + def destroy(self, linger: int | None = None) -> None: + """Close all sockets associated with this context and then terminate + the context. + + .. warning:: + + destroy involves calling :meth:`Socket.close`, which is **NOT** threadsafe. + If there are active sockets in other threads, this must not be called. + + Parameters + ---------- + + linger : int, optional + If specified, set LINGER on sockets prior to closing them. 
+ """ + if self.closed: + return + + sockets: list[_SocketType] = list(getattr(self, "_sockets", None) or []) + for s in sockets: + if s and not s.closed: + if self._warn_destroy_close and warn is not None: + # warn can be None during process teardown + warn( + f"Destroying context with unclosed socket {s}", + ResourceWarning, + stacklevel=3, + source=s, + ) + if linger is not None: + s.setsockopt(SocketOption.LINGER, linger) + s.close() + + self.term() + + def socket( + self: _ContextType, + socket_type: int, + socket_class: Callable[[_ContextType, int], _SocketType] | None = None, + **kwargs: Any, + ) -> _SocketType: + """Create a Socket associated with this Context. + + Parameters + ---------- + socket_type : int + The socket type, which can be any of the 0MQ socket types: + REQ, REP, PUB, SUB, PAIR, DEALER, ROUTER, PULL, PUSH, etc. + + socket_class: zmq.Socket + The socket class to instantiate, if different from the default for this Context. + e.g. for creating an asyncio socket attached to a default Context or vice versa. + + .. versionadded:: 25 + + kwargs: + will be passed to the __init__ method of the socket class. + """ + if self.closed: + raise ZMQError(Errno.ENOTSUP) + if socket_class is None: + socket_class = self._socket_class + s: _SocketType = ( + socket_class( # set PYTHONTRACEMALLOC=2 to get the calling frame + self, socket_type, **kwargs + ) + ) + for opt, value in self.sockopts.items(): + try: + s.setsockopt(opt, value) + except ZMQError: + # ignore ZMQErrors, which are likely for socket options + # that do not apply to a particular socket type, e.g. + # SUBSCRIBE for non-SUB sockets. + pass + self._add_socket(s) + return s + + def setsockopt(self, opt: int, value: Any) -> None: + """set default socket options for new sockets created by this Context + + .. versionadded:: 13.0 + """ + self.sockopts[opt] = value + + def getsockopt(self, opt: int) -> OptValT: + """get default socket options for new sockets created by this Context + + .. 
versionadded:: 13.0 + """ + return self.sockopts[opt] + + def _set_attr_opt(self, name: str, opt: int, value: OptValT) -> None: + """set default sockopts as attributes""" + if name in ContextOption.__members__: + return self.set(opt, value) + elif name in SocketOption.__members__: + self.sockopts[opt] = value + else: + raise AttributeError(f"No such context or socket option: {name}") + + def _get_attr_opt(self, name: str, opt: int) -> OptValT: + """get default sockopts as attributes""" + if name in ContextOption.__members__: + return self.get(opt) + else: + if opt not in self.sockopts: + raise AttributeError(name) + else: + return self.sockopts[opt] + + def __delattr__(self, key: str) -> None: + """delete default sockopts as attributes""" + if key in self.__dict__: + self.__dict__.pop(key) + return + key = key.upper() + try: + opt = getattr(SocketOption, key) + except AttributeError: + raise AttributeError(f"No such socket option: {key!r}") + else: + if opt not in self.sockopts: + raise AttributeError(key) + else: + del self.sockopts[opt] + + +SyncContext: TypeAlias = Context[SyncSocket] + + +__all__ = ['Context', 'SyncContext'] diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/frame.py b/.venv/lib/python3.11/site-packages/zmq/sugar/frame.py new file mode 100644 index 0000000000000000000000000000000000000000..39587433c727aee1c2f67b8e0189b6ca8b2b51c9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/sugar/frame.py @@ -0,0 +1,134 @@ +"""0MQ Frame pure Python methods.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
+ +import zmq +from zmq.backend import Frame as FrameBase + +from .attrsettr import AttributeSetter + + +def _draft(v, feature): + zmq.error._check_version(v, feature) + if not zmq.DRAFT_API: + raise RuntimeError( + f"libzmq and pyzmq must be built with draft support for {feature}" + ) + + +class Frame(FrameBase, AttributeSetter): + """ + A zmq message Frame class for non-copying send/recvs and access to message properties. + + A ``zmq.Frame`` wraps an underlying ``zmq_msg_t``. + + Message *properties* can be accessed by treating a Frame like a dictionary (``frame["User-Id"]``). + + .. versionadded:: 14.4, libzmq 4 + + Frames created by ``recv(copy=False)`` can be used to access message properties and attributes, + such as the CURVE User-Id. + + For example:: + + frames = socket.recv_multipart(copy=False) + user_id = frames[0]["User-Id"] + + This class is used if you want to do non-copying send and recvs. + When you pass a chunk of bytes to this class, e.g. ``Frame(buf)``, the + ref-count of `buf` is increased by two: once because the Frame saves `buf` as + an instance attribute and another because a ZMQ message is created that + points to the buffer of `buf`. This second ref-count increase makes sure + that `buf` lives until all messages that use it have been sent. + Once 0MQ sends all the messages and it doesn't need the buffer of ``buf``, + 0MQ will call ``Py_DECREF(s)``. + + Parameters + ---------- + + data : object, optional + any object that provides the buffer interface will be used to + construct the 0MQ message data. + track : bool + whether a MessageTracker_ should be created to track this object. + Tracking a message has a cost at creation, because it creates a threadsafe + Event object. + copy : bool + default: use copy_threshold + Whether to create a copy of the data to pass to libzmq + or share the memory with libzmq. + If unspecified, copy_threshold is used. 
+ copy_threshold: int + default: :const:`zmq.COPY_THRESHOLD` + If copy is unspecified, messages smaller than this many bytes + will be copied and messages larger than this will be shared with libzmq. + """ + + def __getitem__(self, key): + # map Frame['User-Id'] to Frame.get('User-Id') + return self.get(key) + + def __repr__(self): + """Return the str form of the message.""" + nbytes = len(self) + msg_suffix = "" + if nbytes > 16: + msg_bytes = bytes(memoryview(self)[:12]) + if nbytes >= 1e9: + unit = "GB" + n = nbytes // 1e9 + elif nbytes >= 2**20: + unit = "MB" + n = nbytes // 1e6 + elif nbytes >= 1e3: + unit = "kB" + n = nbytes // 1e3 + else: + unit = "B" + n = nbytes + msg_suffix = f'...{n:.0f}{unit}' + else: + msg_bytes = self.bytes + + _module = self.__class__.__module__ + if _module == "zmq.sugar.frame": + _module = "zmq" + return f"<{_module}.{self.__class__.__name__}({msg_bytes!r}{msg_suffix})>" + + @property + def group(self): + """The RADIO-DISH group of the message. + + Requires libzmq >= 4.2 and pyzmq built with draft APIs enabled. + + .. versionadded:: 17 + """ + _draft((4, 2), "RADIO-DISH") + return self.get('group') + + @group.setter + def group(self, group): + _draft((4, 2), "RADIO-DISH") + self.set('group', group) + + @property + def routing_id(self): + """The CLIENT-SERVER routing id of the message. + + Requires libzmq >= 4.2 and pyzmq built with draft APIs enabled. + + .. 
versionadded:: 17 + """ + _draft((4, 2), "CLIENT-SERVER") + return self.get('routing_id') + + @routing_id.setter + def routing_id(self, routing_id): + _draft((4, 2), "CLIENT-SERVER") + self.set('routing_id', routing_id) + + +# keep deprecated alias +Message = Frame +__all__ = ['Frame', 'Message'] diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/poll.py b/.venv/lib/python3.11/site-packages/zmq/sugar/poll.py new file mode 100644 index 0000000000000000000000000000000000000000..27baad46e75626d68a0691b6c89ba0fbd7c353e2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/sugar/poll.py @@ -0,0 +1,172 @@ +"""0MQ polling related functions and classes.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +from __future__ import annotations + +from typing import Any + +from zmq.backend import zmq_poll +from zmq.constants import POLLERR, POLLIN, POLLOUT + +# ----------------------------------------------------------------------------- +# Polling related methods +# ----------------------------------------------------------------------------- + + +class Poller: + """A stateful poll interface that mirrors Python's built-in poll.""" + + sockets: list[tuple[Any, int]] + _map: dict + + def __init__(self) -> None: + self.sockets = [] + self._map = {} + + def __contains__(self, socket: Any) -> bool: + return socket in self._map + + def register(self, socket: Any, flags: int = POLLIN | POLLOUT): + """p.register(socket, flags=POLLIN|POLLOUT) + + Register a 0MQ socket or native fd for I/O monitoring. + + register(s,0) is equivalent to unregister(s). + + Parameters + ---------- + socket : zmq.Socket or native socket + A zmq.Socket or any Python object having a ``fileno()`` + method that returns a valid file descriptor. + flags : int + The events to watch for. Can be POLLIN, POLLOUT or POLLIN|POLLOUT. + If `flags=0`, socket will be unregistered. 
+ """ + if flags: + if socket in self._map: + idx = self._map[socket] + self.sockets[idx] = (socket, flags) + else: + idx = len(self.sockets) + self.sockets.append((socket, flags)) + self._map[socket] = idx + elif socket in self._map: + # uregister sockets registered with no events + self.unregister(socket) + else: + # ignore new sockets with no events + pass + + def modify(self, socket, flags=POLLIN | POLLOUT): + """Modify the flags for an already registered 0MQ socket or native fd.""" + self.register(socket, flags) + + def unregister(self, socket: Any): + """Remove a 0MQ socket or native fd for I/O monitoring. + + Parameters + ---------- + socket : Socket + The socket instance to stop polling. + """ + idx = self._map.pop(socket) + self.sockets.pop(idx) + # shift indices after deletion + for socket, flags in self.sockets[idx:]: + self._map[socket] -= 1 + + def poll(self, timeout: int | None = None) -> list[tuple[Any, int]]: + """Poll the registered 0MQ or native fds for I/O. + + If there are currently events ready to be processed, this function will return immediately. + Otherwise, this function will return as soon the first event is available or after timeout + milliseconds have elapsed. + + Parameters + ---------- + timeout : int + The timeout in milliseconds. If None, no `timeout` (infinite). This + is in milliseconds to be compatible with ``select.poll()``. + + Returns + ------- + events : list + The list of events that are ready to be processed. + This is a list of tuples of the form ``(socket, event_mask)``, where the 0MQ Socket + or integer fd is the first element, and the poll event mask (POLLIN, POLLOUT) is the second. + It is common to call ``events = dict(poller.poll())``, + which turns the list of tuples into a mapping of ``socket : event_mask``. 
+ """ + if timeout is None or timeout < 0: + timeout = -1 + elif isinstance(timeout, float): + timeout = int(timeout) + return zmq_poll(self.sockets, timeout=timeout) + + +def select( + rlist: list, wlist: list, xlist: list, timeout: float | None = None +) -> tuple[list, list, list]: + """select(rlist, wlist, xlist, timeout=None) -> (rlist, wlist, xlist) + + Return the result of poll as a lists of sockets ready for r/w/exception. + + This has the same interface as Python's built-in ``select.select()`` function. + + Parameters + ---------- + timeout : float, optional + The timeout in seconds. If None, no timeout (infinite). This is in seconds to be + compatible with ``select.select()``. + rlist : list + sockets/FDs to be polled for read events + wlist : list + sockets/FDs to be polled for write events + xlist : list + sockets/FDs to be polled for error events + + Returns + ------- + rlist: list + list of sockets or FDs that are readable + wlist: list + list of sockets or FDs that are writable + xlist: list + list of sockets or FDs that had error events (rare) + """ + if timeout is None: + timeout = -1 + # Convert from sec -> ms for zmq_poll. 
+ # zmq_poll accepts 3.x style timeout in ms + timeout = int(timeout * 1000.0) + if timeout < 0: + timeout = -1 + sockets = [] + for s in set(rlist + wlist + xlist): + flags = 0 + if s in rlist: + flags |= POLLIN + if s in wlist: + flags |= POLLOUT + if s in xlist: + flags |= POLLERR + sockets.append((s, flags)) + return_sockets = zmq_poll(sockets, timeout) + rlist, wlist, xlist = [], [], [] + for s, flags in return_sockets: + if flags & POLLIN: + rlist.append(s) + if flags & POLLOUT: + wlist.append(s) + if flags & POLLERR: + xlist.append(s) + return rlist, wlist, xlist + + +# ----------------------------------------------------------------------------- +# Symbols to export +# ----------------------------------------------------------------------------- + +__all__ = ['Poller', 'select'] diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/socket.py b/.venv/lib/python3.11/site-packages/zmq/sugar/socket.py new file mode 100644 index 0000000000000000000000000000000000000000..a4a906b531d2272cbaf649afe430048c335d8260 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/sugar/socket.py @@ -0,0 +1,1134 @@ +"""0MQ Socket pure Python methods.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
+ +from __future__ import annotations + +import errno +import pickle +import random +import sys +from typing import ( + Any, + Callable, + Generic, + List, + Sequence, + TypeVar, + Union, + cast, + overload, +) +from warnings import warn + +import zmq +from zmq._typing import Literal, TypeAlias +from zmq.backend import Socket as SocketBase +from zmq.error import ZMQBindError, ZMQError +from zmq.utils import jsonapi +from zmq.utils.interop import cast_int_addr + +from ..constants import SocketOption, SocketType, _OptType +from .attrsettr import AttributeSetter +from .poll import Poller + +try: + DEFAULT_PROTOCOL = pickle.DEFAULT_PROTOCOL +except AttributeError: + DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL + +_SocketType = TypeVar("_SocketType", bound="Socket") + + +class _SocketContext(Generic[_SocketType]): + """Context Manager for socket bind/unbind""" + + socket: _SocketType + kind: str + addr: str + + def __repr__(self): + return f"" + + def __init__( + self: _SocketContext[_SocketType], socket: _SocketType, kind: str, addr: str + ): + assert kind in {"bind", "connect"} + self.socket = socket + self.kind = kind + self.addr = addr + + def __enter__(self: _SocketContext[_SocketType]) -> _SocketType: + return self.socket + + def __exit__(self, *args): + if self.socket.closed: + return + if self.kind == "bind": + self.socket.unbind(self.addr) + elif self.kind == "connect": + self.socket.disconnect(self.addr) + + +SocketReturnType = TypeVar("SocketReturnType") + + +class Socket(SocketBase, AttributeSetter, Generic[SocketReturnType]): + """The ZMQ socket object + + To create a Socket, first create a Context:: + + ctx = zmq.Context.instance() + + then call ``ctx.socket(socket_type)``:: + + s = ctx.socket(zmq.ROUTER) + + .. versionadded:: 25 + + Sockets can now be shadowed by passing another Socket. 
+ This helps in creating an async copy of a sync socket or vice versa:: + + s = zmq.Socket(async_socket) + + Which previously had to be:: + + s = zmq.Socket.shadow(async_socket.underlying) + """ + + _shadow = False + _shadow_obj = None + _monitor_socket = None + _type_name = 'UNKNOWN' + + @overload + def __init__( + self: Socket[bytes], + ctx_or_socket: zmq.Context, + socket_type: int, + *, + copy_threshold: int | None = None, + ): ... + + @overload + def __init__( + self: Socket[bytes], + *, + shadow: Socket | int, + copy_threshold: int | None = None, + ): ... + + @overload + def __init__( + self: Socket[bytes], + ctx_or_socket: Socket, + ): ... + + def __init__( + self: Socket[bytes], + ctx_or_socket: zmq.Context | Socket | None = None, + socket_type: int = 0, + *, + shadow: Socket | int = 0, + copy_threshold: int | None = None, + ): + shadow_context: zmq.Context | None = None + if isinstance(ctx_or_socket, zmq.Socket): + # positional Socket(other_socket) + shadow = ctx_or_socket + ctx_or_socket = None + + shadow_address: int = 0 + + if shadow: + self._shadow = True + # hold a reference to the shadow object + self._shadow_obj = shadow + if not isinstance(shadow, int): + if isinstance(shadow, zmq.Socket): + shadow_context = shadow.context + try: + shadow = cast(int, shadow.underlying) + except AttributeError: + pass + shadow_address = cast_int_addr(shadow) + else: + self._shadow = False + + super().__init__( + ctx_or_socket, + socket_type, + shadow=shadow_address, + copy_threshold=copy_threshold, + ) + if self._shadow_obj and shadow_context: + # keep self.context reference if shadowing a Socket object + self.context = shadow_context + + try: + socket_type = cast(int, self.get(zmq.TYPE)) + except Exception: + pass + else: + try: + self.__dict__["type"] = stype = SocketType(socket_type) + except ValueError: + self._type_name = str(socket_type) + else: + self._type_name = stype.name + + def __del__(self): + if not self._shadow and not self.closed: + if warn is not 
None: + # warn can be None during process teardown + warn( + f"Unclosed socket {self}", + ResourceWarning, + stacklevel=2, + source=self, + ) + self.close() + + _repr_cls = "zmq.Socket" + + def __repr__(self): + cls = self.__class__ + # look up _repr_cls on exact class, not inherited + _repr_cls = cls.__dict__.get("_repr_cls", None) + if _repr_cls is None: + _repr_cls = f"{cls.__module__}.{cls.__name__}" + + closed = ' closed' if self._closed else '' + + return f"<{_repr_cls}(zmq.{self._type_name}) at {hex(id(self))}{closed}>" + + # socket as context manager: + def __enter__(self: _SocketType) -> _SocketType: + """Sockets are context managers + + .. versionadded:: 14.4 + """ + return self + + def __exit__(self, *args, **kwargs): + self.close() + + # ------------------------------------------------------------------------- + # Socket creation + # ------------------------------------------------------------------------- + + def __copy__(self: _SocketType, memo=None) -> _SocketType: + """Copying a Socket creates a shadow copy""" + return self.__class__.shadow(self.underlying) + + __deepcopy__ = __copy__ + + @classmethod + def shadow(cls: type[_SocketType], address: int | zmq.Socket) -> _SocketType: + """Shadow an existing libzmq socket + + address is a zmq.Socket or an integer (or FFI pointer) + representing the address of the libzmq socket. + + .. versionadded:: 14.1 + + .. versionadded:: 25 + Support for shadowing `zmq.Socket` objects, + instead of just integer addresses. + """ + return cls(shadow=address) + + def close(self, linger=None) -> None: + """ + Close the socket. + + If linger is specified, LINGER sockopt will be set prior to closing. + + Note: closing a zmq Socket may not close the underlying sockets + if there are undelivered messages. + Only after all messages are delivered or discarded by reaching the socket's LINGER timeout + (default: forever) + will the underlying sockets be closed. + + This can be called to close the socket by hand. 
If this is not + called, the socket will automatically be closed when it is + garbage collected, + in which case you may see a ResourceWarning about the unclosed socket. + """ + if self.context: + self.context._rm_socket(self) + super().close(linger=linger) + + # ------------------------------------------------------------------------- + # Connect/Bind context managers + # ------------------------------------------------------------------------- + + def _connect_cm(self: _SocketType, addr: str) -> _SocketContext[_SocketType]: + """Context manager to disconnect on exit + + .. versionadded:: 20.0 + """ + return _SocketContext(self, 'connect', addr) + + def _bind_cm(self: _SocketType, addr: str) -> _SocketContext[_SocketType]: + """Context manager to unbind on exit + + .. versionadded:: 20.0 + """ + try: + # retrieve last_endpoint + # to support binding on random ports via + # `socket.bind('tcp://127.0.0.1:0')` + addr = cast(bytes, self.get(zmq.LAST_ENDPOINT)).decode("utf8") + except (AttributeError, ZMQError, UnicodeDecodeError): + pass + return _SocketContext(self, 'bind', addr) + + def bind(self: _SocketType, addr: str) -> _SocketContext[_SocketType]: + """s.bind(addr) + + Bind the socket to an address. + + This causes the socket to listen on a network port. Sockets on the + other side of this connection will use ``Socket.connect(addr)`` to + connect to this socket. + + Returns a context manager which will call unbind on exit. + + .. versionadded:: 20.0 + Can be used as a context manager. + + .. versionadded:: 26.0 + binding to port 0 can be used as a context manager + for binding to a random port. + The URL can be retrieved as `socket.last_endpoint`. + + Parameters + ---------- + addr : str + The address string. This has the form 'protocol://interface:port', + for example 'tcp://127.0.0.1:5555'. Protocols supported include + tcp, udp, pgm, epgm, inproc and ipc. If the address is unicode, it is + encoded to utf-8 first. 
+ + """ + try: + super().bind(addr) + except ZMQError as e: + e.strerror += f" (addr={addr!r})" + raise + return self._bind_cm(addr) + + def connect(self: _SocketType, addr: str) -> _SocketContext[_SocketType]: + """s.connect(addr) + + Connect to a remote 0MQ socket. + + Returns a context manager which will call disconnect on exit. + + .. versionadded:: 20.0 + Can be used as a context manager. + + Parameters + ---------- + addr : str + The address string. This has the form 'protocol://interface:port', + for example 'tcp://127.0.0.1:5555'. Protocols supported are + tcp, udp, pgm, inproc and ipc. If the address is unicode, it is + encoded to utf-8 first. + + """ + try: + super().connect(addr) + except ZMQError as e: + e.strerror += f" (addr={addr!r})" + raise + return self._connect_cm(addr) + + # ------------------------------------------------------------------------- + # Deprecated aliases + # ------------------------------------------------------------------------- + + @property + def socket_type(self) -> int: + warn("Socket.socket_type is deprecated, use Socket.type", DeprecationWarning) + return cast(int, self.type) + + # ------------------------------------------------------------------------- + # Hooks for sockopt completion + # ------------------------------------------------------------------------- + + def __dir__(self): + keys = dir(self.__class__) + keys.extend(SocketOption.__members__) + return keys + + # ------------------------------------------------------------------------- + # Getting/Setting options + # ------------------------------------------------------------------------- + setsockopt = SocketBase.set + getsockopt = SocketBase.get + + def __setattr__(self, key, value): + """Override to allow setting zmq.[UN]SUBSCRIBE even though we have a subscribe method""" + if key in self.__dict__: + object.__setattr__(self, key, value) + return + _key = key.lower() + if _key in ('subscribe', 'unsubscribe'): + if isinstance(value, str): + value = 
value.encode('utf8') + if _key == 'subscribe': + self.set(zmq.SUBSCRIBE, value) + else: + self.set(zmq.UNSUBSCRIBE, value) + return + super().__setattr__(key, value) + + def fileno(self) -> int: + """Return edge-triggered file descriptor for this socket. + + This is a read-only edge-triggered file descriptor for both read and write events on this socket. + It is important that all available events be consumed when an event is detected, + otherwise the read event will not trigger again. + + .. versionadded:: 17.0 + """ + return self.FD + + def subscribe(self, topic: str | bytes) -> None: + """Subscribe to a topic + + Only for SUB sockets. + + .. versionadded:: 15.3 + """ + if isinstance(topic, str): + topic = topic.encode('utf8') + self.set(zmq.SUBSCRIBE, topic) + + def unsubscribe(self, topic: str | bytes) -> None: + """Unsubscribe from a topic + + Only for SUB sockets. + + .. versionadded:: 15.3 + """ + if isinstance(topic, str): + topic = topic.encode('utf8') + self.set(zmq.UNSUBSCRIBE, topic) + + def set_string(self, option: int, optval: str, encoding='utf-8') -> None: + """Set socket options with a unicode object. + + This is simply a wrapper for setsockopt to protect from encoding ambiguity. + + See the 0MQ documentation for details on specific options. + + Parameters + ---------- + option : int + The name of the option to set. Can be any of: SUBSCRIBE, + UNSUBSCRIBE, IDENTITY + optval : str + The value of the option to set. + encoding : str + The encoding to be used, default is utf8 + """ + if not isinstance(optval, str): + raise TypeError(f"strings only, not {type(optval)}: {optval!r}") + return self.set(option, optval.encode(encoding)) + + setsockopt_unicode = setsockopt_string = set_string + + def get_string(self, option: int, encoding='utf-8') -> str: + """Get the value of a socket option. + + See the 0MQ documentation for details on specific options. + + Parameters + ---------- + option : int + The option to retrieve. 
+ + Returns + ------- + optval : str + The value of the option as a unicode string. + """ + if SocketOption(option)._opt_type != _OptType.bytes: + raise TypeError(f"option {option} will not return a string to be decoded") + return cast(bytes, self.get(option)).decode(encoding) + + getsockopt_unicode = getsockopt_string = get_string + + def bind_to_random_port( + self: _SocketType, + addr: str, + min_port: int = 49152, + max_port: int = 65536, + max_tries: int = 100, + ) -> int: + """Bind this socket to a random port in a range. + + If the port range is unspecified, the system will choose the port. + + Parameters + ---------- + addr : str + The address string without the port to pass to ``Socket.bind()``. + min_port : int, optional + The minimum port in the range of ports to try (inclusive). + max_port : int, optional + The maximum port in the range of ports to try (exclusive). + max_tries : int, optional + The maximum number of bind attempts to make. + + Returns + ------- + port : int + The port the socket was bound to. + + Raises + ------ + ZMQBindError + if `max_tries` reached before successful bind + """ + if ( + (zmq.zmq_version_info() >= (3, 2)) + and min_port == 49152 + and max_port == 65536 + ): + # if LAST_ENDPOINT is supported, and min_port / max_port weren't specified, + # we can bind to port 0 and let the OS do the work + self.bind(f"{addr}:*") + url = cast(bytes, self.last_endpoint).decode('ascii', 'replace') + _, port_s = url.rsplit(':', 1) + return int(port_s) + + for i in range(max_tries): + try: + port = random.randrange(min_port, max_port) + self.bind(f'{addr}:{port}') + except ZMQError as exception: + en = exception.errno + if en == zmq.EADDRINUSE: + continue + elif sys.platform == 'win32' and en == errno.EACCES: + continue + else: + raise + else: + return port + raise ZMQBindError("Could not bind socket to random port.") + + def get_hwm(self) -> int: + """Get the High Water Mark. 
+ + On libzmq ≥ 3, this gets SNDHWM if available, otherwise RCVHWM + """ + major = zmq.zmq_version_info()[0] + if major >= 3: + # return sndhwm, fallback on rcvhwm + try: + return cast(int, self.get(zmq.SNDHWM)) + except zmq.ZMQError: + pass + + return cast(int, self.get(zmq.RCVHWM)) + else: + return cast(int, self.get(zmq.HWM)) + + def set_hwm(self, value: int) -> None: + """Set the High Water Mark. + + On libzmq ≥ 3, this sets both SNDHWM and RCVHWM + + + .. warning:: + + New values only take effect for subsequent socket + bind/connects. + """ + major = zmq.zmq_version_info()[0] + if major >= 3: + raised = None + try: + self.sndhwm = value + except Exception as e: + raised = e + try: + self.rcvhwm = value + except Exception as e: + raised = e + + if raised: + raise raised + else: + self.set(zmq.HWM, value) + + hwm = property( + get_hwm, + set_hwm, + None, + """Property for High Water Mark. + + Setting hwm sets both SNDHWM and RCVHWM as appropriate. + It gets SNDHWM if available, otherwise RCVHWM. + """, + ) + + # ------------------------------------------------------------------------- + # Sending and receiving messages + # ------------------------------------------------------------------------- + + @overload + def send( + self, + data: Any, + flags: int = ..., + copy: bool = ..., + *, + track: Literal[True], + routing_id: int | None = ..., + group: str | None = ..., + ) -> zmq.MessageTracker: ... + + @overload + def send( + self, + data: Any, + flags: int = ..., + copy: bool = ..., + *, + track: Literal[False], + routing_id: int | None = ..., + group: str | None = ..., + ) -> None: ... + + @overload + def send( + self, + data: Any, + flags: int = ..., + *, + copy: bool = ..., + routing_id: int | None = ..., + group: str | None = ..., + ) -> None: ... + + @overload + def send( + self, + data: Any, + flags: int = ..., + copy: bool = ..., + track: bool = ..., + routing_id: int | None = ..., + group: str | None = ..., + ) -> zmq.MessageTracker | None: ... 
+ + def send( + self, + data: Any, + flags: int = 0, + copy: bool = True, + track: bool = False, + routing_id: int | None = None, + group: str | None = None, + ) -> zmq.MessageTracker | None: + """Send a single zmq message frame on this socket. + + This queues the message to be sent by the IO thread at a later time. + + With flags=NOBLOCK, this raises :class:`ZMQError` if the queue is full; + otherwise, this waits until space is available. + See :class:`Poller` for more general non-blocking I/O. + + Parameters + ---------- + data : bytes, Frame, memoryview + The content of the message. This can be any object that provides + the Python buffer API (i.e. `memoryview(data)` can be called). + flags : int + 0, NOBLOCK, SNDMORE, or NOBLOCK|SNDMORE. + copy : bool + Should the message be sent in a copying or non-copying manner. + track : bool + Should the message be tracked for notification that ZMQ has + finished with it? (ignored if copy=True) + routing_id : int + For use with SERVER sockets + group : str + For use with RADIO sockets + + Returns + ------- + None : if `copy` or not track + None if message was sent, raises an exception otherwise. + MessageTracker : if track and not copy + a MessageTracker object, whose `done` property will + be False until the send is completed. + + Raises + ------ + TypeError + If a unicode object is passed + ValueError + If `track=True`, but an untracked Frame is passed. + ZMQError + If the send does not succeed for any reason (including + if NOBLOCK is set and the outgoing queue is full). + + + .. versionchanged:: 17.0 + + DRAFT support for routing_id and group arguments. 
+ """ + if routing_id is not None: + if not isinstance(data, zmq.Frame): + data = zmq.Frame( + data, + track=track, + copy=copy or None, + copy_threshold=self.copy_threshold, + ) + data.routing_id = routing_id + if group is not None: + if not isinstance(data, zmq.Frame): + data = zmq.Frame( + data, + track=track, + copy=copy or None, + copy_threshold=self.copy_threshold, + ) + data.group = group + return super().send(data, flags=flags, copy=copy, track=track) + + def send_multipart( + self, + msg_parts: Sequence, + flags: int = 0, + copy: bool = True, + track: bool = False, + **kwargs, + ): + """Send a sequence of buffers as a multipart message. + + The zmq.SNDMORE flag is added to all msg parts before the last. + + Parameters + ---------- + msg_parts : iterable + A sequence of objects to send as a multipart message. Each element + can be any sendable object (Frame, bytes, buffer-providers) + flags : int, optional + Any valid flags for :func:`Socket.send`. + SNDMORE is added automatically for frames before the last. + copy : bool, optional + Should the frame(s) be sent in a copying or non-copying manner. + If copy=False, frames smaller than self.copy_threshold bytes + will be copied anyway. + track : bool, optional + Should the frame(s) be tracked for notification that ZMQ has + finished with it (ignored if copy=True). + + Returns + ------- + None : if copy or not track + MessageTracker : if track and not copy + a MessageTracker object, whose `done` property will + be False until the last send is completed. + """ + # typecheck parts before sending: + for i, msg in enumerate(msg_parts): + if isinstance(msg, (zmq.Frame, bytes, memoryview)): + continue + try: + memoryview(msg) + except Exception: + rmsg = repr(msg) + if len(rmsg) > 32: + rmsg = rmsg[:32] + '...' + raise TypeError( + f"Frame {i} ({rmsg}) does not support the buffer interface." 
+ ) + for msg in msg_parts[:-1]: + self.send(msg, zmq.SNDMORE | flags, copy=copy, track=track) + # Send the last part without the extra SNDMORE flag. + return self.send(msg_parts[-1], flags, copy=copy, track=track) + + @overload + def recv_multipart( + self, flags: int = ..., *, copy: Literal[True], track: bool = ... + ) -> list[bytes]: ... + + @overload + def recv_multipart( + self, flags: int = ..., *, copy: Literal[False], track: bool = ... + ) -> list[zmq.Frame]: ... + + @overload + def recv_multipart(self, flags: int = ..., *, track: bool = ...) -> list[bytes]: ... + + @overload + def recv_multipart( + self, flags: int = 0, copy: bool = True, track: bool = False + ) -> list[zmq.Frame] | list[bytes]: ... + + def recv_multipart( + self, flags: int = 0, copy: bool = True, track: bool = False + ) -> list[zmq.Frame] | list[bytes]: + """Receive a multipart message as a list of bytes or Frame objects + + Parameters + ---------- + flags : int, optional + Any valid flags for :func:`Socket.recv`. + copy : bool, optional + Should the message frame(s) be received in a copying or non-copying manner? + If False a Frame object is returned for each part, if True a copy of + the bytes is made for each frame. + track : bool, optional + Should the message frame(s) be tracked for notification that ZMQ has + finished with it? (ignored if copy=True) + + Returns + ------- + msg_parts : list + A list of frames in the multipart message; either Frames or bytes, + depending on `copy`. + + Raises + ------ + ZMQError + for any of the reasons :func:`~Socket.recv` might fail + """ + parts = [self.recv(flags, copy=copy, track=track)] + # have first part already, only loop while more to receive + while self.getsockopt(zmq.RCVMORE): + part = self.recv(flags, copy=copy, track=track) + parts.append(part) + # cast List[Union] to Union[List] + # how do we get mypy to recognize that return type is invariant on `copy`? 
+ return cast(Union[List[zmq.Frame], List[bytes]], parts) + + def _deserialize( + self, + recvd: bytes, + load: Callable[[bytes], Any], + ) -> Any: + """Deserialize a received message + + Override in subclass (e.g. Futures) if recvd is not the raw bytes. + + The default implementation expects bytes and returns the deserialized message immediately. + + Parameters + ---------- + + load: callable + Callable that deserializes bytes + recvd: + The object returned by self.recv + + """ + return load(recvd) + + def send_serialized(self, msg, serialize, flags=0, copy=True, **kwargs): + """Send a message with a custom serialization function. + + .. versionadded:: 17 + + Parameters + ---------- + msg : The message to be sent. Can be any object serializable by `serialize`. + serialize : callable + The serialization function to use. + serialize(msg) should return an iterable of sendable message frames + (e.g. bytes objects), which will be passed to send_multipart. + flags : int, optional + Any valid flags for :func:`Socket.send`. + copy : bool, optional + Whether to copy the frames. + + """ + frames = serialize(msg) + return self.send_multipart(frames, flags=flags, copy=copy, **kwargs) + + def recv_serialized(self, deserialize, flags=0, copy=True): + """Receive a message with a custom deserialization function. + + .. versionadded:: 17 + + Parameters + ---------- + deserialize : callable + The deserialization function to use. + deserialize will be called with one argument: the list of frames + returned by recv_multipart() and can return any object. + flags : int, optional + Any valid flags for :func:`Socket.recv`. + copy : bool, optional + Whether to recv bytes or Frame objects. + + Returns + ------- + obj : object + The object returned by the deserialization function. 
+ + Raises + ------ + ZMQError + for any of the reasons :func:`~Socket.recv` might fail + """ + frames = self.recv_multipart(flags=flags, copy=copy) + return self._deserialize(frames, deserialize) + + def send_string( + self, + u: str, + flags: int = 0, + copy: bool = True, + encoding: str = 'utf-8', + **kwargs, + ) -> zmq.Frame | None: + """Send a Python unicode string as a message with an encoding. + + 0MQ communicates with raw bytes, so you must encode/decode + text (str) around 0MQ. + + Parameters + ---------- + u : str + The unicode string to send. + flags : int, optional + Any valid flags for :func:`Socket.send`. + encoding : str + The encoding to be used + """ + if not isinstance(u, str): + raise TypeError("str objects only") + return self.send(u.encode(encoding), flags=flags, copy=copy, **kwargs) + + send_unicode = send_string + + def recv_string(self, flags: int = 0, encoding: str = 'utf-8') -> str: + """Receive a unicode string, as sent by send_string. + + Parameters + ---------- + flags : int + Any valid flags for :func:`Socket.recv`. + encoding : str + The encoding to be used + + Returns + ------- + s : str + The Python unicode string that arrives as encoded bytes. + + Raises + ------ + ZMQError + for any of the reasons :func:`Socket.recv` might fail + """ + msg = self.recv(flags=flags) + return self._deserialize(msg, lambda buf: buf.decode(encoding)) + + recv_unicode = recv_string + + def send_pyobj( + self, obj: Any, flags: int = 0, protocol: int = DEFAULT_PROTOCOL, **kwargs + ) -> zmq.Frame | None: + """ + Send a Python object as a message using pickle to serialize. + + .. warning:: + + Never deserialize an untrusted message with pickle, + which can involve arbitrary code execution. + Make sure to authenticate the sources of messages + before unpickling them, e.g. with transport-level security + (e.g. CURVE, ZAP, or IPC permissions) + or signed messages. + + Parameters + ---------- + obj : Python object + The Python object to send. 
+ flags : int + Any valid flags for :func:`Socket.send`. + protocol : int + The pickle protocol number to use. The default is pickle.DEFAULT_PROTOCOL + where defined, and pickle.HIGHEST_PROTOCOL elsewhere. + """ + msg = pickle.dumps(obj, protocol) + return self.send(msg, flags=flags, **kwargs) + + def recv_pyobj(self, flags: int = 0) -> Any: + """ + Receive a Python object as a message using UNSAFE pickle to serialize. + + .. warning:: + + Never deserialize an untrusted message with pickle, + which can involve arbitrary code execution. + Make sure to authenticate the sources of messages + before unpickling them, e.g. with transport-level security + (such as CURVE or IPC permissions) + or authenticating messages themselves before deserializing. + + Parameters + ---------- + flags : int + Any valid flags for :func:`Socket.recv`. + + Returns + ------- + obj : Python object + The Python object that arrives as a message. + + Raises + ------ + ZMQError + for any of the reasons :func:`~Socket.recv` might fail + """ + msg = self.recv(flags) + return self._deserialize(msg, pickle.loads) + + def send_json(self, obj: Any, flags: int = 0, **kwargs) -> None: + """Send a Python object as a message using json to serialize. + + Keyword arguments are passed on to json.dumps + + Parameters + ---------- + obj : Python object + The Python object to send + flags : int + Any valid flags for :func:`Socket.send` + """ + send_kwargs = {} + for key in ('routing_id', 'group'): + if key in kwargs: + send_kwargs[key] = kwargs.pop(key) + msg = jsonapi.dumps(obj, **kwargs) + return self.send(msg, flags=flags, **send_kwargs) + + def recv_json(self, flags: int = 0, **kwargs) -> list | str | int | float | dict: + """Receive a Python object as a message using json to serialize. + + Keyword arguments are passed on to json.loads + + Parameters + ---------- + flags : int + Any valid flags for :func:`Socket.recv`. + + Returns + ------- + obj : Python object + The Python object that arrives as a message. 
+ + Raises + ------ + ZMQError + for any of the reasons :func:`~Socket.recv` might fail + """ + msg = self.recv(flags) + return self._deserialize(msg, lambda buf: jsonapi.loads(buf, **kwargs)) + + _poller_class = Poller + + def poll(self, timeout: int | None = None, flags: int = zmq.POLLIN) -> int: + """Poll the socket for events. + + See :class:`Poller` to wait for multiple sockets at once. + + Parameters + ---------- + timeout : int + The timeout (in milliseconds) to wait for an event. If unspecified + (or specified None), will wait forever for an event. + flags : int + default: POLLIN. + POLLIN, POLLOUT, or POLLIN|POLLOUT. The event flags to poll for. + + Returns + ------- + event_mask : int + The poll event mask (POLLIN, POLLOUT), + 0 if the timeout was reached without an event. + """ + + if self.closed: + raise ZMQError(zmq.ENOTSUP) + + p = self._poller_class() + p.register(self, flags) + evts = dict(p.poll(timeout)) + # return 0 if no events, otherwise return event bitfield + return evts.get(self, 0) + + def get_monitor_socket( + self: _SocketType, events: int | None = None, addr: str | None = None + ) -> _SocketType: + """Return a connected PAIR socket ready to receive the event notifications. + + .. versionadded:: libzmq-4.0 + .. versionadded:: 14.0 + + Parameters + ---------- + events : int + default: `zmq.EVENT_ALL` + The bitmask defining which events are wanted. + addr : str + The optional endpoint for the monitoring sockets. + + Returns + ------- + socket : zmq.Socket + The PAIR socket, connected and ready to receive messages. 
+ """ + # safe-guard, method only available on libzmq >= 4 + if zmq.zmq_version_info() < (4,): + raise NotImplementedError( + f"get_monitor_socket requires libzmq >= 4, have {zmq.zmq_version()}" + ) + + # if already monitoring, return existing socket + if self._monitor_socket: + if self._monitor_socket.closed: + self._monitor_socket = None + else: + return self._monitor_socket + + if addr is None: + # create endpoint name from internal fd + addr = f"inproc://monitor.s-{self.FD}" + if events is None: + # use all events + events = zmq.EVENT_ALL + # attach monitoring socket + self.monitor(addr, events) + # create new PAIR socket and connect it + self._monitor_socket = self.context.socket(zmq.PAIR) + self._monitor_socket.connect(addr) + return self._monitor_socket + + def disable_monitor(self) -> None: + """Shutdown the PAIR socket (created using get_monitor_socket) + that is serving socket events. + + .. versionadded:: 14.4 + """ + self._monitor_socket = None + self.monitor(None, 0) + + +SyncSocket: TypeAlias = Socket[bytes] + +__all__ = ['Socket', 'SyncSocket'] diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/stopwatch.py b/.venv/lib/python3.11/site-packages/zmq/sugar/stopwatch.py new file mode 100644 index 0000000000000000000000000000000000000000..2001e670a92761a5c3a10e40d93f39e2c21a3a10 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/sugar/stopwatch.py @@ -0,0 +1,36 @@ +"""Deprecated Stopwatch implementation""" + +# Copyright (c) PyZMQ Development Team. +# Distributed under the terms of the Modified BSD License. + + +class Stopwatch: + """Deprecated zmq.Stopwatch implementation + + You can use Python's builtin timers (time.monotonic, etc.). + """ + + def __init__(self): + import warnings + + warnings.warn( + "zmq.Stopwatch is deprecated. 
Use stdlib time.monotonic and friends instead", + DeprecationWarning, + stacklevel=2, + ) + self._start = 0 + import time + + try: + self._monotonic = time.monotonic + except AttributeError: + self._monotonic = time.time + + def start(self): + """Start the counter""" + self._start = self._monotonic() + + def stop(self): + """Return time since start in microseconds""" + stop = self._monotonic() + return int(1e6 * (stop - self._start)) diff --git a/.venv/lib/python3.11/site-packages/zmq/sugar/tracker.py b/.venv/lib/python3.11/site-packages/zmq/sugar/tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..973fdbd6e2740951969e0f071dd07837cad1d632 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zmq/sugar/tracker.py @@ -0,0 +1,116 @@ +"""Tracker for zero-copy messages with 0MQ.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +from __future__ import annotations + +import time +from threading import Event + +from zmq.backend import Frame +from zmq.error import NotDone + + +class MessageTracker: + """A class for tracking if 0MQ is done using one or more messages. + + When you send a 0MQ message, it is not sent immediately. The 0MQ IO thread + sends the message at some later time. Often you want to know when 0MQ has + actually sent the message though. This is complicated by the fact that + a single 0MQ message can be sent multiple times using different sockets. + This class allows you to track all of the 0MQ usages of a message. + + Parameters + ---------- + towatch : Event, MessageTracker, zmq.Frame + This objects to track. This class can track the low-level + Events used by the Message class, other MessageTrackers or + actual Messages. + """ + + events: set[Event] + peers: set[MessageTracker] + + def __init__(self, *towatch: tuple[MessageTracker | Event | Frame]): + """Create a message tracker to track a set of messages. 
    def wait(self, timeout: float | int = -1):
        """Wait for 0MQ to be done with the message or until `timeout`.

        Parameters
        ----------
        timeout : float
            default: -1, which means wait forever.
            Maximum time in (s) to wait before raising NotDone.

        Returns
        -------
        None
            if done before `timeout`

        Raises
        ------
        NotDone
            if `timeout` reached before I am done.
        """
        # NOTE(review): uses wall-clock time.time for budget accounting —
        # a monotonic clock would be immune to clock jumps; confirm intent
        tic = time.time()
        remaining: float
        if timeout is False or timeout < 0:
            # "wait forever" is approximated by a week-long budget
            remaining = 3600 * 24 * 7  # a week
        else:
            remaining = timeout
        # wait on each low-level Event, deducting elapsed time from the
        # shared budget; an Event still unset after its wait means timeout
        for evt in self.events:
            if remaining < 0:
                raise NotDone
            evt.wait(timeout=remaining)
            if not evt.is_set():
                raise NotDone
            toc = time.time()
            remaining -= toc - tic
            tic = toc

        # peer trackers enforce their own timeout: peer.wait raises NotDone
        # itself, so no is-done re-check is needed here
        for peer in self.peers:
            if remaining < 0:
                raise NotDone
            peer.wait(timeout=remaining)
            toc = time.time()
            remaining -= toc - tic
            tic = toc
from __future__ import annotations

import re
from typing import cast

from zmq.backend import zmq_version_info

__version__: str = "26.2.1"
# major.minor.patch with an optional extra suffix (e.g. a ".dev" marker)
_version_pat = re.compile(r"(\d+)\.(\d+)\.(\d+)(.*)")
# typing.Match is deprecated since Python 3.8; use re.Match directly
_match = cast("re.Match", _version_pat.match(__version__))
_version_groups = _match.groups()

VERSION_MAJOR = int(_version_groups[0])
VERSION_MINOR = int(_version_groups[1])
VERSION_PATCH = int(_version_groups[2])
VERSION_EXTRA = _version_groups[3].lstrip(".")

version_info: tuple[int, int, int] | tuple[int, int, int, float] = (
    VERSION_MAJOR,
    VERSION_MINOR,
    VERSION_PATCH,
)

if VERSION_EXTRA:
    # development versions append inf, so (M, m, p, inf) compares
    # after the plain (M, m, p) release tuple
    version_info = (
        VERSION_MAJOR,
        VERSION_MINOR,
        VERSION_PATCH,
        float('inf'),
    )

__revision__: str = ''


def pyzmq_version() -> str:
    """return the version of pyzmq as a string"""
    if __revision__:
        # development checkout: append an abbreviated revision hash
        return '+'.join([__version__, __revision__[:6]])
    else:
        return __version__


def pyzmq_version_info() -> tuple[int, int, int] | tuple[int, int, int, float]:
    """return the pyzmq version as a tuple of at least three numbers

    If pyzmq is a development version, `inf` will be appended after the third integer.
    """
    return version_info


def zmq_version() -> str:
    """return the version of libzmq as a string"""
    return "{}.{}.{}".format(*zmq_version_info())


__all__ = [
    'zmq_version',
    'zmq_version_info',
    'pyzmq_version',
    'pyzmq_version_info',
    '__version__',
    '__revision__',
]
+ +import os +import platform +import signal +import sys +import time +import warnings +from functools import partial +from threading import Thread +from typing import List +from unittest import SkipTest, TestCase + +from pytest import mark + +import zmq +from zmq.utils import jsonapi + +try: + import gevent + + from zmq import green as gzmq + + have_gevent = True +except ImportError: + have_gevent = False + + +PYPY = platform.python_implementation() == 'PyPy' + +# ----------------------------------------------------------------------------- +# skip decorators (directly from unittest) +# ----------------------------------------------------------------------------- +warnings.warn( + "zmq.tests is deprecated in pyzmq 25, we recommend managing your own contexts and sockets.", + DeprecationWarning, +) + + +def _id(x): + return x + + +skip_pypy = mark.skipif(PYPY, reason="Doesn't work on PyPy") +require_zmq_4 = mark.skipif(zmq.zmq_version_info() < (4,), reason="requires zmq >= 4") + +# ----------------------------------------------------------------------------- +# Base test class +# ----------------------------------------------------------------------------- + + +def term_context(ctx, timeout): + """Terminate a context with a timeout""" + t = Thread(target=ctx.term) + t.daemon = True + t.start() + t.join(timeout=timeout) + if t.is_alive(): + # reset Context.instance, so the failure to term doesn't corrupt subsequent tests + zmq.sugar.context.Context._instance = None + raise RuntimeError( + "context could not terminate, open sockets likely remain in test" + ) + + +class BaseZMQTestCase(TestCase): + green = False + teardown_timeout = 10 + test_timeout_seconds = int(os.environ.get("ZMQ_TEST_TIMEOUT") or 60) + sockets: List[zmq.Socket] + + @property + def _is_pyzmq_test(self): + return self.__class__.__module__.split(".", 1)[0] == __name__.split(".", 1)[0] + + @property + def _should_test_timeout(self): + return ( + self._is_pyzmq_test + and hasattr(signal, 'SIGALRM') + 
and self.test_timeout_seconds + ) + + @property + def Context(self): + if self.green: + return gzmq.Context + else: + return zmq.Context + + def socket(self, socket_type): + s = self.context.socket(socket_type) + self.sockets.append(s) + return s + + def _alarm_timeout(self, timeout, *args): + raise TimeoutError(f"Test did not complete in {timeout} seconds") + + def setUp(self): + super().setUp() + if self.green and not have_gevent: + raise SkipTest("requires gevent") + + self.context = self.Context.instance() + self.sockets = [] + if self._should_test_timeout: + # use SIGALRM to avoid test hangs + signal.signal( + signal.SIGALRM, partial(self._alarm_timeout, self.test_timeout_seconds) + ) + signal.alarm(self.test_timeout_seconds) + + def tearDown(self): + if self._should_test_timeout: + # cancel the timeout alarm, if there was one + signal.alarm(0) + contexts = {self.context} + while self.sockets: + sock = self.sockets.pop() + contexts.add(sock.context) # in case additional contexts are created + sock.close(0) + for ctx in contexts: + try: + term_context(ctx, self.teardown_timeout) + except Exception: + # reset Context.instance, so the failure to term doesn't corrupt subsequent tests + zmq.sugar.context.Context._instance = None + raise + + super().tearDown() + + def create_bound_pair( + self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1' + ): + """Create a bound socket pair using a random port.""" + s1 = self.context.socket(type1) + s1.setsockopt(zmq.LINGER, 0) + port = s1.bind_to_random_port(interface) + s2 = self.context.socket(type2) + s2.setsockopt(zmq.LINGER, 0) + s2.connect(f'{interface}:{port}') + self.sockets.extend([s1, s2]) + return s1, s2 + + def ping_pong(self, s1, s2, msg): + s1.send(msg) + msg2 = s2.recv() + s2.send(msg2) + msg3 = s1.recv() + return msg3 + + def ping_pong_json(self, s1, s2, o): + if jsonapi.jsonmod is None: + raise SkipTest("No json library") + s1.send_json(o) + o2 = s2.recv_json() + s2.send_json(o2) + o3 = 
s1.recv_json() + return o3 + + def ping_pong_pyobj(self, s1, s2, o): + s1.send_pyobj(o) + o2 = s2.recv_pyobj() + s2.send_pyobj(o2) + o3 = s1.recv_pyobj() + return o3 + + def assertRaisesErrno(self, errno, func, *args, **kwargs): + try: + func(*args, **kwargs) + except zmq.ZMQError as e: + self.assertEqual( + e.errno, + errno, + f"wrong error raised, expected '{zmq.ZMQError(errno)}' \ +got '{zmq.ZMQError(e.errno)}'", + ) + else: + self.fail("Function did not raise any error") + + def _select_recv(self, multipart, socket, **kwargs): + """call recv[_multipart] in a way that raises if there is nothing to receive""" + if zmq.zmq_version_info() >= (3, 1, 0): + # zmq 3.1 has a bug, where poll can return false positives, + # so we wait a little bit just in case + # See LIBZMQ-280 on JIRA + time.sleep(0.1) + + r, w, x = zmq.select([socket], [], [], timeout=kwargs.pop('timeout', 5)) + assert len(r) > 0, "Should have received a message" + kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0) + + recv = socket.recv_multipart if multipart else socket.recv + return recv(**kwargs) + + def recv(self, socket, **kwargs): + """call recv in a way that raises if there is nothing to receive""" + return self._select_recv(False, socket, **kwargs) + + def recv_multipart(self, socket, **kwargs): + """call recv_multipart in a way that raises if there is nothing to receive""" + return self._select_recv(True, socket, **kwargs) + + +class PollZMQTestCase(BaseZMQTestCase): + pass + + +class GreenTest: + """Mixin for making green versions of test classes""" + + green = True + teardown_timeout = 10 + + def assertRaisesErrno(self, errno, func, *args, **kwargs): + if errno == zmq.EAGAIN: + raise SkipTest("Skipping because we're green.") + try: + func(*args, **kwargs) + except zmq.ZMQError: + e = sys.exc_info()[1] + self.assertEqual( + e.errno, + errno, + f"wrong error raised, expected '{zmq.ZMQError(errno)}' \ +got '{zmq.ZMQError(e.errno)}'", + ) + else: + self.fail("Function did not raise any 
error") + + def tearDown(self): + if self._should_test_timeout: + # cancel the timeout alarm, if there was one + signal.alarm(0) + contexts = {self.context} + while self.sockets: + sock = self.sockets.pop() + contexts.add(sock.context) # in case additional contexts are created + sock.close() + try: + gevent.joinall( + [gevent.spawn(ctx.term) for ctx in contexts], + timeout=self.teardown_timeout, + raise_error=True, + ) + except gevent.Timeout: + raise RuntimeError( + "context could not terminate, open sockets likely remain in test" + ) + + def skip_green(self): + raise SkipTest("Skipping because we are green") + + +def skip_green(f): + def skipping_test(self, *args, **kwargs): + if self.green: + raise SkipTest("Skipping because we are green") + else: + return f(self, *args, **kwargs) + + return skipping_test diff --git a/.venv/lib/python3.11/site-packages/zmq/tests/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zmq/tests/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e1d2b29edcfabb233c344c23489c4a793f77752 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zmq/tests/__pycache__/__init__.cpython-311.pyc differ