repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx | rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx/tests/test_ucxx_config.py | from __future__ import annotations
import os
from time import sleep
import pytest
import dask
from distributed import Client
from distributed.utils import get_ip, open_port
from distributed.utils_test import popen
from distributed_ucxx.ucxx import _prepare_ucx_config
from distributed_ucxx.utils_test import gen_test
pytestmark = pytest.mark.gpu
try:
HOST = get_ip()
except Exception:
HOST = "127.0.0.1"
ucxx = pytest.importorskip("ucxx")
rmm = pytest.importorskip("rmm")
@gen_test()
async def test_ucx_config(ucxx_loop, cleanup):
    """Verify `_prepare_ucx_config` translation of Dask's UCX options.

    Each case is a tuple of (dask ucx options, extra dask config, expected
    UCX config, expected extra environment variables).
    """
    cases = [
        (
            {
                "nvlink": True,
                "infiniband": True,
                "rdmacm": False,
                "tcp": True,
                "cuda-copy": True,
            },
            {},
            {"TLS": "rc,tcp,cuda_copy,cuda_ipc", "SOCKADDR_TLS_PRIORITY": "tcp"},
            {},
        ),
        (
            {
                "nvlink": False,
                "infiniband": True,
                "rdmacm": False,
                "tcp": True,
                "cuda-copy": False,
            },
            {},
            {"TLS": "rc,tcp", "SOCKADDR_TLS_PRIORITY": "tcp"},
            {},
        ),
        (
            {
                "nvlink": False,
                "infiniband": True,
                "rdmacm": True,
                "tcp": True,
                "cuda-copy": True,
            },
            {},
            {"TLS": "rc,tcp,cuda_copy", "SOCKADDR_TLS_PRIORITY": "rdmacm"},
            {},
        ),
        (
            # All-None options must produce an empty UCX config.
            {
                "nvlink": None,
                "infiniband": None,
                "rdmacm": None,
                "tcp": None,
                "cuda-copy": None,
            },
            {},
            {},
            {},
        ),
        (
            {
                "nvlink": False,
                "infiniband": True,
                "rdmacm": True,
                "tcp": True,
                "cuda-copy": True,
            },
            # "tls" is controlled by the config above and must NOT leak into
            # the environment; "memtrack-dest" must.
            {
                "distributed.comm.ucx.environment": {
                    "tls": "all",
                    "memtrack-dest": "stdout",
                }
            },
            {"TLS": "rc,tcp,cuda_copy", "SOCKADDR_TLS_PRIORITY": "rdmacm"},
            {"UCX_MEMTRACK_DEST": "stdout"},
        ),
    ]

    for ucx, extra_config, expected_config, expected_environment in cases:
        with dask.config.set({"distributed.comm.ucx": ucx, **extra_config}):
            ucx_config, ucx_environment = _prepare_ucx_config()
        assert ucx_config == expected_config
        assert ucx_environment == expected_environment
@pytest.mark.skipif(
    int(os.environ.get("UCXPY_ENABLE_PYTHON_FUTURE", "0")) != 0,
    reason="Workers running without a `Nanny` can't be closed properly",
)
def test_ucx_config_w_env_var(ucxx_loop, cleanup, loop):
    # Spawn a real scheduler/worker pair over the UCXX protocol and verify
    # that the `DASK_DISTRIBUTED__RMM__POOL_SIZE` environment variable makes
    # both processes use an RMM pool memory resource.
    env = os.environ.copy()
    env["DASK_DISTRIBUTED__RMM__POOL_SIZE"] = "1000.00 MB"

    port = str(open_port())
    # Using localhost appears to be less flaky than {HOST}. Additionally, this is
    # closer to how other dask worker tests are written.
    sched_addr = f"ucxx://127.0.0.1:{port}"

    with popen(
        ["dask", "scheduler", "--no-dashboard", "--protocol", "ucxx", "--port", port],
        env=env,
    ):
        with popen(
            [
                "dask",
                "worker",
                sched_addr,
                "--host",
                "127.0.0.1",
                "--no-dashboard",
                "--protocol",
                "ucxx",
                "--no-nanny",
            ],
            env=env,
        ):
            with Client(sched_addr, loop=loop, timeout=60) as c:
                # Wait until the worker has registered with the scheduler.
                while not c.scheduler_info()["workers"]:
                    sleep(0.1)

                # Check for RMM pool resource type
                rmm_resource = c.run_on_scheduler(
                    rmm.mr.get_current_device_resource_type
                )
                assert rmm_resource == rmm.mr.PoolMemoryResource

                rmm_resource_workers = c.run(rmm.mr.get_current_device_resource_type)
                for v in rmm_resource_workers.values():
                    assert v == rmm.mr.PoolMemoryResource
| 0 |
rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx | rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx/tests/conftest.py | # We try to be as close as possible to Distributed's testing, thus this file
# was taken from https://github.com/dask/distributed/blob/main/conftest.py,
# and minimal changes were applied.
# https://pytest.org/latest/example/simple.html#control-skipping-of-tests-according-to-command-line-option
from __future__ import annotations
import pytest
try:
import faulthandler
except ImportError:
pass
else:
try:
faulthandler.enable()
except Exception:
pass
# Make all fixtures available
from distributed_ucxx.utils_test import * # noqa
def pytest_addoption(parser):
    """Register the ``--runslow`` command-line flag with pytest."""
    parser.addoption("--runslow", action="store_true", help="run slow tests")
def pytest_collection_modifyitems(config, items):
    """Skip ``slow``-marked tests unless ``--runslow`` was given, and tag
    tests using the ``ws`` fixture with the ``workerstate`` marker."""
    if config.getoption("--runslow"):
        # --runslow given in cli: do not skip slow tests. Note that the
        # early return also skips the `workerstate` tagging, matching the
        # behavior inherited from Distributed's conftest.
        return

    slow_marker = pytest.mark.skip(reason="need --runslow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(slow_marker)
        if "ws" in item.fixturenames:
            item.add_marker(pytest.mark.workerstate)
| 0 |
rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx | rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx/tests/test_nanny.py | from distributed import Nanny, Scheduler
from distributed.utils_test import gen_test
from distributed.worker import Worker
class KeyboardInterruptWorker(Worker):
    """A Worker that raises KeyboardInterrupt almost immediately"""

    async def heartbeat(self):
        # Schedule the interrupt on the event loop instead of raising here,
        # so it propagates from the loop itself (mimicking a user Ctrl-C)
        # rather than from the heartbeat coroutine.
        def raise_err():
            raise KeyboardInterrupt()

        self.loop.add_callback(raise_err)
@gen_test(timeout=120)
async def test_nanny_closed_by_keyboard_interrupt(ucxx_loop):
    # A KeyboardInterrupt raised inside the worker (via
    # `KeyboardInterruptWorker.heartbeat`) must stop the nanny's child
    # process, and the scheduler must learn about the removed worker.
    async with Scheduler(protocol="ucxx", dashboard_address=":0") as s:
        async with Nanny(
            s.address, nthreads=1, worker_class=KeyboardInterruptWorker
        ) as n:
            await n.process.stopped.wait()
            # Check that the scheduler has been notified about the closed worker
            assert "remove-worker" in str(s.events)
| 0 |
rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx | rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx/tests/test_deserialize.py | from __future__ import annotations
import asyncio
import os
from functools import partial
from distributed.comm import connect, listen
from distributed.protocol import Serialized, deserialize, serialize, to_serialize
from distributed.utils_test import gen_test
# Test deserialization
#
# `check_*deserialize` are verbatim copies from Distributed; since they aren't
# exposed publicly they can't be imported here.
async def check_listener_deserialize(addr, deserialize, in_value, check_out):
    """Start a listener with the given ``deserialize`` flag, connect to it,
    send ``in_value`` and validate the message the listener received with
    ``check_out``.
    """
    q = asyncio.Queue()

    async def handle_comm(comm):
        try:
            msg = await comm.read()
        except Exception as exc:
            # Forward exceptions to the main coroutine instead of letting
            # them die in the handler, where they would deadlock `q.get()`.
            q.put_nowait(exc)
        else:
            q.put_nowait(msg)
        finally:
            await comm.close()

    async with listen(addr, handle_comm, deserialize=deserialize) as listener:
        comm = await connect(listener.contact_address)
        await comm.write(in_value)
        out_value = await q.get()
        if isinstance(out_value, Exception):
            raise out_value  # Prevents deadlocks, get actual deserialization exception
        check_out(out_value)
        await comm.close()
async def check_connector_deserialize(addr, deserialize, in_value, check_out):
    """Connect with the given ``deserialize`` flag to a listener that writes
    ``in_value``, then validate the message the connector read with
    ``check_out``.
    """
    done = asyncio.Event()

    async def handle_comm(comm):
        try:
            await comm.write(in_value)
            # Keep the server-side comm open until the client has read.
            await done.wait()
        finally:
            await comm.close()

    async with listen(addr, handle_comm) as listener:
        comm = await connect(listener.contact_address, deserialize=deserialize)

        try:
            out_value = await comm.read()
            done.set()
        finally:
            await comm.close()

    check_out(out_value)
async def check_deserialize(addr):
    """
    Check the "deserialize" flag on connect() and listen().
    """
    # Test with Serialize and Serialized objects
    msg = {
        "op": "update",
        "x": b"abc",
        "to_ser": [to_serialize(123)],
        "ser": Serialized(*serialize(456)),
    }
    msg_orig = msg.copy()

    def check_out_false(out_value):
        # Check output with deserialize=False
        out_value = out_value.copy()  # in case transport passed the object as-is
        to_ser = out_value.pop("to_ser")
        ser = out_value.pop("ser")
        expected_msg = msg_orig.copy()
        del expected_msg["ser"]
        del expected_msg["to_ser"]
        # Plain (non-serialized) entries must arrive untouched.
        assert out_value == expected_msg

        assert isinstance(ser, Serialized)
        assert deserialize(ser.header, ser.frames) == 456

        assert isinstance(to_ser, (tuple, list)) and len(to_ser) == 1
        (to_ser,) = to_ser
        # The to_serialize() value could have been actually serialized
        # or not (it's a transport-specific optimization)
        if isinstance(to_ser, Serialized):
            assert deserialize(to_ser.header, to_ser.frames) == 123
        else:
            assert to_ser == to_serialize(123)

    def check_out_true(out_value):
        # Check output with deserialize=True
        expected_msg = msg.copy()
        expected_msg["ser"] = 456
        expected_msg["to_ser"] = [123]
        # Notice, we allow "to_ser" to be a tuple or a list
        assert list(out_value.pop("to_ser")) == expected_msg.pop("to_ser")
        assert out_value == expected_msg

    await check_listener_deserialize(addr, False, msg, check_out_false)
    await check_connector_deserialize(addr, False, msg, check_out_false)

    await check_listener_deserialize(addr, True, msg, check_out_true)
    await check_connector_deserialize(addr, True, msg, check_out_true)

    # Test with long bytestrings, large enough to be transferred
    # as a separate payload
    # TODO: currently bytestrings are not transferred as a separate payload
    _uncompressible = os.urandom(1024**2) * 4  # end size: 8 MB

    msg = {
        "op": "update",
        "x": _uncompressible,
        "to_ser": (to_serialize(_uncompressible),),
        "ser": Serialized(*serialize(_uncompressible)),
    }
    msg_orig = msg.copy()

    def check_out(deserialize_flag, out_value):
        # Check output with deserialize=False
        assert sorted(out_value) == sorted(msg_orig)
        out_value = out_value.copy()  # in case transport passed the object as-is
        to_ser = out_value.pop("to_ser")
        ser = out_value.pop("ser")
        expected_msg = msg_orig.copy()
        del expected_msg["ser"]
        del expected_msg["to_ser"]
        assert out_value == expected_msg

        if deserialize_flag:
            assert isinstance(ser, (bytes, bytearray))
            assert bytes(ser) == _uncompressible
        else:
            assert isinstance(ser, Serialized)
            assert deserialize(ser.header, ser.frames) == _uncompressible

        assert isinstance(to_ser, tuple) and len(to_ser) == 1
        (to_ser,) = to_ser
        # The to_serialize() value could have been actually serialized
        # or not (it's a transport-specific optimization)
        if isinstance(to_ser, Serialized):
            assert deserialize(to_ser.header, to_ser.frames) == _uncompressible
        else:
            assert to_ser == to_serialize(_uncompressible)

    await check_listener_deserialize(addr, False, msg, partial(check_out, False))
    await check_connector_deserialize(addr, False, msg, partial(check_out, False))

    await check_listener_deserialize(addr, True, msg, partial(check_out, True))
    await check_connector_deserialize(addr, True, msg, partial(check_out, True))
@gen_test()
async def test_ucxx_deserialize(ucxx_loop):
    # Note we see this error on some systems with this test:
    # `socket.gaierror: [Errno -5] No address associated with hostname`
    # This may be due to a system configuration issue.
    # NOTE(review): despite the test's name and the `ucxx_loop` fixture, this
    # exercises the "tcp://" transport (matching the upstream Distributed
    # test it was copied from) — confirm whether "ucxx://" was intended here.
    await check_deserialize("tcp://")
| 0 |
rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx | rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx/tests/test_worker.py | import pytest
from distributed import Client, Nanny
from distributed.scheduler import Scheduler
from distributed.worker import Worker
from distributed_ucxx.utils_test import gen_test
@pytest.mark.parametrize("Worker", [Worker, Nanny])
@gen_test()
async def test_protocol_from_scheduler_address(ucxx_loop, Worker):
pytest.importorskip("ucxx")
async with Scheduler(protocol="ucxx", dashboard_address=":0") as s:
assert s.address.startswith("ucxx://")
async with Worker(s.address) as w:
assert w.address.startswith("ucxx://")
async with Client(s.address, asynchronous=True) as c:
info = c.scheduler_info()
assert info["address"].startswith("ucxx://")
| 0 |
rapidsai_public_repos/ucxx/python | rapidsai_public_repos/ucxx/python/ucxx/core.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import gc
import logging
import os
import re
import weakref
from ._lib import libucxx as ucx_api
from ._lib.libucxx import UCXError
from ._lib_async import ApplicationContext
logger = logging.getLogger("ucx")
# The module should only instantiate one instance of the application context
# However, the init of CUDA must happen after all process forks thus we delay
# the instantiation of the application context to the first use of the API.
_ctx = None
def _get_ctx():
    """Return the module-wide ``ApplicationContext``, creating it lazily."""
    global _ctx
    if _ctx is not None:
        return _ctx
    _ctx = ApplicationContext()
    return _ctx
# The following functions initialize and use a single ApplicationContext instance
def init(
    options=None,
    env_takes_precedence=False,
    progress_mode=None,
    enable_delayed_submission=None,
    enable_python_future=None,
):
    """Initiate UCX.

    Usually this is done automatically at the first API call
    but this function makes it possible to set UCX options programmable.
    Alternatively, UCX options can be specified through environment variables.

    Parameters
    ----------
    options: dict, optional
        UCX options send to the underlying UCX library. ``None`` (default)
        is equivalent to an empty dict.
    env_takes_precedence: bool, optional
        Whether environment variables takes precedence over the `options`
        specified here.
    progress_mode: string, optional
        If None, thread UCX progress mode is used unless the environment variable
        `UCXPY_PROGRESS_MODE` is defined. Otherwise the options are 'blocking',
        'polling', 'thread'.
    enable_delayed_submission: boolean, optional
        If None, delayed submission is disabled unless
        `UCXPY_ENABLE_DELAYED_SUBMISSION` is defined with a value other than `0`.
    enable_python_future: boolean, optional
        If None, request notification via Python futures is disabled unless
        `UCXPY_ENABLE_PYTHON_FUTURE` is defined with a value other than `0`.

    Raises
    ------
    RuntimeError
        If UCX was already initiated via a previous ``init()`` or API call.
    """
    global _ctx
    if _ctx is not None:
        raise RuntimeError(
            "UCX is already initiated. Call reset() and init() "
            "in order to re-initate UCX with new options."
        )
    # `None` avoids the mutable-default-argument pitfall; copy so the
    # caller's dict is never mutated by the environment overrides below.
    options = {} if options is None else options.copy()
    for k, v in options.items():
        env_k = f"UCX_{k}"
        env_v = os.environ.get(env_k)
        if env_v is not None:
            if env_takes_precedence:
                options[k] = env_v
                logger.debug(
                    f"Ignoring option {k}={v}; using environment {env_k}={env_v}"
                )
            else:
                logger.debug(
                    f"Ignoring environment {env_k}={env_v}; using option {k}={v}"
                )

    _ctx = ApplicationContext(
        options,
        progress_mode=progress_mode,
        enable_delayed_submission=enable_delayed_submission,
        enable_python_future=enable_python_future,
    )
def reset():
    """Resets the UCX library by shutting down all of UCX.

    The library is initiated at next API call.
    """
    stop_notifier_thread()
    global _ctx
    if _ctx is not None:
        # Hold only a weakref so that dropping our reference (plus a GC
        # collection for reference cycles) should destroy the context;
        # anything still holding it afterwards is a leaked reference.
        weakref_ctx = weakref.ref(_ctx)
        _ctx = None
        gc.collect()
        if weakref_ctx() is not None:
            msg = (
                "Trying to reset UCX but not all Endpoints and/or Listeners "
                "are closed(). The following objects are still referencing "
                "ApplicationContext: "
            )
            # List the offending referrers to help the user find the leak.
            for o in gc.get_referrers(weakref_ctx()):
                msg += "\n %s" % str(o)
            raise UCXError(msg)
def stop_notifier_thread():
    # Delegate to the application context when one exists; the module-level
    # docstring for this wrapper is assigned at the bottom of this file.
    if _ctx is None:
        logger.debug("UCX is not initialized.")
        return
    _ctx.stop_notifier_thread()
def get_ucx_version():
    """Return the version of the underlying UCX installation

    Notice, this function doesn't initialize UCX.

    Returns
    -------
    tuple
        The version as a tuple e.g. (1, 7, 0)
    """
    return ucx_api.get_ucx_version()


def progress():
    """Try to progress the communication layer

    Warning, it is illegal to call this from a call-back function such as
    the call-back function given to create_listener.
    """
    return _get_ctx().worker.progress()


def get_config():
    """Returns all UCX configuration options as a dict.

    If UCX is uninitialized, the options returned are the
    options used if UCX were to be initialized now.
    Notice, this function doesn't initialize UCX.

    Returns
    -------
    dict
        The current UCX configuration options
    """
    if _ctx is None:
        # Query the options without creating an ApplicationContext, so that
        # calling get_config() never triggers UCX initialization.
        return ucx_api.get_current_options()
    else:
        return _get_ctx().get_config()
def create_listener(
    callback_func,
    port=None,
    endpoint_error_handling=True,
    exchange_peer_info_timeout=5.0,
):
    # Thin wrapper over `ApplicationContext.create_listener`; the docstring
    # is copied from that method at the bottom of this module.
    return _get_ctx().create_listener(
        callback_func,
        port,
        endpoint_error_handling=endpoint_error_handling,
        exchange_peer_info_timeout=exchange_peer_info_timeout,
    )


async def create_endpoint(
    ip_address, port, endpoint_error_handling=True, exchange_peer_info_timeout=5.0
):
    # Thin wrapper over `ApplicationContext.create_endpoint`; docstring is
    # copied at the bottom of this module.
    return await _get_ctx().create_endpoint(
        ip_address,
        port,
        endpoint_error_handling=endpoint_error_handling,
        exchange_peer_info_timeout=exchange_peer_info_timeout,
    )


async def create_endpoint_from_worker_address(
    address,
    endpoint_error_handling=True,
):
    # Thin wrapper over the application context method of the same name.
    return await _get_ctx().create_endpoint_from_worker_address(
        address,
        endpoint_error_handling=endpoint_error_handling,
    )
def get_ucp_context_info():
    """Gets information on the current UCX context, obtained from
    `ucp_context_print_info`.
    """
    # Note: initializes UCX on first use, via `_get_ctx()`.
    return _get_ctx().ucp_context_info()


def get_ucp_worker_info():
    """Gets information on the current UCX worker, obtained from
    `ucp_worker_print_info`.
    """
    # Note: initializes UCX on first use, via `_get_ctx()`.
    return _get_ctx().ucp_worker_info()
def get_active_transports():
    """Returns a set of all transports that are available and are currently
    active in UCX, meaning UCX **may** use them depending on the type of
    transfers and how it is configured but is not required to do so.

    Returns
    -------
    set
        Names of the active transports (e.g. ``{"tcp", "cuda_copy"}``).
    """
    info = get_ucp_context_info()
    resources = re.findall("^#.*resource.*md.*dev.*flags.*$", info, re.MULTILINE)
    # The transport name is the token before "/" in the last column of each
    # resource line; a set comprehension avoids building a throwaway list.
    return {r.split()[-1].split("/")[0] for r in resources}
def continuous_ucx_progress(event_loop=None):
    # Thin wrapper; docstring copied from the ApplicationContext method below.
    _get_ctx().continuous_ucx_progress(event_loop=event_loop)


def get_ucp_worker():
    # Thin wrapper; docstring copied from the ApplicationContext method below.
    return _get_ctx().get_ucp_worker()


def get_worker_address():
    # Returns the address of the current UCX worker (initializing UCX if
    # needed through `_get_ctx()`).
    return _get_ctx().get_worker_address()


def get_ucx_address_from_buffer(buffer):
    # Pure wrapper over the Cython API; does not require an initialized
    # application context.
    return ucx_api.UCXAddress.create_from_buffer(buffer)


async def recv(buffer, tag):
    # Receive a tagged message directly on the worker into `buffer`.
    return await _get_ctx().recv(buffer, tag=tag)


# Setting the __doc__
create_listener.__doc__ = ApplicationContext.create_listener.__doc__
create_endpoint.__doc__ = ApplicationContext.create_endpoint.__doc__
continuous_ucx_progress.__doc__ = ApplicationContext.continuous_ucx_progress.__doc__
get_ucp_worker.__doc__ = ApplicationContext.get_ucp_worker.__doc__
stop_notifier_thread.__doc__ = ApplicationContext.stop_notifier_thread.__doc__
| 0 |
rapidsai_public_repos/ucxx/python | rapidsai_public_repos/ucxx/python/ucxx/testing.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import time
from multiprocessing.process import BaseProcess
from typing import Type, Union
def terminate_process(
    process: BaseProcess, kill_wait: Union[float, int] = 3.0
) -> None:
    """
    Ensure a spawned process is terminated.

    Ensure a spawned process is really terminated to prevent the parent process
    (such as pytest) from freezing upon exit.

    Parameters
    ----------
    process: BaseProcess
        The process (instance, not class — the previous ``Type[BaseProcess]``
        annotation was incorrect) to be terminated.
    kill_wait: float or integer
        Maximum time to wait for the kill signal to terminate the process.

    Raises
    ------
    RuntimeError
        If the process terminated with a non-zero exit code.
    ValueError
        If the process was still alive after ``kill_wait`` seconds.
    """
    # Ensure process doesn't remain alive and hangs pytest
    if process.is_alive():
        process.kill()
        start_time = time.monotonic()
        while time.monotonic() - start_time < kill_wait:
            if not process.is_alive():
                break
            # Short sleep instead of a hot busy-wait while the kill signal
            # is being delivered.
            time.sleep(0.01)

    if process.is_alive():
        # `BaseProcess.close()` raises ValueError on a still-running process,
        # which is the ValueError documented above.
        process.close()
    elif process.exitcode != 0:
        raise RuntimeError(
            f"Process did not exit cleanly (exit code: {process.exitcode})"
        )
def wait_requests(worker, progress_mode, requests):
    """Busy-wait until every request completes, then re-raise any error.

    Parameters
    ----------
    worker
        Worker used to progress events when ``progress_mode`` is "blocking".
    progress_mode: str
        Progress mode in use; only "blocking" requires explicit progressing.
    requests
        A single request or a list of requests to wait on.
    """
    reqs = requests if isinstance(requests, list) else [requests]

    while not all(req.is_completed() for req in reqs):
        if progress_mode == "blocking":
            worker.progress_worker_event()

    for req in reqs:
        req.check_error()
| 0 |
rapidsai_public_repos/ucxx/python | rapidsai_public_repos/ucxx/python/ucxx/exceptions.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
from ._lib.libucxx import ( # noqa
UCXAlreadyExistsError,
UCXBufferTooSmallError,
UCXBusyError,
UCXCanceled,
UCXCanceledError,
UCXCloseError,
UCXConfigError,
UCXConnectionResetError,
UCXEndpointTimeoutError,
UCXError,
UCXExceedsLimitError,
UCXFirstEndpointFailureError,
UCXFirstLinkFailureError,
UCXInvalidAddrError,
UCXInvalidParamError,
UCXIOError,
UCXLastEndpointFailureError,
UCXLastLinkFailureError,
UCXMessageTruncatedError,
UCXMsgTruncated,
UCXNoDeviceError,
UCXNoElemError,
UCXNoMemoryError,
UCXNoMessageError,
UCXNoProgressError,
UCXNoResourceError,
UCXNotConnectedError,
UCXNotImplementedError,
UCXOutOfRangeError,
UCXRejectedError,
UCXShmemSegmentError,
UCXSomeConnectsFailedError,
UCXTimedOutError,
UCXUnreachableError,
UCXUnsupportedError,
)
| 0 |
rapidsai_public_repos/ucxx/python | rapidsai_public_repos/ucxx/python/ucxx/__init__.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
"""UCXX: Python bindings for the Unified Communication X library (UCX <www.openucx.org>)"""
import logging
import os

logger = logging.getLogger("ucx")

# Notice, if we have to update environment variables we need to do it
# before importing UCX, which must happen also before the Cython code
# import to prevent UCS unused variable warnings.
if "UCX_MEMTYPE_CACHE" not in os.environ:
    # See <https://github.com/openucx/ucx/wiki/NVIDIA-GPU-Support#known-issues>
    logger.debug("Setting env UCX_MEMTYPE_CACHE=n, which is required by UCX")
    os.environ["UCX_MEMTYPE_CACHE"] = "n"

from . import exceptions, testing  # noqa
from ._lib import libucxx  # type: ignore
from .core import *  # noqa
from .utils import get_address, get_ucxpy_logger  # noqa

try:
    import pynvml
except ImportError:
    # pynvml is optional; the BAR1-based tuning further below is skipped
    # when it is unavailable.
    pynvml = None

# Setup UCX-Py logger
logger = get_ucxpy_logger()

# Default tuning knobs; each is only applied when the user has not already
# set the corresponding environment variable.
if "UCX_RNDV_THRESH" not in os.environ:
    logger.info("Setting UCX_RNDV_THRESH=8192")
    os.environ["UCX_RNDV_THRESH"] = "8192"

if "UCX_RNDV_FRAG_MEM_TYPE" not in os.environ:
    logger.info("Setting UCX_RNDV_FRAG_MEM_TYPE=cuda")
    os.environ["UCX_RNDV_FRAG_MEM_TYPE"] = "cuda"
# Raise `UCX_CUDA_COPY_MAX_REG_RATIO` to 1.0 only when all GPUs expose their
# entire memory through BAR1 ("large BAR1"), and only on UCX >= 1.12.
if (
    pynvml is not None
    and "UCX_CUDA_COPY_MAX_REG_RATIO" not in os.environ
    and get_ucx_version() >= (1, 12, 0)
):
    try:
        pynvml.nvmlInit()
        device_count = pynvml.nvmlDeviceGetCount()
        large_bar1 = [False] * device_count

        def _is_mig_device(handle):
            # Querying MIG mode raises NVMLError on non-MIG devices.
            try:
                pynvml.nvmlDeviceGetMigMode(handle)[0]
            except pynvml.NVMLError:
                return False
            return True

        for dev_idx in range(device_count):
            handle = pynvml.nvmlDeviceGetHandleByIndex(dev_idx)

            # Ignore MIG devices and use rely on UCX's default for now. Increasing
            # `UCX_CUDA_COPY_MAX_REG_RATIO` should be thoroughly tested, as it's
            # not yet clear whether it would be safe to set `1.0` for those
            # instances too.
            if _is_mig_device(handle):
                continue

            total_memory = pynvml.nvmlDeviceGetMemoryInfo(handle).total
            bar1_total = pynvml.nvmlDeviceGetBAR1MemoryInfo(handle).bar1Total

            if total_memory <= bar1_total:
                large_bar1[dev_idx] = True

        if all(large_bar1):
            logger.info("Setting UCX_CUDA_COPY_MAX_REG_RATIO=1.0")
            os.environ["UCX_CUDA_COPY_MAX_REG_RATIO"] = "1.0"
    except (
        pynvml.NVMLError_LibraryNotFound,
        pynvml.NVMLError_DriverNotLoaded,
        pynvml.NVMLError_Unknown,
    ):
        # No usable NVML/driver: silently keep UCX defaults.
        pass

if "UCX_MAX_RNDV_RAILS" not in os.environ and get_ucx_version() >= (1, 12, 0):
    logger.info("Setting UCX_MAX_RNDV_RAILS=1")
    os.environ["UCX_MAX_RNDV_RAILS"] = "1"

__version__ = "0.36.00"
__ucx_version__ = "%d.%d.%d" % get_ucx_version()

if get_ucx_version() < (1, 11, 1):
    raise ImportError(
        f"Support for UCX {__ucx_version__} has ended. Please upgrade to "
        "1.11.1 or newer."
    )
| 0 |
rapidsai_public_repos/ucxx/python | rapidsai_public_repos/ucxx/python/ucxx/utils.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import fcntl
import glob
import logging
import multiprocessing as mp
import os
import socket
import struct
import time
import numpy as np
# NOTE(review): "spawn" is presumably used so that child processes can safely
# initialize CUDA — confirm before changing the start method.
mp = mp.get_context("spawn")

try:
    from nvtx import annotate as nvtx_annotate
except ImportError:
    # If nvtx module is not installed, `annotate` yields only.
    from contextlib import contextmanager

    @contextmanager
    def nvtx_annotate(message=None, color=None, domain=None):
        yield
try:
    from dask.utils import format_bytes, format_time, parse_bytes
except ImportError:
    # Minimal fallbacks for when Dask is not installed.

    def format_time(x):
        # Format a duration in seconds as ns/us/ms/s.
        if x < 1e-6:
            return f"{x * 1e9:.3f} ns"
        if x < 1e-3:
            return f"{x * 1e6:.3f} us"
        if x < 1:
            return f"{x * 1e3:.3f} ms"
        else:
            return f"{x:.3f} s"

    def format_bytes(x):
        """Return formatted string in B, KiB, MiB, GiB or TiB"""
        if x < 1024:
            return f"{x} B"
        elif x < 1024**2:
            return f"{x / 1024:.2f} KiB"
        elif x < 1024**3:
            return f"{x / 1024**2:.2f} MiB"
        elif x < 1024**4:
            return f"{x / 1024**3:.2f} GiB"
        else:
            return f"{x / 1024**4:.2f} TiB"

    # No fallback implementation exists; callers must handle None.
    parse_bytes = None
def print_separator(separator="-", length=80):
    """Print a single separator character multiple times"""
    print(length * separator)


def print_key_value(key, value, key_length=25):
    """Print a key and value with fixed key-field length"""
    print(f"{key: <{key_length}} | {value}")


def print_multi(values, key_length=25):
    """Print several values separated by pipes, with fixed field lengths."""
    assert isinstance(values, (tuple, list))
    assert len(values) > 1
    fields = [f"{s: <{key_length}} | " for s in values[:-1]]
    print("".join(fields) + values[-1])
def hmean(a):
    """Harmonic mean of ``a``, or 0 for an empty input.

    Parameters
    ----------
    a : array-like
        Values to average; any sequence or ndarray of nonzero numbers.
        (Previously plain Python sequences failed on the ``1 / a`` step;
        converting with ``np.asarray`` generalizes backward-compatibly.)

    Returns
    -------
    float
        The harmonic mean, or 0 when ``a`` is empty.
    """
    a = np.asarray(a)
    if len(a):
        return 1 / np.mean(1 / a)
    else:
        return 0
def get_ucxpy_logger():
    """
    Get UCX-Py logger with custom formatting

    Returns
    -------
    logger : logging.Logger
        Logger object

    Examples
    --------
    >>> logger = get_ucxpy_logger()
    >>> logger.warning("Test")
    [1585175070.2911468] [dgx12:1054] UCXPY WARNING Test
    """
    _level_enum = logging.getLevelName(os.getenv("UCXPY_LOG_LEVEL", "WARNING"))
    logger = logging.getLogger("ucx")

    # Avoid duplicate logging
    logger.propagate = False

    # Guard against attaching a second handler when this function is called
    # more than once, which would duplicate every log record.
    if not logger.handlers:

        class LoggingFilter(logging.Filter):
            # Inject hostname and a high-resolution timestamp into records,
            # consumed by the formatter below.
            def filter(self, record):
                record.hostname = socket.gethostname()
                record.timestamp = str("%.6f" % time.time())
                return True

        formatter = logging.Formatter(
            "[%(timestamp)s] [%(hostname)s:%(process)d] UCXPY %(levelname)s %(message)s"
        )

        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        handler.addFilter(LoggingFilter())
        logger.addHandler(handler)

    logger.setLevel(_level_enum)

    return logger
def get_address(ifname=None):
    """
    Get the address associated with a network interface.

    Parameters
    ----------
    ifname : str
        The network interface name to find the address for.
        If None, it uses the value of environment variable `UCXPY_IFNAME`
        and if `UCXPY_IFNAME` is not set it defaults to "ib0"
        An OSError is raised for invalid interfaces.

    Returns
    -------
    address : str
        The inet addr associated with an interface.

    Examples
    --------
    >>> get_address()
    '10.33.225.160'

    >>> get_address(ifname='lo')
    '127.0.0.1'
    """
    # NOTE(review): the docstring says the default is "ib0", but the code
    # below actually probes ib*/eth*/en* interfaces in priority order —
    # confirm which behavior is intended.

    def _get_address(ifname):
        # SIOCGIFADDR (0x8915) ioctl returns the interface's IPv4 address;
        # interface names are truncated to 15 bytes as the kernel requires.
        ifname = ifname.encode()
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            return socket.inet_ntoa(
                fcntl.ioctl(
                    s.fileno(), 0x8915, struct.pack("256s", ifname[:15])  # SIOCGIFADDR
                )[20:24]
            )

    def _try_interfaces():
        # Probe interfaces grouped by prefix priority (InfiniBand first),
        # in sorted name order, returning the first one with an address.
        prefix_priority = ["ib", "eth", "en"]
        iftypes = {p: [] for p in prefix_priority}
        for i in glob.glob("/sys/class/net/*"):
            name = i.split("/")[-1]
            for p in prefix_priority:
                if name.startswith(p):
                    iftypes[p].append(name)
        for p in prefix_priority:
            iftype = iftypes[p]
            iftype.sort()
            for i in iftype:
                try:
                    return _get_address(i)
                except OSError:
                    pass

    if ifname is None:
        ifname = os.environ.get("UCXPY_IFNAME")

    if ifname is not None:
        return _get_address(ifname)
    else:
        return _try_interfaces()
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/application_context.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import logging
import os
import threading
import weakref
from queue import Queue
import ucxx._lib.libucxx as ucx_api
from ucxx._lib.arr import Array
from ucxx.exceptions import UCXMessageTruncatedError
from .continuous_ucx_progress import PollingMode, ThreadMode
from .endpoint import Endpoint
from .exchange_peer_info import exchange_peer_info
from .listener import ActiveClients, Listener, _listener_handler
from .notifier_thread import _notifierThread
from .utils import get_event_loop, hash64bits
logger = logging.getLogger("ucx")
class ApplicationContext:
"""
The context of the Asyncio interface of UCX.
"""
def __init__(
self,
config_dict={},
progress_mode=None,
enable_delayed_submission=None,
enable_python_future=None,
exchange_peer_info_timeout=10.0,
):
self.progress_tasks = []
self.notifier_thread_q = None
self.notifier_thread = None
self._listener_active_clients = ActiveClients()
self._next_listener_id = 0
self.progress_mode = ApplicationContext._check_progress_mode(progress_mode)
enable_delayed_submission = ApplicationContext._check_enable_delayed_submission(
enable_delayed_submission,
self.progress_mode,
)
enable_python_future = ApplicationContext._check_enable_python_future(
enable_python_future, self.progress_mode
)
self.exchange_peer_info_timeout = exchange_peer_info_timeout
# For now, a application context only has one worker
self.context = ucx_api.UCXContext(config_dict)
self.worker = ucx_api.UCXWorker(
self.context,
enable_delayed_submission=enable_delayed_submission,
enable_python_future=enable_python_future,
)
self.start_notifier_thread()
weakref.finalize(self, self.progress_tasks.clear)
# Ensure progress even before Endpoints get created, for example to
# receive messages directly on a worker after a remote endpoint
# connected with `create_endpoint_from_worker_address`.
self.continuous_ucx_progress()
@staticmethod
def _check_progress_mode(progress_mode):
if progress_mode is None:
if "UCXPY_PROGRESS_MODE" in os.environ:
progress_mode = os.environ["UCXPY_PROGRESS_MODE"]
else:
progress_mode = "thread"
valid_progress_modes = ["polling", "thread", "thread-polling"]
if not isinstance(progress_mode, str) or not any(
progress_mode == m for m in valid_progress_modes
):
raise ValueError(
f"Unknown progress mode {progress_mode}, "
"valid modes are: 'blocking', 'polling', 'thread' or 'thread-polling'"
)
return progress_mode
@staticmethod
def _check_enable_delayed_submission(enable_delayed_submission, progress_mode):
if enable_delayed_submission is None:
if "UCXPY_ENABLE_DELAYED_SUBMISSION" in os.environ:
explicit_enable_delayed_submission = (
False
if os.environ["UCXPY_ENABLE_DELAYED_SUBMISSION"] == "0"
else True
)
else:
explicit_enable_delayed_submission = progress_mode.startswith("thread")
else:
explicit_enable_delayed_submission = enable_delayed_submission
if (
not progress_mode.startswith("thread")
and explicit_enable_delayed_submission
):
raise ValueError(
f"Delayed submission requested, but {progress_mode} does not "
"support it, 'thread' or 'thread-polling' progress mode required."
)
return explicit_enable_delayed_submission
@staticmethod
def _check_enable_python_future(enable_python_future, progress_mode):
if enable_python_future is None:
if "UCXPY_ENABLE_PYTHON_FUTURE" in os.environ:
explicit_enable_python_future = (
os.environ["UCXPY_ENABLE_PYTHON_FUTURE"] != "0"
)
else:
explicit_enable_python_future = False
else:
explicit_enable_python_future = enable_python_future
if not progress_mode.startswith("thread") and explicit_enable_python_future:
logger.warning(
f"Notifier thread requested, but {progress_mode} does not "
"support it, using Python wait_yield()."
)
explicit_enable_python_future = False
return explicit_enable_python_future
    def start_notifier_thread(self):
        """Start the Python-future notifier thread when the worker has
        Python-future support enabled; otherwise do nothing."""
        if self.worker.is_python_future_enabled():
            logger.debug("UCXX_ENABLE_PYTHON available, enabling notifier thread")
            loop = get_event_loop()
            self.notifier_thread_q = Queue()
            self.notifier_thread = threading.Thread(
                target=_notifierThread,
                args=(loop, self.worker, self.notifier_thread_q),
                name="UCX-Py Async Notifier Thread",
            )
            self.notifier_thread.start()
        else:
            logger.debug(
                "UCXX not compiled with UCXX_ENABLE_PYTHON, disabling notifier thread"
            )
    def stop_notifier_thread(self):
        """
        Stop Python future notifier thread

        Stop the notifier thread if context is running with Python future
        notification enabled via `UCXPY_ENABLE_PYTHON_FUTURE=1` or
        `ucxx.init(..., enable_python_future=True)`.

        .. warning:: When the notifier thread is enabled it may be necessary to
            explicitly call this method before shutting down the process or
            application, otherwise it may block indefinitely waiting for
            the thread to terminate. Executing `ucxx.reset()` will also run
            this method, so it's not necessary to have both.
        """
        if self.notifier_thread_q and self.notifier_thread:
            # The thread watches its queue for this sentinel and exits.
            self.notifier_thread_q.put("shutdown")
            while True:
                # Having a timeout is required. During the notifier thread shutdown
                # it may require the GIL, which will cause a deadlock with the `join()`
                # call otherwise.
                self.notifier_thread.join(timeout=0.01)
                if not self.notifier_thread.is_alive():
                    break
            logger.debug("Notifier thread stopped")
        else:
            logger.debug("Notifier thread not running")
    def create_listener(
        self,
        callback_func,
        port=0,
        endpoint_error_handling=True,
        exchange_peer_info_timeout=5.0,
    ):
        """Create and start a listener to accept incoming connections

        callback_func is the function or coroutine that takes one
        argument -- the Endpoint connected to the client.

        Notice, the listening is closed when the returned Listener
        goes out of scope thus remember to keep a reference to the object.

        Parameters
        ----------
        callback_func: function or coroutine
            A callback function that gets invoked when an incoming
            connection is accepted
        port: int, optional
            An unused port number for listening, or `0` to let UCX assign
            an unused port.
        endpoint_error_handling: boolean, optional
            If `True` (default) enable endpoint error handling raising
            exceptions when an error occurs, may incur in performance penalties
            but prevents a process from terminating unexpectedly that may
            happen when disabled. If `False` endpoint error handling
            is disabled.
        exchange_peer_info_timeout: float
            Timeout in seconds for exchanging peer info. In some cases, exchanging
            peer information may hang indefinitely, a timeout prevents that. If the
            chosen value is too high it may cause the operation to be stuck for too
            long rather than quickly raising a `TimeoutError` that may be recovered
            from by the application, but under high-load a higher timeout may
            be helpful to prevent exchanging peer info from failing too fast.

        Returns
        -------
        Listener
            The new listener. When this object is deleted, the listening stops
        """
        # Make sure UCX progress is associated with the current event loop.
        self.continuous_ucx_progress()
        if port is None:
            port = 0

        loop = get_event_loop()

        logger.info("create_listener() - Start listening on port %d" % port)
        # Unique ID used to track this listener's active client handlers.
        listener_id = self._next_listener_id
        self._next_listener_id += 1
        ret = Listener(
            ucx_api.UCXListener.create(
                worker=self.worker,
                port=port,
                cb_func=_listener_handler,
                cb_args=(
                    loop,
                    callback_func,
                    self,
                    endpoint_error_handling,
                    exchange_peer_info_timeout,
                    listener_id,
                    self._listener_active_clients,
                ),
                deliver_endpoint=True,
            ),
            listener_id,
            self._listener_active_clients,
        )
        return ret
    async def create_endpoint(
        self,
        ip_address,
        port,
        endpoint_error_handling=True,
        exchange_peer_info_timeout=5.0,
    ):
        """Create a new endpoint to a server

        Parameters
        ----------
        ip_address: str
            IP address of the server the endpoint should connect to
        port: int
            Port of the server the endpoint should connect to
        endpoint_error_handling: boolean, optional
            If `True` (default) enable endpoint error handling raising
            exceptions when an error occurs, may incur in performance penalties
            but prevents a process from terminating unexpectedly that may
            happen when disabled. If `False` endpoint error handling
            is disabled.
        exchange_peer_info_timeout: float
            Timeout in seconds for exchanging peer info. In some cases, exchanging
            peer information may hang indefinitely, a timeout prevents that. If the
            chosen value is too high it may cause the operation to be stuck for too
            long rather than quickly raising a `TimeoutError` that may be recovered
            from by the application, but under high-load a higher timeout may
            be helpful to prevent exchanging peer info from failing too fast.

        Returns
        -------
        Endpoint
            The new endpoint
        """
        self.continuous_ucx_progress()

        ucx_ep = ucx_api.UCXEndpoint.create(
            self.worker, ip_address, port, endpoint_error_handling
        )
        if not self.progress_mode.startswith("thread"):
            self.worker.progress()

        # We create the Endpoint in three steps:
        # 1) Generate unique IDs to use as tags
        # 2) Exchange endpoint info such as tags
        # 3) Use the info to create an endpoint
        seed = os.urandom(16)
        msg_tag = hash64bits("msg_tag", seed, ucx_ep.handle)
        ctrl_tag = hash64bits("ctrl_tag", seed, ucx_ep.handle)

        try:
            peer_info = await exchange_peer_info(
                endpoint=ucx_ep,
                msg_tag=msg_tag,
                ctrl_tag=ctrl_tag,
                listener=False,
                stream_timeout=exchange_peer_info_timeout,
            )
        except UCXMessageTruncatedError:
            # A truncated message occurs if the remote endpoint closed before
            # exchanging peer info, in that case we should raise the endpoint
            # error instead.
            # NOTE(review): if `raise_on_error()` were ever to return without
            # raising, `peer_info` below would be unbound -- presumably it
            # always raises on this path; confirm.
            ucx_ep.raise_on_error()
        tags = {
            "msg_send": peer_info["msg_tag"],
            "msg_recv": msg_tag,
            "ctrl_send": peer_info["ctrl_tag"],
            "ctrl_recv": ctrl_tag,
        }
        ep = Endpoint(endpoint=ucx_ep, ctx=self, tags=tags)

        logger.debug(
            "create_endpoint() client: %s, error handling: %s, msg-tag-send: %s, "
            "msg-tag-recv: %s, ctrl-tag-send: %s, ctrl-tag-recv: %s"
            % (
                hex(ep._ep.handle),
                endpoint_error_handling,
                hex(ep._tags["msg_send"]),
                hex(ep._tags["msg_recv"]),
                hex(ep._tags["ctrl_send"]),
                hex(ep._tags["ctrl_recv"]),
            )
        )

        return ep
    async def create_endpoint_from_worker_address(
        self,
        address,
        endpoint_error_handling=True,
    ):
        """Create a new endpoint to a server

        Parameters
        ----------
        address: UCXAddress
            The worker address of the remote worker to connect to.
        endpoint_error_handling: boolean, optional
            If `True` (default) enable endpoint error handling raising
            exceptions when an error occurs, may incur in performance penalties
            but prevents a process from terminating unexpectedly that may
            happen when disabled. If `False` endpoint error handling
            is disabled.

        Returns
        -------
        Endpoint
            The new endpoint
        """
        self.continuous_ucx_progress()

        ucx_ep = ucx_api.UCXEndpoint.create_from_worker_address(
            self.worker,
            address,
            endpoint_error_handling,
        )
        if not self.progress_mode.startswith("thread"):
            self.worker.progress()

        # No tags: address-based endpoints do not exchange peer info.
        ep = Endpoint(endpoint=ucx_ep, ctx=self, tags=None)

        logger.debug(
            "create_endpoint() client: %s, error handling: %s"
            % (hex(ep._ep.handle), endpoint_error_handling)
        )

        return ep
    def continuous_ucx_progress(self, event_loop=None):
        """Guarantees continuous UCX progress

        Use this function to associate UCX progress with an event loop.
        Notice, multiple event loops can be associated with UCX progress.

        This function is automatically called when calling
        `create_listener()` or `create_endpoint()`.

        Parameters
        ----------
        event_loop: asyncio.event_loop, optional
            The event loop to evoke UCX progress. If None,
            `asyncio.get_event_loop()` (`asyncio.new_event_loop()` in
            Python 3.10+) is used.
        """
        loop = event_loop if event_loop is not None else get_event_loop()
        # `ProgressTask.__hash__`/`__eq__` are defined in terms of the event
        # loop, so this membership test matches tasks already bound to `loop`.
        if loop in self.progress_tasks:
            return  # Progress has already been guaranteed for the current event loop

        if self.progress_mode == "thread":
            task = ThreadMode(self.worker, loop, polling_mode=False)
        elif self.progress_mode == "thread-polling":
            task = ThreadMode(self.worker, loop, polling_mode=True)
        elif self.progress_mode == "polling":
            task = PollingMode(self.worker, loop)
        # NOTE(review): an unrecognized `progress_mode` would leave `task`
        # unbound and raise UnboundLocalError below -- presumably the mode is
        # validated at context creation; confirm.
        self.progress_tasks.append(task)
    def get_ucp_worker(self):
        """Returns the underlying UCP worker handle (ucp_worker_h)
        as a Python integer.
        """
        return self.worker.handle
    def get_config(self):
        """Returns all UCX configuration options as a dict.

        Returns
        -------
        dict
            The current UCX configuration options
        """
        # Configuration lives on the UCX context, not the worker.
        return self.context.get_config()
    def ucp_context_info(self):
        """Return low-level UCX info about this context as a string"""
        return self.context.info
    def ucp_worker_info(self):
        """Return low-level UCX info about this worker as a string"""
        return self.worker.info
    def get_worker_address(self):
        """Return the local worker's UCX address, usable by peers to connect."""
        return self.worker.get_address()
    # @ucx_api.nvtx_annotate("UCXPY_WORKER_RECV", color="red", domain="ucxpy")
    async def recv(self, buffer, tag):
        """Receive directly on worker without a local Endpoint into `buffer`.

        Parameters
        ----------
        buffer: exposing the buffer protocol or array/cuda interface
            The buffer to receive into. Raise ValueError if buffer
            is smaller than nbytes or read-only.
        tag: hashable
            Set a tag that must match the received message.
        """
        if not isinstance(buffer, Array):
            buffer = Array(buffer)
        nbytes = buffer.nbytes
        log = "[Worker Recv] worker: %s, tag: %s, nbytes: %d, type: %s" % (
            hex(self.worker.handle),
            hex(tag),
            nbytes,
            type(buffer.obj),
        )
        logger.debug(log)
        req = self.worker.tag_recv(buffer, tag)
        return await req.wait()
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/utils_test.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import io
import logging
import os
from contextlib import contextmanager
import numpy as np
import pytest
import ucxx
# Environment applied to test processes: force the put_zcopy rendezvous
# scheme, disable the memory-type cache and restrict UCX transports and
# visible devices to a single-GPU CUDA configuration.
normal_env = {
    "UCX_RNDV_SCHEME": "put_zcopy",
    "UCX_MEMTYPE_CACHE": "n",
    "UCX_TLS": "rc,cuda_copy,cuda_ipc",
    "CUDA_VISIBLE_DEVICES": "0",
}
def set_env():
    """Apply the default UCX test environment variables to ``os.environ``."""
    for key, value in normal_env.items():
        os.environ[key] = value
def get_num_gpus():
    """Return the number of GPUs on the system, queried via NVML."""
    import pynvml

    pynvml.nvmlInit()
    ngpus = pynvml.nvmlDeviceGetCount()
    pynvml.nvmlShutdown()
    return ngpus
def get_cuda_devices():
    """Return the list of visible CUDA devices.

    Honors ``CUDA_VISIBLE_DEVICES`` when set (returning its comma-separated
    entries as strings); otherwise queries NVML and returns all device
    indices as integers.
    """
    visible = os.environ.get("CUDA_VISIBLE_DEVICES")
    if visible is not None:
        return visible.split(",")
    return list(range(get_num_gpus()))
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
    """Capture output from the given Logger.

    Parameters
    ----------
    logger: logging.Logger or str
        Logger (or logger name) whose output should be captured.
    level: int
        Level the logger is set to while capturing.
    propagate: bool or None
        If not None, temporarily override the logger's ``propagate`` flag.

    Yields
    ------
    io.StringIO
        Stream receiving everything the logger emits inside the block.
    """
    if isinstance(logger, str):
        logger = logging.getLogger(logger)
    saved_level = logger.level
    saved_handlers = logger.handlers[:]
    override_propagate = propagate is not None
    if override_propagate:
        saved_propagate = logger.propagate
        logger.propagate = propagate
    stream = io.StringIO()
    logger.handlers[:] = [logging.StreamHandler(stream)]
    logger.setLevel(level)
    try:
        yield stream
    finally:
        # Restore handlers, level and propagation so capture is side-effect free.
        logger.handlers[:] = saved_handlers
        logger.setLevel(saved_level)
        if override_propagate:
            logger.propagate = saved_propagate
def cuda_array(size):
    """Allocate a device buffer of ``size`` bytes, preferring RMM over numba."""
    try:
        import rmm

        return rmm.DeviceBuffer(size=size)
    except ImportError:
        import numba.cuda

        # Fall back to a 1-byte-per-element numba device array.
        return numba.cuda.device_array((size,), dtype="u1")
async def send(ep, frames):
    """Send ``frames`` over endpoint ``ep`` preceded by metadata.

    Three metadata messages are sent first: the number of frames, a boolean
    array flagging which frames are device (CUDA) buffers, and the per-frame
    sizes in bytes. Zero-length frames are not transferred.
    """
    pytest.importorskip("distributed")
    from distributed.utils import nbytes

    await ep.send(np.array([len(frames)], dtype=np.uint64))
    # `np.bool` was removed from NumPy (1.24); the builtin `bool` maps to
    # the same `np.bool_` dtype.
    await ep.send(
        np.array(
            [hasattr(f, "__cuda_array_interface__") for f in frames], dtype=bool
        )
    )
    await ep.send(np.array([nbytes(f) for f in frames], dtype=np.uint64))
    # Send frames
    for frame in frames:
        if nbytes(frame) > 0:
            await ep.send(frame)
async def recv(ep):
    """Receive frames sent by `send` over ``ep`` and decode the message.

    Returns
    -------
    tuple
        ``(frames, msg)`` where ``msg`` is reconstructed by ``distributed``'s
        ``from_frames``.
    """
    pytest.importorskip("distributed")
    from distributed.comm.utils import from_frames

    try:
        # Recv meta data
        nframes = np.empty(1, dtype=np.uint64)
        await ep.recv(nframes)
        # `np.bool` was removed from NumPy (1.24); builtin `bool` is the
        # equivalent dtype here.
        is_cudas = np.empty(nframes[0], dtype=bool)
        await ep.recv(is_cudas)
        sizes = np.empty(nframes[0], dtype=np.uint64)
        await ep.recv(sizes)
    except (ucxx.exceptions.UCXCanceledError, ucxx.exceptions.UCXCloseError) as e:
        msg = "SOMETHING TERRIBLE HAS HAPPENED IN THE TEST"
        # `e` is an exception *instance*; the previous `raise e(msg)` would
        # itself fail with a TypeError. Re-raise the same exception type with
        # the new message, chaining the original.
        raise type(e)(msg) from e

    # Recv frames
    frames = []
    for is_cuda, size in zip(is_cudas.tolist(), sizes.tolist()):
        if size > 0:
            if is_cuda:
                frame = cuda_array(size)
            else:
                frame = np.empty(size, dtype=np.uint8)
            await ep.recv(frame)
            frames.append(frame)
        else:
            if is_cuda:
                frames.append(cuda_array(size))
            else:
                frames.append(b"")

    msg = await from_frames(frames)
    return frames, msg
async def am_send(ep, frames):
    """Send ``frames`` over ``ep`` via active messages, preceded by a count."""
    header = np.array([len(frames)], dtype=np.uint64)
    await ep.am_send(header)
    # Send frames
    for frame in frames:
        await ep.am_send(frame)
async def am_recv(ep):
    """Receive frames sent by `am_send` over ``ep`` and decode the message.

    Returns
    -------
    tuple
        ``(frames, msg)`` where ``msg`` is reconstructed by ``distributed``'s
        ``from_frames``.
    """
    pytest.importorskip("distributed")
    from distributed.comm.utils import from_frames

    try:
        # Recv meta data
        nframes = (await ep.am_recv()).view(np.uint64)
    except (ucxx.exceptions.UCXCanceledError, ucxx.exceptions.UCXCloseError) as e:
        msg = "SOMETHING TERRIBLE HAS HAPPENED IN THE TEST"
        # `e` is an exception *instance*; the previous `raise e(msg)` would
        # itself fail with a TypeError. Re-raise the same exception type with
        # the new message, chaining the original.
        raise type(e)(msg) from e

    # Recv frames
    frames = []
    for _ in range(nframes[0]):
        frame = await ep.am_recv()
        frames.append(frame)

    msg = await from_frames(frames)
    return frames, msg
async def wait_listener_client_handlers(listener):
    """Wait until all of ``listener``'s client handler callbacks complete.

    Yields control to the event loop (and manually progresses the worker for
    non-thread progress modes) until the listener reports no active clients.
    """
    # The stray `pass` statement that preceded this loop was dead code.
    while listener.active_clients > 0:
        await asyncio.sleep(0)
        if not ucxx.core._get_ctx().progress_mode.startswith("thread"):
            ucxx.progress()
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/exchange_peer_info.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import logging
import struct
from ucxx._lib.arr import Array
from .utils import hash64bits
logger = logging.getLogger("ucx")
async def exchange_peer_info(endpoint, msg_tag, ctrl_tag, listener, stream_timeout=5.0):
    """Helper function that exchanges endpoint information.

    Both sides send a fixed-size packed struct with their message and control
    tags plus a checksum, and receive the peer's equivalent. The listener
    sends first and the client receives first, so the two stream operations
    are never issued concurrently on either side.

    Parameters
    ----------
    endpoint: UCXEndpoint
        Endpoint to exchange peer information on.
    msg_tag: int
        Local tag for regular messages, sent to the peer.
    ctrl_tag: int
        Local tag for control messages, sent to the peer.
    listener: bool
        Whether the local side is the listener (server) side.
    stream_timeout: float
        Timeout in seconds applied to each individual stream operation.

    Returns
    -------
    dict
        The peer's ``msg_tag``, ``ctrl_tag`` and ``checksum``.

    Raises
    ------
    RuntimeError
        If the checksum received from the peer does not match its tags.
    """
    # Pack peer information incl. a checksum
    fmt = "QQQ"
    my_info = struct.pack(fmt, msg_tag, ctrl_tag, hash64bits(msg_tag, ctrl_tag))
    peer_info = bytearray(len(my_info))
    my_info_arr = Array(my_info)
    peer_info_arr = Array(peer_info)
    # Send/recv peer information. Notice, we force an `await` between the two
    # streaming calls (see <https://github.com/rapidsai/ucx-py/pull/509>)
    if listener is True:
        req = endpoint.stream_send(my_info_arr)
        await asyncio.wait_for(req.wait(), timeout=stream_timeout)
        req = endpoint.stream_recv(peer_info_arr)
        await asyncio.wait_for(req.wait(), timeout=stream_timeout)
    else:
        req = endpoint.stream_recv(peer_info_arr)
        await asyncio.wait_for(req.wait(), timeout=stream_timeout)
        req = endpoint.stream_send(my_info_arr)
        await asyncio.wait_for(req.wait(), timeout=stream_timeout)

    # Unpacking and sanity check of the peer information
    ret = {}
    (ret["msg_tag"], ret["ctrl_tag"], ret["checksum"]) = struct.unpack(fmt, peer_info)

    expected_checksum = hash64bits(ret["msg_tag"], ret["ctrl_tag"])

    if expected_checksum != ret["checksum"]:
        raise RuntimeError(
            f'Checksum invalid! {hex(expected_checksum)} != {hex(ret["checksum"])}'
        )

    return ret
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/endpoint.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import array
import asyncio
import logging
import ucxx._lib.libucxx as ucx_api
from ucxx._lib.arr import Array
from ucxx._lib.libucxx import UCXCanceled, UCXCloseError, UCXError
from .utils import hash64bits
logger = logging.getLogger("ucx")
class Endpoint:
    """An endpoint represents a connection to a peer

    Please use `create_listener()` and `create_endpoint()`
    to create an Endpoint.
    """

    def __init__(self, endpoint, ctx, tags=None):
        # Imported here to avoid a circular import at module load time.
        from .application_context import ApplicationContext

        if not isinstance(endpoint, ucx_api.UCXEndpoint):
            raise ValueError("endpoint must be an instance of UCXEndpoint")
        if not isinstance(ctx, ApplicationContext):
            raise ValueError("ctx must be an instance of ApplicationContext")

        self._ep = endpoint
        self._ctx = ctx
        self._send_count = 0  # Number of calls to self.send()
        self._recv_count = 0  # Number of calls to self.recv()
        self._finished_recv_count = 0  # Number of returned (finished) self.recv() calls
        self._shutting_down_peer = False  # Told peer to shutdown
        self._close_after_n_recv = None
        self._tags = tags

    def __del__(self):
        self.abort()

    @property
    def uid(self):
        """The unique ID of the underlying UCX endpoint"""
        return self._ep.handle

    def closed(self):
        """Is this endpoint closed?"""
        return self._ep is None or not self._ep.is_alive()

    def abort(self):
        """Close the communication immediately and abruptly.
        Useful in destructors or generators' ``finally`` blocks.

        Notice, this function doesn't signal the connected peer to close.
        To do that, use `Endpoint.close()`
        """
        if self._ep is not None:
            logger.debug("Endpoint.abort(): 0x%x" % self.uid)
            self._ep.close()
        self._ep = None
        self._ctx = None

    async def close(self):
        """Close the endpoint cleanly.
        This will attempt to flush outgoing buffers before actually
        closing the underlying UCX endpoint.
        """
        if self.closed():
            self.abort()
            return
        try:
            # Making sure we only tell peer to shutdown once
            if self._shutting_down_peer:
                return
            self._shutting_down_peer = True

        finally:
            # NOTE: this `finally` also runs after the early `return` above.
            if not self.closed():
                # Give all current outstanding send() calls a chance to return
                if not self._ctx.progress_mode.startswith("thread"):
                    self._ctx.worker.progress()
                await asyncio.sleep(0)
                self.abort()

    async def am_send(self, buffer):
        """Send `buffer` to connected peer via active messages.

        Parameters
        ----------
        buffer: exposing the buffer protocol or array/cuda interface
            The buffer to send. Raise ValueError if buffer is smaller
            than nbytes.
        """
        self._ep.raise_on_error()
        if self.closed():
            raise UCXCloseError("Endpoint closed")
        if not isinstance(buffer, Array):
            buffer = Array(buffer)

        # Optimization to eliminate producing logger string overhead
        if logger.isEnabledFor(logging.DEBUG):
            nbytes = buffer.nbytes
            log = "[AM Send #%03d] ep: 0x%x, nbytes: %d, type: %s" % (
                self._send_count,
                self.uid,
                nbytes,
                type(buffer.obj),
            )
            logger.debug(log)

        self._send_count += 1

        try:
            request = self._ep.am_send(buffer)
            return await request.wait()
        except UCXCanceled as e:
            # If self._ep has already been closed and destroyed, we reraise the
            # UCXCanceled exception.
            if self._ep is None:
                raise e

    # @ucx_api.nvtx_annotate("UCXPY_SEND", color="green", domain="ucxpy")
    async def send(self, buffer, tag=None, force_tag=False):
        """Send `buffer` to connected peer.

        Parameters
        ----------
        buffer: exposing the buffer protocol or array/cuda interface
            The buffer to send. Raise ValueError if buffer is smaller
            than nbytes.
        tag: hashable, optional
            Set a tag that the receiver must match. Currently the tag
            is hashed together with the internal Endpoint tag that is
            agreed with the remote end at connection time. To enforce
            using the user tag, make sure to specify `force_tag=True`.
        force_tag: bool
            If true, force using `tag` as is, otherwise the value
            specified with `tag` (if any) will be hashed with the
            internal Endpoint tag.
        """
        self._ep.raise_on_error()
        if self.closed():
            raise UCXCloseError("Endpoint closed")
        if not isinstance(buffer, Array):
            buffer = Array(buffer)
        if tag is None:
            tag = self._tags["msg_send"]
        elif not force_tag:
            tag = hash64bits(self._tags["msg_send"], hash(tag))

        # Optimization to eliminate producing logger string overhead
        if logger.isEnabledFor(logging.DEBUG):
            nbytes = buffer.nbytes
            log = "[Send #%03d] ep: 0x%x, tag: 0x%x, nbytes: %d, type: %s" % (
                self._send_count,
                self.uid,
                tag,
                nbytes,
                type(buffer.obj),
            )
            logger.debug(log)

        self._send_count += 1

        try:
            request = self._ep.tag_send(buffer, tag)
            return await request.wait()
        except UCXCanceled as e:
            # If self._ep has already been closed and destroyed, we reraise the
            # UCXCanceled exception.
            if self._ep is None:
                raise e

    async def send_multi(self, buffers, tag=None, force_tag=False):
        """Send multiple `buffers` to connected peer as one multi-buffer message.

        Parameters
        ----------
        buffers: list or tuple of objects exposing the buffer protocol or
            array/cuda interface
            The buffers to send.
        tag: hashable, optional
            Set a tag that the receiver must match. Currently the tag
            is hashed together with the internal Endpoint tag that is
            agreed with the remote end at connection time. To enforce
            using the user tag, make sure to specify `force_tag=True`.
        force_tag: bool
            If true, force using `tag` as is, otherwise the value
            specified with `tag` (if any) will be hashed with the
            internal Endpoint tag.
        """
        self._ep.raise_on_error()
        if self.closed():
            raise UCXCloseError("Endpoint closed")
        if not (isinstance(buffers, list) or isinstance(buffers, tuple)):
            raise ValueError("The `buffers` argument must be a `list` or `tuple`")
        buffers = tuple([Array(b) if not isinstance(b, Array) else b for b in buffers])
        if tag is None:
            tag = self._tags["msg_send"]
        elif not force_tag:
            tag = hash64bits(self._tags["msg_send"], hash(tag))

        # Optimization to eliminate producing logger string overhead
        if logger.isEnabledFor(logging.DEBUG):
            log = "[Send Multi #%03d] ep: 0x%x, tag: 0x%x, nbytes: %s, type: %s" % (
                self._send_count,
                self.uid,
                tag,
                tuple([b.nbytes for b in buffers]),  # nbytes,
                tuple([type(b.obj) for b in buffers]),
            )
            logger.debug(log)

        self._send_count += 1

        try:
            buffer_requests = self._ep.tag_send_multi(buffers, tag)
            await buffer_requests.wait()
            buffer_requests.check_error()
        except UCXCanceled as e:
            # If self._ep has already been closed and destroyed, we reraise the
            # UCXCanceled exception.
            if self._ep is None:
                raise e

    async def send_obj(self, obj, tag=None):
        """Send `obj` to connected peer that calls `recv_obj()`.

        The transfer includes an extra message containing the size of `obj`,
        which increases the overhead slightly.

        Parameters
        ----------
        obj: exposing the buffer protocol or array/cuda interface
            The object to send.
        tag: hashable, optional
            Set a tag that the receiver must match.

        Example
        -------
        >>> await ep.send_obj(pickle.dumps([1,2,3]))
        """
        if not isinstance(obj, Array):
            obj = Array(obj)
        nbytes = Array(array.array("Q", [obj.nbytes]))
        await self.send(nbytes, tag=tag)
        await self.send(obj, tag=tag)

    async def am_recv(self):
        """Receive from connected peer via active messages."""
        # Only raise errors when there is no AM message already waiting.
        if not self._ep.am_probe():
            self._ep.raise_on_error()
            if self.closed():
                raise UCXCloseError("Endpoint closed")

        # Optimization to eliminate producing logger string overhead
        if logger.isEnabledFor(logging.DEBUG):
            log = "[AM Recv #%03d] ep: 0x%x" % (
                self._recv_count,
                self.uid,
            )
            logger.debug(log)

        self._recv_count += 1
        req = self._ep.am_recv()
        await req.wait()
        buffer = req.get_recv_buffer()

        if logger.isEnabledFor(logging.DEBUG):
            log = "[AM Recv Completed #%03d] ep: 0x%x, nbytes: %d, type: %s" % (
                self._recv_count,
                self.uid,
                buffer.nbytes,
                type(buffer),
            )
            logger.debug(log)

        self._finished_recv_count += 1
        if (
            self._close_after_n_recv is not None
            and self._finished_recv_count >= self._close_after_n_recv
        ):
            self.abort()
        return buffer

    # @ucx_api.nvtx_annotate("UCXPY_RECV", color="red", domain="ucxpy")
    async def recv(self, buffer, tag=None, force_tag=False):
        """Receive from connected peer into `buffer`.

        Parameters
        ----------
        buffer: exposing the buffer protocol or array/cuda interface
            The buffer to receive into. Raise ValueError if buffer
            is smaller than nbytes or read-only.
        tag: hashable, optional
            Set a tag that must match the received message. Currently
            the tag is hashed together with the internal Endpoint tag
            that is agreed with the remote end at connection time.
            To enforce using the user tag, make sure to specify
            `force_tag=True`.
        force_tag: bool
            If true, force using `tag` as is, otherwise the value
            specified with `tag` (if any) will be hashed with the
            internal Endpoint tag.
        """
        if tag is None:
            tag = self._tags["msg_recv"]
        elif not force_tag:
            tag = hash64bits(self._tags["msg_recv"], hash(tag))

        try:
            self._ep.raise_on_error()
            if self.closed():
                raise UCXCloseError("Endpoint closed")
        except Exception as e:
            # Only probe the worker as last resort. To be reliable, probing for the tag
            # requires progressing the worker, thus prevent that happening too often.
            if not self._ctx.worker.tag_probe(tag):
                raise e

        if not isinstance(buffer, Array):
            buffer = Array(buffer)

        # Optimization to eliminate producing logger string overhead
        if logger.isEnabledFor(logging.DEBUG):
            nbytes = buffer.nbytes
            log = "[Recv #%03d] ep: 0x%x, tag: 0x%x, nbytes: %d, type: %s" % (
                self._recv_count,
                self.uid,
                tag,
                nbytes,
                type(buffer.obj),
            )
            logger.debug(log)

        self._recv_count += 1

        req = self._ep.tag_recv(buffer, tag)
        ret = await req.wait()

        self._finished_recv_count += 1
        if (
            self._close_after_n_recv is not None
            and self._finished_recv_count >= self._close_after_n_recv
        ):
            self.abort()
        return ret

    async def recv_multi(self, tag=None, force_tag=False):
        """Receive a multi-buffer message from connected peer.

        Parameters
        ----------
        tag: hashable, optional
            Set a tag that must match the received message. Currently
            the tag is hashed together with the internal Endpoint tag
            that is agreed with the remote end at connection time.
            To enforce using the user tag, make sure to specify
            `force_tag=True`.
        force_tag: bool
            If true, force using `tag` as is, otherwise the value
            specified with `tag` (if any) will be hashed with the
            internal Endpoint tag.
        """
        if tag is None:
            tag = self._tags["msg_recv"]
        elif not force_tag:
            tag = hash64bits(self._tags["msg_recv"], hash(tag))

        try:
            self._ep.raise_on_error()
            if self.closed():
                raise UCXCloseError("Endpoint closed")
        except Exception as e:
            # Only probe the worker as last resort. To be reliable, probing for the tag
            # requires progressing the worker, thus prevent that happening too often.
            if not self._ctx.worker.tag_probe(tag):
                raise e

        # Optimization to eliminate producing logger string overhead
        if logger.isEnabledFor(logging.DEBUG):
            log = "[Recv Multi #%03d] ep: 0x%x, tag: 0x%x" % (
                self._recv_count,
                self.uid,
                tag,
            )
            logger.debug(log)

        self._recv_count += 1

        buffer_requests = self._ep.tag_recv_multi(tag)
        await buffer_requests.wait()
        buffer_requests.check_error()
        for r in buffer_requests.get_requests():
            r.check_error()
        buffers = buffer_requests.get_py_buffers()

        self._finished_recv_count += 1
        if (
            self._close_after_n_recv is not None
            and self._finished_recv_count >= self._close_after_n_recv
        ):
            self.abort()
        return buffers

    async def recv_obj(self, tag=None, allocator=bytearray):
        """Receive from connected peer that calls `send_obj()`.

        As opposed to `recv()`, this function returns the received object.
        Data is received into a buffer allocated by `allocator`.

        The transfer includes an extra message containing the size of `obj`,
        which increases the overhead slightly.

        Parameters
        ----------
        tag: hashable, optional
            Set a tag that must match the received message. Notice, currently
            UCX-Py doesn't support a "any tag" thus `tag=None` only matches a
            send that also sets `tag=None`.
        allocator: callable, optional
            Function to allocate the received object. The function should
            take the number of bytes to allocate as input and return a new
            buffer of that size as output.

        Example
        -------
        >>> pickle.loads(await ep.recv_obj())
        """
        nbytes = array.array("Q", [0])
        await self.recv(nbytes, tag=tag)
        nbytes = nbytes[0]
        ret = allocator(nbytes)
        await self.recv(ret, tag=tag)
        return ret

    def get_ucp_worker(self):
        """Returns the underlying UCP worker handle (ucp_worker_h)
        as a Python integer.
        """
        return self._ctx.worker.handle

    def get_ucp_endpoint(self):
        """Returns the underlying UCP endpoint handle (ucp_ep_h)
        as a Python integer.
        """
        return self._ep.handle

    def close_after_n_recv(self, n, count_from_ep_creation=False):
        """Close the endpoint after `n` received messages.

        Parameters
        ----------
        n: int
            Number of messages to received before closing the endpoint.
        count_from_ep_creation: bool, optional
            Whether to count `n` from this function call (default) or
            from the creation of the endpoint.
        """
        if not count_from_ep_creation:
            n += self._finished_recv_count  # Make `n` absolute
        if self._close_after_n_recv is not None:
            raise UCXError(
                "close_after_n_recv has already been set to: %d (abs)"
                % self._close_after_n_recv
            )
        if n == self._finished_recv_count:
            self.abort()
        elif n > self._finished_recv_count:
            self._close_after_n_recv = n
        else:
            raise UCXError(
                "`n` cannot be less than current recv_count: %d (abs) < %d (abs)"
                % (n, self._finished_recv_count)
            )

    def set_close_callback(self, callback_func, cb_args=None, cb_kwargs=None):
        """Register a user callback function to be called on Endpoint's closing.

        Allows the user to register a callback function to be called when the
        Endpoint's error callback is called, or during its finalizer if the error
        callback is never called.

        Once the callback is called, it's not possible to send any more messages.
        However, receiving messages may still be possible, as UCP may still have
        incoming messages in transit.

        Parameters
        ----------
        callback_func: callable
            The callback function to be called when the Endpoint's error callback
            is called, otherwise called on its finalizer.
        cb_args: tuple or None
            The arguments to be passed to the callback function as a `tuple`, or
            `None` (default).
        cb_kwargs: dict or None
            The keyword arguments to be passed to the callback function as a
            `dict`, or `None` (default).

        Example
        -------
        >>> ep.set_close_callback(lambda: print("Executing close callback"))
        """
        self._ep.set_close_callback(callback_func, cb_args, cb_kwargs)

    def is_alive(self):
        """Returns whether the underlying UCX endpoint is still alive."""
        return self._ep.is_alive()
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/notifier_thread.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import logging
from concurrent.futures import TimeoutError
import ucxx._lib.libucxx as ucx_api
logger = logging.getLogger("ucx")
async def _run_request_notifier(worker):
    """Coroutine trampoline that runs the worker's request notifier.

    Wrapped as a coroutine so it can be scheduled onto the event loop with
    `asyncio.run_coroutine_threadsafe` from the notifier thread.
    """
    return worker.run_request_notifier()
async def _notifier_coroutine(worker):
    """Single iteration of the notifier loop.

    Populates the Python futures pool, waits for the request notifier, and
    then notifies any enqueued waiting futures. Returns ``True`` when the
    worker signals the notifier has finished (shutdown), ``False`` otherwise.
    """
    worker.populate_python_futures_pool()
    finished = worker.wait_request_notifier()
    if finished:
        return True

    # Notify all enqueued waiting futures
    await _run_request_notifier(worker)

    return False
def _notifierThread(event_loop, worker, q):
    """Notifier thread main loop.

    Repeatedly populates the Python futures pool, waits (with a 1 second
    timeout) on the request notifier, and schedules future notification onto
    ``event_loop``. Exits when the worker reports a shutdown state or when
    the string ``"shutdown"`` is received on the queue ``q``.
    """
    logger.debug("Starting Notifier Thread")
    asyncio.set_event_loop(event_loop)
    shutdown = False

    while True:
        worker.populate_python_futures_pool()
        state = worker.wait_request_notifier(period_ns=int(1e9))  # 1 second timeout
        if not q.empty():
            q_val = q.get()
            if q_val == "shutdown":
                logger.debug("_notifierThread shutting down")
                shutdown = True
            else:
                logger.warning(
                    f"_notifierThread got unknown message from IPC queue: {q_val}"
                )
        if state == ucx_api.PythonRequestNotifierWaitState.Shutdown or shutdown is True:
            return
        elif state == ucx_api.PythonRequestNotifierWaitState.Timeout:
            continue

        # Notify all enqueued waiting futures
        task = asyncio.run_coroutine_threadsafe(
            _run_request_notifier(worker), event_loop
        )
        try:
            # Short timeout: the loop may be busy; on timeout the task is
            # cancelled and notification retried on the next iteration.
            task.result(0.01)
        except TimeoutError:
            task.cancel()
            logger.debug("Notifier Thread Result Timeout")
        except Exception as e:
            logger.debug(f"Notifier Thread Result Exception: {e}")
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/__init__.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
from .application_context import ApplicationContext # noqa
from .endpoint import Endpoint # noqa
from .listener import Listener # noqa
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/continuous_ucx_progress.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
class ProgressTask(object):
    def __init__(self, worker, event_loop):
        """Creates a task that keeps calling worker.progress()

        Notice, class and created task is careful not to hold a
        reference to `worker` so that a dangling progress task will
        not prevent `worker` to be garbage collected.

        Parameters
        ----------
        worker: UCXWorker
            The UCX worker context to progress
        event_loop: asyncio.EventLoop
            The event loop to do progress in.
        """
        # NOTE(review): despite the docstring, `self.worker` below does hold
        # a reference to the worker -- confirm whether that is intended.
        self.worker = worker
        self.event_loop = event_loop
        self.asyncio_task = None

    def __del__(self):
        if self.asyncio_task is not None:
            # FIXME: This does not work, the cancellation must be awaited.
            # Running with polling mode will always cause
            # `Task was destroyed but it is pending!` errors at ucxx.reset().
            self.asyncio_task.cancel()

    # Hash and equality is based on the event loop
    def __hash__(self):
        return hash(self.event_loop)

    def __eq__(self, other):
        return hash(self) == hash(other)
def _create_context():
    # Ensure a CUDA context exists on the calling (progress) thread before it
    # starts progressing the worker; numba is imported lazily so the module
    # does not hard-depend on it.
    import numba.cuda

    numba.cuda.current_context()
class ThreadMode(ProgressTask):
    """Progress mode that delegates worker progress to a dedicated thread."""

    def __init__(self, worker, event_loop, polling_mode=False):
        super().__init__(worker, event_loop)
        # Give the progress thread a CUDA context before it starts.
        worker.set_progress_thread_start_callback(_create_context)
        worker.start_progress_thread(polling_mode=polling_mode, epoll_timeout=1)

    def __del__(self):
        self.worker.stop_progress_thread()
class PollingMode(ProgressTask):
    """Progress mode that drives worker progress from an asyncio task."""

    def __init__(self, worker, event_loop):
        super().__init__(worker, event_loop)
        self.asyncio_task = event_loop.create_task(self._progress_task())
        self.worker.init_blocking_progress_mode()

    async def _progress_task(self):
        """This helper function maintains a UCX progress loop."""
        while True:
            worker = self.worker
            if worker is None:
                return
            worker.progress()
            # Give other co-routines a chance to run.
            await asyncio.sleep(0)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/listener.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import logging
import os
import threading
import ucxx._lib.libucxx as ucx_api
from ucxx.exceptions import UCXMessageTruncatedError
from .endpoint import Endpoint
from .exchange_peer_info import exchange_peer_info
from .utils import hash64bits
logger = logging.getLogger("ucx")
class ActiveClients:
    """
    Handle number of active clients on `Listener`.

    Each `Listener` contains a unique ID that can be used to increment/decrement the
    number of currently active client handlers. Useful to warn when the `Listener` is
    being destroyed but callbacks handling clients have not yet completed, which may
    lead to errors as the `Listener` most likely ended prematurely.
    """

    def __init__(self):
        # Per-listener lock protecting its counter, and the counters
        # themselves, both keyed by the listener's unique ID. (`id` is kept
        # as the parameter name below for interface compatibility, although
        # it shadows the builtin.)
        self._locks = dict()
        self._active_clients = dict()

    def add_listener(self, id: int) -> None:
        """Register listener ``id`` with a zeroed active-client count."""
        if id in self._active_clients:
            # Fixed: the `f` prefix was missing, so `{id}` was never interpolated.
            raise ValueError(f"Listener {id} is already registered in ActiveClients.")

        self._locks[id] = threading.Lock()
        self._active_clients[id] = 0

    def remove_listener(self, id: int) -> None:
        """Unregister listener ``id``; raise if client handlers remain active."""
        with self._locks[id]:
            active_clients = self.get_active(id)
            if active_clients > 0:
                # Fixed: the first literal was missing its `f` prefix.
                raise RuntimeError(
                    f"Listener {id} is being removed from ActiveClients, but "
                    f"{active_clients} active client(s) is(are) still accounted for."
                )

        del self._locks[id]
        del self._active_clients[id]

    def inc(self, id: int) -> None:
        """Increment the active-client count for listener ``id``."""
        with self._locks[id]:
            self._active_clients[id] += 1

    def dec(self, id: int) -> None:
        """Decrement the active-client count for listener ``id``."""
        with self._locks[id]:
            if self._active_clients[id] == 0:
                raise ValueError(f"There are no active clients for listener {id}")
            self._active_clients[id] -= 1

    def get_active(self, id: int) -> int:
        """Return the current number of active clients for listener ``id``."""
        return self._active_clients[id]
class Listener:
    """A handle to the listening service started by `create_listener()`

    The listening continues as long as this object exist or `.close()` is called.
    Please use `create_listener()` to create an Listener.
    """

    def __init__(self, listener, id, active_clients):
        # `listener` wraps the underlying C++ listener object.
        if not isinstance(listener, ucx_api.UCXListener):
            raise ValueError("listener must be an instance of UCXListener")

        self._listener = listener

        # Register this listener's ID so client handlers can be accounted for.
        active_clients.add_listener(id)
        self._id = id
        self._active_clients = active_clients

    def __del__(self):
        try:
            self._active_clients.remove_listener(self._id)
        except RuntimeError:
            # Handlers still in flight: warn instead of raising from __del__.
            active_clients = self._active_clients.get_active(self._id)
            logger.warning(
                f"Listener object is being destroyed, but {active_clients} client "
                "handler(s) is(are) still alive. This usually indicates the Listener "
                "was prematurely destroyed."
            )

    def closed(self):
        """Is the listener closed?"""
        return self._listener is None

    @property
    def ip(self):
        """The listening network IP address"""
        return self._listener.ip

    @property
    def port(self):
        """The listening network port"""
        return self._listener.port

    @property
    def active_clients(self):
        # Number of client handlers currently running for this listener.
        return self._active_clients.get_active(self._id)

    def close(self):
        """Closing the listener"""
        # NOTE(review): only drops the reference; the underlying UCXListener is
        # finalized on garbage collection — confirm this is intended.
        self._listener = None
async def _listener_handler_coroutine(
    conn_request,
    ctx,
    func,
    endpoint_error_handling,
    exchange_peer_info_timeout,
    id,
    active_clients,
):
    """Handle a single client connection accepted by a listener.

    We create the Endpoint in five steps:
      1) Create endpoint from conn_request
      2) Generate unique IDs to use as tags
      3) Exchange endpoint info such as tags
      4) Setup control receive callback
      5) Execute the listener's callback function

    Parameters
    ----------
    conn_request
        The endpoint delivered by the listener for the connecting client.
    ctx
        The application context the endpoint belongs to.
    func
        User listener callback (sync or async) receiving the `Endpoint`.
    endpoint_error_handling
        Whether endpoint error handling is enabled (used in logging only here).
    exchange_peer_info_timeout
        Timeout (in seconds) for exchanging peer info.
    id
        Listener ID used for active-client accounting.
    active_clients
        `ActiveClients` registry shared with the owning `Listener`.
    """
    active_clients.inc(id)
    try:
        endpoint = conn_request

        # Tags unique to this endpoint, derived from a random seed.
        seed = os.urandom(16)
        msg_tag = hash64bits("msg_tag", seed, endpoint.handle)
        ctrl_tag = hash64bits("ctrl_tag", seed, endpoint.handle)

        try:
            peer_info = await exchange_peer_info(
                endpoint=endpoint,
                msg_tag=msg_tag,
                ctrl_tag=ctrl_tag,
                listener=True,
                stream_timeout=exchange_peer_info_timeout,
            )
        except UCXMessageTruncatedError:
            # A truncated message occurs if the remote endpoint closed before
            # exchanging peer info, in that case we should raise the endpoint
            # error instead.
            endpoint.raise_on_error()
            # Fix: if the endpoint reports no error, re-raise the truncation
            # error instead of falling through with `peer_info` undefined
            # (which previously raised a confusing NameError).
            raise
        tags = {
            "msg_send": peer_info["msg_tag"],
            "msg_recv": msg_tag,
            "ctrl_send": peer_info["ctrl_tag"],
            "ctrl_recv": ctrl_tag,
        }
        ep = Endpoint(endpoint=endpoint, ctx=ctx, tags=tags)

        logger.debug(
            "_listener_handler() server: %s, error handling: %s, msg-tag-send: %s, "
            "msg-tag-recv: %s, ctrl-tag-send: %s, ctrl-tag-recv: %s"
            % (
                hex(endpoint.handle),
                endpoint_error_handling,
                hex(ep._tags["msg_send"]),
                hex(ep._tags["msg_recv"]),
                hex(ep._tags["ctrl_send"]),
                hex(ep._tags["ctrl_recv"]),
            )
        )

        # Removing references here to avoid delayed clean up
        del ctx

        # Finally, we call `func`
        if asyncio.iscoroutinefunction(func):
            try:
                await func(ep)
            except Exception as e:
                # Fix: "Uncatched" -> "Uncaught" in the log message.
                logger.error(f"Uncaught listener callback error {type(e)}: {e}")
        else:
            func(ep)
    finally:
        # Fix: decrement in a `finally` so a failing setup or a raising
        # synchronous callback cannot leave the client accounted as active
        # forever (which would make `Listener.__del__` warn spuriously).
        active_clients.dec(id)

    # Ensure `ep` is destroyed and `__del__` is called
    del ep
def _listener_handler(
    conn_request,
    event_loop,
    callback_func,
    ctx,
    endpoint_error_handling,
    exchange_peer_info_timeout,
    id,
    active_clients,
):
    """Schedule the asynchronous client handler on `event_loop`.

    This runs on the listener's callback thread, so the coroutine must be
    submitted thread-safely to the loop owning the listener.
    """
    handler = _listener_handler_coroutine(
        conn_request,
        ctx,
        callback_func,
        endpoint_error_handling,
        exchange_peer_info_timeout,
        id,
        active_clients,
    )
    asyncio.run_coroutine_threadsafe(handler, event_loop)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/utils.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import hashlib
import multiprocessing as mp
mp = mp.get_context("spawn")
def get_event_loop():
    """Return the running event loop, or a brand new one if none is running.

    Python 3.10 deprecated the old `asyncio.get_event_loop()` behavior, which
    created a loop on demand. UCXX still needs that behavior in several
    places, so this helper reproduces it explicitly: prefer the currently
    running loop, otherwise create a fresh one with `new_event_loop()`.
    """
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        # No loop is running on this thread; create one.
        loop = asyncio.new_event_loop()
    return loop
def hash64bits(*args):
    """Return a 64-bit unsigned hash of ``args``.

    The hash is the first 16 hex digits (64 bits) of the SHA-1 digest of
    ``repr(args)``, interpreted as an unsigned integer.
    """
    digest = hashlib.sha1(repr(args).encode("utf-8")).hexdigest()
    return int(digest[:16], 16)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_benchmark_cluster.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import tempfile
from itertools import chain
import numpy as np
import pytest
from ucxx.benchmarks.utils import _run_cluster_server, _run_cluster_workers
async def _worker(rank, eps, args):
    """All-to-all exchange: send own rank to every peer and check that the
    received ranks sum to the expected total.

    `eps` maps peer identifiers to endpoints; `args` is unused here.
    """
    futures = []

    # Send my rank to all others
    for ep in eps.values():
        futures.append(ep.send(np.array([rank], dtype="u4")))

    # Recv from all others
    result = np.empty(len(eps.values()), dtype="u4")
    futures += list(ep.recv(result[i : i + 1]) for i, ep in enumerate(eps.values()))

    # Wait for transfers to complete
    await asyncio.gather(*futures)

    # We expect to get the sum of all ranks excluding ours
    expect = sum(range(len(eps) + 1)) - rank
    assert expect == result.sum()
@pytest.mark.asyncio
async def test_benchmark_cluster(n_chunks=1, n_nodes=2, n_workers=2):
    """Run the benchmark cluster helpers end-to-end with a tiny all-to-all
    workload and check that every spawned process exits cleanly."""
    server_file = tempfile.NamedTemporaryFile()
    server, server_ret = _run_cluster_server(server_file.name, n_nodes * n_workers)

    # Wait for server to become available
    with open(server_file.name, "r") as f:
        # NOTE(review): busy-wait; keeps reading from the current file offset
        # until the server process writes its address to the file.
        while len(f.read()) == 0:
            pass

    workers = list(
        chain.from_iterable(
            _run_cluster_workers(server_file.name, n_chunks, n_workers, i, _worker)
            for i in range(n_nodes)
        )
    )

    for worker in workers:
        worker.join()
        assert not worker.exitcode

    server.join()
    assert not server.exitcode
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_send_recv_multi.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from ucxx._lib_async.utils_test import wait_listener_client_handlers
import ucxx
np = pytest.importorskip("numpy")
msg_sizes = [2**i for i in range(0, 25, 4)]
# multi_sizes = [0, 1, 2, 3, 4, 8]
multi_sizes = [1, 2, 3, 4, 8]
dtypes = ["|u1", "<i8", "f8"]
def make_echo_server():
    """Return an echo-server callback suitable for ``ucxx.create_listener``.

    The returned coroutine receives one multi-part message, sends the same
    parts back unchanged, and closes the endpoint.
    """

    async def echo_server(ep):
        """Echo a single multi-part message back to the peer, then close."""
        received = await ep.recv_multi()
        await ep.send_multi(received)
        await ep.close()

    return echo_server
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("multi_size", multi_sizes)
async def test_send_recv_bytes(size, multi_size):
    """Round-trip a multi-part bytearray message through the echo server."""
    payload = [bytearray(b"m" * size)] * multi_size

    listener = ucxx.create_listener(make_echo_server())
    client = await ucxx.create_endpoint(ucxx.get_address(), listener.port)
    await client.send_multi(payload)
    echoed = await client.recv_multi()
    for received, sent in zip(echoed, payload):
        np.testing.assert_array_equal(received, sent)
    await client.close()
    await wait_listener_client_handlers(listener)
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("multi_size", multi_sizes)
@pytest.mark.parametrize("dtype", dtypes)
async def test_send_recv_numpy(size, multi_size, dtype):
    """Round-trip a multi-part NumPy message through the echo server."""
    send_msg = [np.arange(size, dtype=dtype)] * multi_size

    listener = ucxx.create_listener(make_echo_server())
    client = await ucxx.create_endpoint(ucxx.get_address(), listener.port)
    await client.send_multi(send_msg)
    recv_msg = await client.recv_multi()
    for r, s in zip(recv_msg, send_msg):
        np.testing.assert_array_equal(r.view(dtype), s)
    # Close the client endpoint before waiting on the listener handlers,
    # consistent with `test_send_recv_bytes`.
    await client.close()
    await wait_listener_client_handlers(listener)
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("multi_size", multi_sizes)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.flaky(reruns=3)
async def test_send_recv_cupy(size, multi_size, dtype):
    """Round-trip a multi-part CuPy message through the echo server."""
    cupy = pytest.importorskip("cupy")
    send_msg = [cupy.arange(size, dtype=dtype)] * multi_size

    listener = ucxx.create_listener(make_echo_server())
    client = await ucxx.create_endpoint(ucxx.get_address(), listener.port)
    await client.send_multi(send_msg)
    recv_msg = await client.recv_multi()
    for r, s in zip(recv_msg, send_msg):
        cupy.testing.assert_array_equal(cupy.asarray(r).view(dtype), cupy.asarray(s))
    # Close the client endpoint before waiting on the listener handlers,
    # consistent with `test_send_recv_bytes`.
    await client.close()
    await wait_listener_client_handlers(listener)
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("multi_size", multi_sizes)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.flaky(reruns=3)
async def test_send_recv_numba(size, multi_size, dtype):
    """Round-trip a multi-part Numba device-array message through the echo server."""
    cuda = pytest.importorskip("numba.cuda")
    ary = np.arange(size, dtype=dtype)
    send_msg = [cuda.to_device(ary)] * multi_size

    listener = ucxx.create_listener(make_echo_server())
    client = await ucxx.create_endpoint(ucxx.get_address(), listener.port)
    await client.send_multi(send_msg)
    recv_msg = await client.recv_multi()
    for r, s in zip(recv_msg, send_msg):
        np.testing.assert_array_equal(
            r.copy_to_host().view(dtype), s.copy_to_host().view(dtype)
        )
    # Close the client endpoint before waiting on the listener handlers,
    # consistent with `test_send_recv_bytes`.
    await client.close()
    await wait_listener_client_handlers(listener)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_custom_send_recv.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import pickle
import numpy as np
import pytest
import ucxx
cudf = pytest.importorskip("cudf")
distributed = pytest.importorskip("distributed")
cuda = pytest.importorskip("numba.cuda")
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "g",
    [
        lambda cudf: cudf.Series([1, 2, 3]),
        lambda cudf: cudf.Series([1, 2, 3], index=[4, 5, 6]),
        lambda cudf: cudf.Series([1, None, 3]),
        lambda cudf: cudf.Series(range(2**13)),
        lambda cudf: cudf.DataFrame({"a": np.random.random(1200000)}),
        lambda cudf: cudf.DataFrame({"a": range(2**20)}),
        lambda cudf: cudf.DataFrame({"a": range(2**26)}),
        lambda cudf: cudf.Series(),
        lambda cudf: cudf.DataFrame(),
        lambda cudf: cudf.DataFrame({"a": [], "b": []}),
        lambda cudf: cudf.DataFrame({"a": [1.0], "b": [2.0]}),
        lambda cudf: cudf.DataFrame(
            {"a": ["a", "b", "c", "d"], "b": ["a", "b", "c", "d"]}
        ),
        lambda cudf: cudf.datasets.timeseries(),  # ts index with ints, cats, floats
    ],
)
async def test_send_recv_cudf(event_loop, g):
    """Serialize a cuDF object built by `g`, ship its frames over a UCXX
    endpoint pair, deserialize on the other side, and verify equality."""
    from distributed.utils import nbytes

    class UCX:
        """Minimal frame-based transport over a single UCXX endpoint."""

        def __init__(self, ep):
            self.ep = ep

        async def write(self, cdf):
            """Send the serialized `cdf` as: frame count, is-CUDA flags,
            frame sizes, then each non-empty frame."""
            header, _frames = cdf.serialize()
            frames = [pickle.dumps(header)] + _frames

            # Send meta data
            await self.ep.send(np.array([len(frames)], dtype=np.uint64))
            await self.ep.send(
                np.array(
                    [hasattr(f, "__cuda_array_interface__") for f in frames],
                    dtype=bool,
                )
            )
            await self.ep.send(np.array([nbytes(f) for f in frames], dtype=np.uint64))
            # Send frames
            for frame in frames:
                if nbytes(frame) > 0:
                    await self.ep.send(frame)

        async def read(self):
            """Receive the frames sent by `write`, allocating host or device
            buffers according to the advertised is-CUDA flags."""
            try:
                # Recv meta data
                nframes = np.empty(1, dtype=np.uint64)
                await self.ep.recv(nframes)
                is_cudas = np.empty(nframes[0], dtype=bool)
                await self.ep.recv(is_cudas)
                sizes = np.empty(nframes[0], dtype=np.uint64)
                await self.ep.recv(sizes)
            except (
                ucxx.exceptions.UCXCanceledError,
                ucxx.exceptions.UCXCloseError,
            ) as e:
                msg = "SOMETHING TERRIBLE HAS HAPPENED IN THE TEST"
                raise e(msg)
            else:
                # Recv frames
                frames = []
                for is_cuda, size in zip(is_cudas.tolist(), sizes.tolist()):
                    if size > 0:
                        if is_cuda:
                            frame = cuda.device_array((size,), dtype=np.uint8)
                        else:
                            frame = np.empty(size, dtype=np.uint8)
                        await self.ep.recv(frame)
                        frames.append(frame)
                    else:
                        # Zero-length frames are never sent; synthesize them.
                        if is_cuda:
                            frames.append(cuda.device_array((0,), dtype=np.uint8))
                        else:
                            frames.append(b"")
                return frames

    class UCXListener:
        """Server side: stores the UCX wrapper for the accepted endpoint."""

        def __init__(self):
            self.comm = None

        def start(self):
            async def serve_forever(ep):
                ucx = UCX(ep)
                self.comm = ucx

            self.ucxx_server = ucxx.create_listener(serve_forever)

    uu = UCXListener()
    uu.start()
    uu.address = ucxx.get_address()
    uu.client = await ucxx.create_endpoint(uu.address, uu.ucxx_server.port)
    ucx = UCX(uu.client)
    # Give the listener callback time to run and populate `uu.comm`.
    await asyncio.sleep(0.2)
    msg = g(cudf)
    frames, _ = await asyncio.gather(uu.comm.read(), ucx.write(msg))
    ucx_header = pickle.loads(frames[0])
    cudf_buffer = frames[1:]
    typ = type(msg)
    res = typ.deserialize(ucx_header, cudf_buffer)

    from cudf.testing._utils import assert_eq

    assert_eq(res, msg)
    await uu.comm.ep.close()
    await uu.client.close()
    assert uu.client.closed()
    assert uu.comm.ep.closed()
    del uu.ucxx_server
    ucxx.reset()
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_send_recv_two_workers.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import multiprocessing
import os
import random
import numpy as np
import pytest
from ucxx._lib_async.utils import get_event_loop
from ucxx._lib_async.utils_test import (
am_recv,
am_send,
get_cuda_devices,
get_num_gpus,
recv,
send,
)
import ucxx as ucxx
cupy = pytest.importorskip("cupy")
rmm = pytest.importorskip("rmm")
distributed = pytest.importorskip("distributed")
cloudpickle = pytest.importorskip("cloudpickle")
ITERATIONS = 30
async def get_ep(name, port):
    """Create an endpoint to `port` on the local address.

    `name` identifies the caller but is otherwise unused.
    """
    return await ucxx.create_endpoint(ucxx.get_address(), port)
def register_am_allocators():
    """Register active-message allocators: NumPy for host buffers and RMM
    `DeviceBuffer` for CUDA buffers."""
    ucxx.register_am_allocator(lambda n: np.empty(n, dtype=np.uint8), "host")
    ucxx.register_am_allocator(lambda n: rmm.DeviceBuffer(size=n), "cuda")
def client(port, func, comm_api):
    """Client process: receive a serialized CUDA object ITERATIONS times,
    send a shutdown receipt, then compare the last received object against a
    locally rebuilt reference.

    Parameters
    ----------
    port
        Server port to connect to.
    func
        Cloudpickled zero-argument callable producing the reference object.
    comm_api
        "tag" for tag-matched transfers or "am" for active messages.
    """
    # wait for server to come up
    # receive cudf object
    # deserialize
    # assert deserialized msg is cdf
    # send receipt

    from distributed.utils import nbytes

    ucxx.init()

    if comm_api == "am":
        register_am_allocators()

    # must create context before importing
    # cudf/cupy/etc

    async def read():
        await asyncio.sleep(1)
        ep = await get_ep("client", port)
        msg = None
        import cupy

        cupy.cuda.set_allocator(None)

        for i in range(ITERATIONS):
            print(f"Client iteration {i}")
            if comm_api == "tag":
                frames, msg = await recv(ep)
            else:
                frames, msg = await am_recv(ep)

        close_msg = b"shutdown listener"

        if comm_api == "tag":
            close_msg_size = np.array([len(close_msg)], dtype=np.uint64)
            await ep.send(close_msg_size)
            await ep.send(close_msg)
        else:
            await ep.am_send(close_msg)

        print("Shutting Down Client...")
        return msg["data"]

    rx_cuda_obj = get_event_loop().run_until_complete(read())
    # Exercise the received object to make sure it is usable on device.
    rx_cuda_obj + rx_cuda_obj
    num_bytes = nbytes(rx_cuda_obj)
    print(f"TOTAL DATA RECEIVED: {num_bytes}")

    cuda_obj_generator = cloudpickle.loads(func)
    pure_cuda_obj = cuda_obj_generator()

    if isinstance(rx_cuda_obj, cupy.ndarray):
        cupy.testing.assert_allclose(rx_cuda_obj, pure_cuda_obj)
    else:
        from cudf.testing._utils import assert_eq

        assert_eq(rx_cuda_obj, pure_cuda_obj)
def server(port, func, comm_api):
    """Server process: build the CUDA object from `func`, serialize it to
    frames, send it to the client ITERATIONS times, then wait for the
    client's shutdown receipt before closing the listener.

    Parameters mirror `client`.
    """
    # create listener receiver
    # write cudf object
    # confirm message is sent correctly

    from distributed.comm.utils import to_frames
    from distributed.protocol import to_serialize

    ucxx.init()

    if comm_api == "am":
        register_am_allocators()

    async def f(listener_port):
        # coroutine shows up when the client asks
        # to connect
        async def write(ep):
            import cupy

            cupy.cuda.set_allocator(None)

            print("CREATING CUDA OBJECT IN SERVER...")
            cuda_obj_generator = cloudpickle.loads(func)
            cuda_obj = cuda_obj_generator()
            msg = {"data": to_serialize(cuda_obj)}
            frames = await to_frames(msg, serializers=("cuda", "dask", "pickle"))
            for i in range(ITERATIONS):
                print(f"Server iteration {i}")
                # Send meta data
                if comm_api == "tag":
                    await send(ep, frames)
                else:
                    await am_send(ep, frames)

            print("CONFIRM RECEIPT")
            close_msg = b"shutdown listener"

            if comm_api == "tag":
                msg_size = np.empty(1, dtype=np.uint64)
                await ep.recv(msg_size)

                msg = np.empty(msg_size[0], dtype=np.uint8)
                await ep.recv(msg)
            else:
                msg = await ep.am_recv()

            recv_msg = msg.tobytes()
            assert recv_msg == close_msg
            print("Shutting Down Server...")
            await ep.close()
            lf.close()

        lf = ucxx.create_listener(write, port=listener_port)

        try:
            while not lf.closed():
                await asyncio.sleep(0.1)
        # except ucxx.UCXCloseError:
        #     pass
        except Exception as e:
            print(f"Exception: {e=}")

    loop = get_event_loop()
    loop.run_until_complete(f(port))
def dataframe():
    """Build a deterministic random cuDF DataFrame with 2**26 rows."""
    import numpy as np

    import cudf

    # Always generate the same random numbers.
    np.random.seed(0)
    nrows = 2**26
    columns = {"a": np.random.random(nrows), "b": np.random.random(nrows)}
    index = np.random.randint(nrows, size=nrows)
    return cudf.DataFrame(columns, index=index)
def series():
    """Build a cuDF Series of 90000 consecutive integers."""
    import cudf

    values = np.arange(90000)
    return cudf.Series(values)
def empty_dataframe():
    """Build an empty cuDF DataFrame that keeps float64 columns "a" and "b"."""
    import cudf

    df = cudf.DataFrame({"a": [1.0], "b": [1.0]})
    return df.head(0)
def cupy_obj():
    """Build a large (10**8 element) CuPy array."""
    import cupy

    return cupy.arange(10**8)
@pytest.mark.slow
@pytest.mark.skipif(
    get_num_gpus() <= 2, reason="Machine does not have more than two GPUs"
)
@pytest.mark.parametrize(
    "cuda_obj_generator", [dataframe, empty_dataframe, series, cupy_obj]
)
@pytest.mark.parametrize("comm_api", ["tag", "am"])
def test_send_recv_cu(cuda_obj_generator, comm_api):
    """Spawn a server and a client process on different GPU pairs and verify
    a CUDA object survives the transfer intact."""
    if comm_api == "am":
        pytest.skip("AM not implemented yet")

    base_env = os.environ
    env_client = base_env.copy()
    # grab first two devices
    cvd = get_cuda_devices()[:2]
    # Reverse the *device list* for the other worker. Fix: reversing the
    # joined string (`cvd[::-1]`) corrupted multi-digit device IDs, e.g.
    # "10,11" became "11,01".
    env_client["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, reversed(cvd)))

    port = random.randint(13000, 15500)
    # serialize function and send to the client and server
    # server will use the return value of the contents,
    # serialize the values, then send serialized values to client.
    # client will compare return values of the deserialized
    # data sent from the server
    func = cloudpickle.dumps(cuda_obj_generator)
    ctx = multiprocessing.get_context("spawn")

    server_process = ctx.Process(
        name="server", target=server, args=[port, func, comm_api]
    )
    client_process = ctx.Process(
        name="client", target=client, args=[port, func, comm_api]
    )

    server_process.start()
    # cudf will ping the driver for validity of device
    # this will influence device on which a cuda context is created.
    # work around is to update env with new CVD before spawning
    os.environ.update(env_client)
    client_process.start()

    server_process.join()
    client_process.join()

    assert server_process.exitcode == 0
    assert client_process.exitcode == 0
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_probe.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import pytest
import ucxx as ucxx
@pytest.mark.asyncio
@pytest.mark.parametrize("transfer_api", ["am", "tag"])
async def test_message_probe(transfer_api):
    """A message sent before the remote endpoint closes remains probe-able
    and receivable afterwards, for both AM and tag transfer APIs."""
    msg = bytearray(b"0" * 10)

    async def server_node(ep):
        # Wait for remote endpoint to close before probing the endpoint for
        # in-transit message and receiving it.
        while not ep.closed():
            await asyncio.sleep(0)  # Yield task

        if transfer_api == "am":
            assert ep._ep.am_probe() is True
            received = bytes(await ep.am_recv())
        else:
            assert ep._ctx.worker.tag_probe(ep._tags["msg_recv"]) is True
            received = bytearray(10)
            await ep.recv(received)
        assert received == msg

        await ep.close()
        listener.close()

    async def client_node(port):
        # Send the message and close immediately; the server receives it
        # only after observing the close.
        ep = await ucxx.create_endpoint(
            ucxx.get_address(),
            port,
        )
        if transfer_api == "am":
            await ep.am_send(msg)
        else:
            await ep.send(msg)

        await ep.close()

    listener = ucxx.create_listener(
        server_node,
    )
    await client_node(listener.port)
    while not listener.closed():
        await asyncio.sleep(0.01)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_config.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import os
from unittest.mock import patch
import pytest
from ucxx._lib_async.utils_test import captured_logger
import ucxx
def test_get_config():
    """`ucxx.get_config()` returns a dict whose TLS default is "all" when
    UCX_TLS is absent from the environment."""
    with patch.dict(os.environ):
        # Unset to test default value
        os.environ.pop("UCX_TLS", None)

        ucxx.reset()
        config = ucxx.get_config()
        assert isinstance(config, dict)
        assert config["TLS"] == "all"
@patch.dict(os.environ, {"UCX_SEG_SIZE": "2M"})
def test_set_env():
    """UCX options set via environment variables show up in the config."""
    ucxx.reset()
    assert ucxx.get_config()["SEG_SIZE"] == os.environ["UCX_SEG_SIZE"]
@patch.dict(os.environ, {"UCX_SEG_SIZE": "2M"})
def test_init_options():
    """Options passed to `ucxx.init` take precedence over the environment."""
    ucxx.reset()
    options = {"SEG_SIZE": "3M"}
    # environment specification should be ignored
    ucxx.init(options)
    assert ucxx.get_config()["SEG_SIZE"] == options["SEG_SIZE"]
@patch.dict(os.environ, {"UCX_SEG_SIZE": "4M"})
def test_init_options_and_env():
    """With `env_takes_precedence=True` the environment overrides options,
    and the caller's options dict is left untouched."""
    ucxx.reset()
    options = {"SEG_SIZE": "3M"}  # Should be ignored
    ucxx.init(options, env_takes_precedence=True)
    assert ucxx.get_config()["SEG_SIZE"] == os.environ["UCX_SEG_SIZE"]
    # Provided options dict was not modified.
    assert options == {"SEG_SIZE": "3M"}
@pytest.mark.skipif(
    ucxx.get_ucx_version() >= (1, 12, 0),
    reason="Beginning with UCX >= 1.12, it's only possible to validate "
    "UCP options but not options from other modules such as UCT. "
    "See https://github.com/openucx/ucx/issues/7519.",
)
def test_init_unknown_option():
    """An option unknown to UCX raises `UCXInvalidParamError` (UCX < 1.12)."""
    ucxx.reset()
    options = {"UNKNOWN_OPTION": "3M"}
    with pytest.raises(ucxx.exceptions.UCXInvalidParamError):
        ucxx.init(options)
def test_init_invalid_option():
    """An invalid option value raises `UCXInvalidParamError`."""
    ucxx.reset()
    with pytest.raises(ucxx.exceptions.UCXInvalidParamError):
        ucxx.init({"SEG_SIZE": "invalid-size"})
@patch.dict(os.environ, {"UCX_SEG_SIZE": "2M"})
def test_logging():
    """
    Test default logging configuration.

    `ucxx.init` emits INFO-level lines on the "ucx" logger and nothing at
    ERROR level by default.
    """
    import logging

    root = logging.getLogger("ucx")

    # ucxx.init will only print INFO LINES
    with captured_logger(root, level=logging.INFO) as foreign_log:
        ucxx.reset()
        options = {"SEG_SIZE": "3M"}
        ucxx.init(options)
    assert len(foreign_log.getvalue()) > 0

    with captured_logger(root, level=logging.ERROR) as foreign_log:
        ucxx.reset()
        options = {"SEG_SIZE": "3M"}
        ucxx.init(options)
    assert len(foreign_log.getvalue()) == 0
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_multiple_nodes.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import numpy as np
import pytest
from ucxx._lib_async.utils_test import wait_listener_client_handlers
import ucxx
def get_somaxconn():
    """Return the kernel's socket listen-backlog limit (net.core.somaxconn)."""
    with open("/proc/sys/net/core/somaxconn", "r") as f:
        first_line = f.readline()
    return int(first_line)
async def hello(ep):
    """Exchange a small NumPy message in both directions and verify it."""
    msg2send = np.arange(10)
    msg2recv = np.empty_like(msg2send)
    # Post both transfers before awaiting so they can progress concurrently.
    f1 = ep.send(msg2send)
    f2 = ep.recv(msg2recv)
    await f1
    await f2
    np.testing.assert_array_equal(msg2send, msg2recv)
    # assert isinstance(ep.ucx_info(), str)
async def server_node(ep):
    """Listener callback: perform the bidirectional handshake with the client."""
    await hello(ep)
async def client_node(port):
    """Connect to a listener at `port`, run the handshake, and close."""
    endpoint = await ucxx.create_endpoint(
        ucxx.get_address(), port, exchange_peer_info_timeout=10.0
    )
    await hello(endpoint)
    await endpoint.close()
@pytest.mark.asyncio
@pytest.mark.parametrize("num_servers", [1, 2, 4])
@pytest.mark.parametrize("num_clients", [1, 10, 50, 100])
async def test_many_servers_many_clients(num_servers, num_clients):
    """Connect many clients to many listeners, throttled so at most
    `somaxconn` connection attempts are outstanding at any time."""
    somaxconn = get_somaxconn()

    listeners = []

    for _ in range(num_servers):
        listeners.append(
            ucxx.create_listener(server_node, exchange_peer_info_timeout=10.0)
        )

    # We ensure no more than `somaxconn` connections are submitted
    # at once. Doing otherwise can block and hang indefinitely.
    for i in range(0, num_clients * num_servers, somaxconn):
        clients = []
        for __ in range(i, min(i + somaxconn, num_clients * num_servers)):
            # Round-robin clients across the available listeners.
            clients.append(client_node(listeners[__ % num_servers].port))
        await asyncio.gather(*clients)

    await asyncio.gather(
        *(wait_listener_client_handlers(listener) for listener in listeners)
    )
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_from_worker_address.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import multiprocessing as mp
import os
import struct
import numpy as np
import pytest
from ucxx._lib_async.utils import get_event_loop, hash64bits
import ucxx
mp = mp.get_context("spawn")
def _test_from_worker_address_server(queue):
    """Server process: publish its worker address via `queue`, receive the
    client's address on tag 0, connect back and send a message on tag 1."""

    async def run():
        # Send worker address to client process via multiprocessing.Queue
        address = ucxx.get_worker_address()
        queue.put(address)

        # Receive address size
        address_size = np.empty(1, dtype=np.int64)
        await ucxx.recv(address_size, tag=0)

        # Receive address buffer on tag 0 and create UCXAddress from it
        remote_address = bytearray(address_size[0])
        await ucxx.recv(remote_address, tag=0)
        remote_address = ucxx.get_ucx_address_from_buffer(remote_address)

        # Create endpoint to remote worker using the received address
        ep = await ucxx.create_endpoint_from_worker_address(remote_address)

        # Send data to client's endpoint
        send_msg = np.arange(10, dtype=np.int64)
        await ep.send(send_msg, tag=1, force_tag=True)

        await ep.close()

    loop = get_event_loop()
    loop.run_until_complete(run())
    ucxx.stop_notifier_thread()
    loop.close()
def _test_from_worker_address_client(queue):
    """Client process: obtain the server's worker address from `queue`,
    connect, send its own address on tag 0, and verify the reply on tag 1."""

    async def run():
        # Read local worker address
        address = ucxx.get_worker_address()

        # Receive worker address from server via multiprocessing.Queue, create
        # endpoint to server
        remote_address = queue.get()
        ep = await ucxx.create_endpoint_from_worker_address(remote_address)

        # Send local address to server on tag 0
        await ep.send(np.array(address.length, np.int64), tag=0, force_tag=True)
        await ep.send(address, tag=0, force_tag=True)

        # Receive message from server
        recv_msg = np.empty(10, dtype=np.int64)
        await ep.recv(recv_msg, tag=1, force_tag=True)

        await ep.close()

        np.testing.assert_array_equal(recv_msg, np.arange(10, dtype=np.int64))

    loop = get_event_loop()
    loop.run_until_complete(run())
    ucxx.stop_notifier_thread()
    loop.close()
def test_from_worker_address():
    """Spawn server and client processes that exchange worker addresses via a
    queue and transfer a message over endpoints built from raw addresses."""
    queue = mp.Queue()

    server = mp.Process(target=_test_from_worker_address_server, args=(queue,))
    server.start()

    client = mp.Process(target=_test_from_worker_address_client, args=(queue,))
    client.start()

    client.join()
    server.join()

    assert not server.exitcode
    assert not client.exitcode
def _get_address_info(address=None):
    """Describe the fixed-size frame used to exchange a worker address.

    Returns a dict with the frame size, the payload capacity after the
    header, the padding needed for `address` (None when no address is
    given), and the full struct format of the frame.
    """
    # Fixed frame size
    frame_size = 10000
    # Header format: Recv Tag (Q) + Send Tag (Q) + UCXAddress.length (Q)
    header_fmt = "QQQ"
    # Payload capacity after the header
    data_length = frame_size - struct.calcsize(header_fmt)
    # Padding required to fill the payload after the address bytes
    if address is None:
        padding_length = None
    else:
        padding_length = data_length - address.length
    # Header + UCXAddress string + padding
    frame_fmt = header_fmt + str(data_length) + "s"
    assert struct.calcsize(frame_fmt) == frame_size

    return {
        "frame_size": frame_size,
        "data_length": data_length,
        "padding_length": padding_length,
        "fixed_size_address_buffer_fmt": frame_fmt,
    }
def _pack_address_and_tag(address, recv_tag, send_tag):
    """Pack `address` and its recv/send tags into the fixed-size frame."""
    address_info = _get_address_info(address)

    # Address buffer followed by zero padding up to the payload capacity.
    padded_address = bytearray(address) + bytearray(address_info["padding_length"])
    packed = struct.pack(
        address_info["fixed_size_address_buffer_fmt"],
        recv_tag,  # Recv Tag
        send_tag,  # Send Tag
        address.length,  # Address buffer length
        padded_address,  # Address buffer + padding
    )

    assert len(packed) == address_info["frame_size"]
    return packed
def _unpack_address_and_tag(address_packed):
    """Unpack a fixed-size frame produced by `_pack_address_and_tag`."""
    frame_fmt = _get_address_info()["fixed_size_address_buffer_fmt"]
    recv_tag, send_tag, address_length, address_padded = struct.unpack(
        frame_fmt, address_packed
    )

    # Swap send and recv tags, as they are used by the remote process in the
    # opposite direction.
    return {
        "address": address_padded[:address_length],
        "recv_tag": send_tag,
        "send_tag": recv_tag,
    }
def _test_from_worker_address_server_fixedsize(num_nodes, queue):
    """Server process for the multinode test: receive one fixed-size
    address+tag frame per client on tag 0 and handle all clients concurrently."""

    async def run():
        async def _handle_client(packed_remote_address):
            # Unpack the fixed-size address+tag buffer
            unpacked = _unpack_address_and_tag(packed_remote_address)
            remote_address = ucxx.get_ucx_address_from_buffer(unpacked["address"])

            # Create endpoint to remote worker using the received address
            ep = await ucxx.create_endpoint_from_worker_address(remote_address)

            # Send data to client's endpoint
            send_msg = np.arange(10, dtype=np.int64)
            await ep.send(send_msg, tag=unpacked["send_tag"], force_tag=True)

            # Receive data from client's endpoint
            recv_msg = np.empty(20, dtype=np.int64)
            await ep.recv(recv_msg, tag=unpacked["recv_tag"], force_tag=True)
            np.testing.assert_array_equal(recv_msg, np.arange(20, dtype=np.int64))

        # Send worker address to client processes via multiprocessing.Queue,
        # one entry for each client.
        address = ucxx.get_worker_address()
        for i in range(num_nodes):
            queue.put(address)

        address_info = _get_address_info()

        server_tasks = []
        for i in range(num_nodes):
            # Receive fixed-size address+tag buffer on tag 0
            packed_remote_address = bytearray(address_info["frame_size"])
            await ucxx.recv(packed_remote_address, tag=0)

            # Create an async task for client
            server_tasks.append(_handle_client(packed_remote_address))

        # Await handling each client request
        await asyncio.gather(*server_tasks)

    loop = get_event_loop()
    loop.run_until_complete(run())
    ucxx.stop_notifier_thread()
    loop.close()
def _test_from_worker_address_client_fixedsize(queue):
    """Client process for the multinode test: send its address plus two
    random tags in a fixed-size frame, then exchange messages on those tags."""

    async def run():
        # Read local worker address
        address = ucxx.get_worker_address()
        # Random per-client tags so concurrent clients don't collide.
        recv_tag = hash64bits(os.urandom(16))
        send_tag = hash64bits(os.urandom(16))
        packed_address = _pack_address_and_tag(address, recv_tag, send_tag)

        # Receive worker address from server via multiprocessing.Queue, create
        # endpoint to server
        remote_address = queue.get()
        ep = await ucxx.create_endpoint_from_worker_address(remote_address)

        # Send local address to server on tag 0
        await ep.send(packed_address, tag=0, force_tag=True)

        # Receive message from server
        recv_msg = np.empty(10, dtype=np.int64)
        await ep.recv(recv_msg, tag=recv_tag, force_tag=True)

        np.testing.assert_array_equal(recv_msg, np.arange(10, dtype=np.int64))

        # Send message to server
        send_msg = np.arange(20, dtype=np.int64)
        await ep.send(send_msg, tag=send_tag, force_tag=True)

    loop = get_event_loop()
    loop.run_until_complete(run())
    ucxx.stop_notifier_thread()
    loop.close()
@pytest.mark.parametrize("num_nodes", [1, 2, 4, 8])
def test_from_worker_address_multinode(num_nodes):
    """One server process serves `num_nodes` client processes, each
    exchanging addresses and tagged messages; all must exit cleanly."""
    queue = mp.Queue()

    server = mp.Process(
        target=_test_from_worker_address_server_fixedsize,
        args=(num_nodes, queue),
    )
    server.start()

    clients = []
    for i in range(num_nodes):
        client = mp.Process(
            target=_test_from_worker_address_client_fixedsize,
            args=(queue,),
        )
        client.start()
        clients.append(client)

    for client in clients:
        client.join()

    server.join()

    assert not server.exitcode
    # Fix: previously only the last-started client's exitcode was checked;
    # verify every client exited successfully.
    for client in clients:
        assert not client.exitcode
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_ucx_getters.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from ucxx._lib_async.utils_test import wait_listener_client_handlers
import ucxx
@pytest.mark.asyncio
async def test_get_ucp_worker():
    """The UCP worker handle is an int shared by listener- and client-side."""
    worker_handle = ucxx.get_ucp_worker()
    assert isinstance(worker_handle, int)

    async def on_connect(server_ep):
        # The server-side endpoint must report the same underlying worker.
        assert server_ep.get_ucp_worker() == worker_handle

    listener = ucxx.create_listener(on_connect)
    endpoint = await ucxx.create_endpoint(ucxx.get_address(), listener.port)
    assert endpoint.get_ucp_worker() == worker_handle
    await endpoint.close()
    await wait_listener_client_handlers(listener)
@pytest.mark.asyncio
async def test_get_endpoint():
    """Both server- and client-side endpoints expose a non-zero UCP handle."""

    async def on_connect(server_ep):
        server_handle = server_ep.get_ucp_endpoint()
        assert isinstance(server_handle, int)
        assert server_handle > 0

    listener = ucxx.create_listener(on_connect)
    endpoint = await ucxx.create_endpoint(ucxx.get_address(), listener.port)
    client_handle = endpoint.get_ucp_endpoint()
    assert isinstance(client_handle, int)
    assert client_handle > 0
    await endpoint.close()
    await wait_listener_client_handlers(listener)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_info.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import pytest
import ucxx as ucxx
@pytest.fixture(autouse=True)
def reset():
    # Give every test in this module a clean UCX context: reset before the
    # test runs and again afterwards so state cannot leak between tests.
    ucxx.reset()
    yield
    ucxx.reset()
def test_context_info():
    """``get_ucp_context_info`` returns a human-readable string."""
    context_info = ucxx.get_ucp_context_info()
    assert isinstance(context_info, str)
def test_worker_info():
    """``get_ucp_worker_info`` returns a human-readable string."""
    worker_info = ucxx.get_ucp_worker_info()
    assert isinstance(worker_info, str)
@pytest.mark.parametrize(
    "transports",
    ["self", "tcp", "self,tcp"],
)
def test_check_transport(transports):
    """Initialize UCX with an explicit ``TLS`` list and verify which
    transports become active.

    Fix: the final check previously used ``any(not at.startswith(it) ...)``,
    which is true whenever *any* active transport differs from the inactive
    one, so it (almost) never failed. The intent is that a transport excluded
    from ``TLS`` must not appear among the active transports at all.
    """
    transports_list = transports.split(",")
    inactive_transports = list(set(["self", "tcp"]) - set(transports_list))
    ucxx.reset()
    options = {"TLS": transports, "NET_DEVICES": "all"}
    ucxx.init(options)
    active_transports = ucxx.get_active_transports()
    # Every requested transport must be active...
    for t in transports_list:
        assert any(at.startswith(t) for at in active_transports)
    # ...and no transport excluded from TLS may be active.
    for it in inactive_transports:
        assert not any(at.startswith(it) for at in active_transports)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_disconnect.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import logging
import multiprocessing as mp
from io import StringIO
from queue import Empty
import numpy as np
import pytest
from ucxx._lib_async.utils import get_event_loop
import ucxx
# Use the "spawn" start method so child processes start from a fresh
# interpreter rather than a fork of this (possibly UCX-initialized) process.
mp = mp.get_context("spawn")
async def mp_queue_get_nowait(queue):
    """Poll *queue* until an item is available without blocking the event loop.

    A blocking ``Queue.get`` would stall the whole thread, so we repeatedly
    try ``get_nowait`` and yield to the event loop between attempts.
    """
    while True:
        try:
            item = queue.get_nowait()
        except Empty:
            await asyncio.sleep(0.01)
        else:
            return item
def _test_shutdown_unexpected_closed_peer_server(
    client_queue, server_queue, endpoint_error_handling
):
    """Server process body: check whether an endpoint whose peer died without
    closing is still reported alive.

    Publishes its listener port on ``client_queue``, sends one message to the
    connecting client, waits on ``server_queue`` for the signal that the
    client process has exited, then samples ``ep._ep.is_alive()`` into the
    module-global ``ep_is_alive`` for the assertions at the bottom.
    """
    global ep_is_alive
    ep_is_alive = None

    async def run():
        async def server_node(ep):
            try:
                global ep_is_alive
                await ep.send(np.arange(100, dtype=np.int64))
                # Waiting for signal to close the endpoint
                await mp_queue_get_nowait(server_queue)
                # At this point, the client should have died and the endpoint
                # is not alive anymore. `True` only when endpoint error
                # handling is enabled.
                ep_is_alive = ep._ep.is_alive()
                await ep.close()
            finally:
                # Always close the listener so the polling loop in `run()`
                # below can terminate.
                listener.close()

        listener = ucxx.create_listener(
            server_node, endpoint_error_handling=endpoint_error_handling
        )
        client_queue.put(listener.port)
        while not listener.closed():
            await asyncio.sleep(0.1)

    # Capture UCX debug logging so the no-error-handling branch can assert on
    # the send-shutdown failure message below.
    log_stream = StringIO()
    logging.basicConfig(stream=log_stream, level=logging.DEBUG)
    get_event_loop().run_until_complete(run())
    log = log_stream.getvalue()
    if endpoint_error_handling is True:
        assert ep_is_alive is False
    else:
        assert ep_is_alive
        assert log.find("""UCXError('<[Send shutdown]""") != -1
    ucxx.stop_notifier_thread()
def _test_shutdown_unexpected_closed_peer_client(
    client_queue, server_queue, endpoint_error_handling
):
    """Client process body: connect to the server, receive one message, and
    exit without closing the endpoint (the "unexpected" disconnect)."""

    async def run():
        port = client_queue.get()
        endpoint = await ucxx.create_endpoint(
            ucxx.get_address(),
            port,
            endpoint_error_handling=endpoint_error_handling,
        )
        buffer = np.empty(100, dtype=np.int64)
        await endpoint.recv(buffer)

    get_event_loop().run_until_complete(run())
    ucxx.stop_notifier_thread()
@pytest.mark.parametrize("endpoint_error_handling", [True, False])
def test_shutdown_unexpected_closed_peer(caplog, endpoint_error_handling):
    """
    Test clean server shutdown after unexpected peer close

    This will cause some UCX warnings to be issued, but this is expected.
    The main goal is to assert that the processes exit without errors
    despite a somewhat messy initial state.

    Fixes: docstring grammar, and the skip message's implicit string
    concatenation was missing a space ("...udtransport...").
    """
    if endpoint_error_handling is False:
        pytest.xfail(
            "Temporarily xfailing, due to https://github.com/rapidsai/ucxx/issues/21"
        )
    if endpoint_error_handling is False and any(
        [
            t.startswith(i)
            for i in ("rc", "dc", "ud")
            for t in ucxx.get_active_transports()
        ]
    ):
        pytest.skip(
            "Endpoint error handling is required when rc, dc or ud "
            "transport is enabled"
        )

    client_queue = mp.Queue()
    server_queue = mp.Queue()
    p1 = mp.Process(
        target=_test_shutdown_unexpected_closed_peer_server,
        args=(client_queue, server_queue, endpoint_error_handling),
    )
    p1.start()
    p2 = mp.Process(
        target=_test_shutdown_unexpected_closed_peer_client,
        args=(client_queue, server_queue, endpoint_error_handling),
    )
    p2.start()
    p2.join()
    # Only signal the server once the client has fully exited, so the server
    # observes a peer that disappeared without closing its endpoint.
    server_queue.put("client is down")
    p1.join()

    assert not p1.exitcode
    assert not p2.exitcode
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_endpoint.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
from queue import Empty, Queue
import pytest
import ucxx
@pytest.mark.asyncio
@pytest.mark.parametrize("server_close_callback", [True, False])
async def test_close_callback(server_close_callback):
    """A close callback registered on either side runs when the endpoint
    closes; parametrized over which side (server or client) registers it."""
    # Mutable cell so the nested callback can record that it ran.
    closed = [False]

    def _close_callback():
        closed[0] = True

    async def server_node(ep):
        if server_close_callback is True:
            ep.set_close_callback(_close_callback)
        await ep.close()

    async def client_node(port):
        ep = await ucxx.create_endpoint(
            ucxx.get_address(),
            port,
        )
        if server_close_callback is False:
            ep.set_close_callback(_close_callback)
        await ep.close()

    listener = ucxx.create_listener(
        server_node,
    )
    await client_node(listener.port)
    # Spin until the callback has actually been invoked.
    while closed[0] is False:
        await asyncio.sleep(0.01)
@pytest.mark.asyncio
@pytest.mark.parametrize("transfer_api", ["am", "tag", "tag_multi"])
async def test_cancel(transfer_api):
    """A posted receive must raise ``UCXCanceledError`` when the endpoint is
    closed before the peer ever sends a matching message."""
    if transfer_api == "am":
        pytest.skip("AM not implemented yet")

    # Thread-safe handshake between the client coroutine and the listener
    # callback: the listener only returns after the client posted its recv.
    q = Queue()

    async def server_node(ep):
        while True:
            try:
                # Make sure the listener doesn't return before the client schedules
                # the message to receive. If this is not done, UCXConnectionResetError
                # may be raised instead of UCXCanceledError.
                q.get(timeout=0.01)
                return
            except Empty:
                await asyncio.sleep(0)

    async def client_node(port):
        ep = await ucxx.create_endpoint(ucxx.get_address(), port)
        try:
            # Post a receive that can never complete, waiting just long
            # enough for it to be submitted.
            if transfer_api == "am":
                _, pending = await asyncio.wait(
                    [asyncio.create_task(ep.am_recv())], timeout=0.001
                )
            elif transfer_api == "tag":
                msg = bytearray(1)
                _, pending = await asyncio.wait(
                    [asyncio.create_task(ep.recv(msg))], timeout=0.001
                )
            else:
                _, pending = await asyncio.wait(
                    [asyncio.create_task(ep.recv_multi())], timeout=0.001
                )
            # Let the server return (and drop the connection), then wait for
            # the pending recv to resolve with an exception.
            q.put("close")
            await asyncio.wait(pending)
            (pending,) = pending
            result = pending.result()
            assert isinstance(result, Exception)
            raise result
        except Exception as e:
            await ep.close()
            raise e

    listener = ucxx.create_listener(server_node)
    with pytest.raises(
        ucxx.exceptions.UCXCanceledError,
        # TODO: Add back custom UCXCanceledError messages?
    ):
        await client_node(listener.port)
    listener.close()
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_from_worker_address_error.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import multiprocessing as mp
import os
import re
from unittest.mock import patch
import numpy as np
import pytest
from ucxx._lib_async.utils import get_event_loop
import ucxx
# Use the "spawn" start method so child processes start from a fresh
# interpreter rather than a fork of this (possibly UCX-initialized) process.
mp = mp.get_context("spawn")
def _test_from_worker_address_error_server(q1, q2, error_type):
    """Server process body for the error-path tests.

    For ``error_type == "unreachable"`` the worker is shut down *before* its
    address is published on ``q1``, so the client can never reach it. For the
    timeout variants the address is published first, and the worker is shut
    down only after the client reports on ``q2`` that its endpoint is ready.
    """

    async def run():
        worker_address = bytearray(ucxx.get_worker_address())

        if error_type == "unreachable":
            # Shut the worker down first, then hand the now-stale address to
            # the client process via the multiprocessing.Queue.
            ucxx.reset()
            q1.put(worker_address)
        else:
            # Publish the worker address, wait for the client to confirm its
            # endpoint is connected, then shut the worker down.
            q1.put(worker_address)
            assert q2.get() == "ready"
            ucxx.reset()

    loop = get_event_loop()
    loop.run_until_complete(run())
    ucxx.stop_notifier_thread()
    loop.close()
def _test_from_worker_address_error_client(q1, q2, error_type):
    """Client process body for the error-path tests.

    Receives the server's worker address on ``q1`` and then, depending on
    ``error_type``, expects either endpoint creation to fail (server already
    gone) or a send/recv to fail after the server shuts down mid-connection
    (readiness signaled back on ``q2``).
    """

    async def run():
        # Receive worker address from server via multiprocessing.Queue
        remote_address = ucxx.get_ucx_address_from_buffer(q1.get())
        if error_type == "unreachable":
            with pytest.raises(
                ucxx.exceptions.UCXError,
                match="Destination is unreachable|Endpoint timeout",
            ):
                # Here, two cases may happen:
                # 1. With TCP creating endpoint will immediately raise
                #    "Destination is unreachable"
                # 2. With rc/ud creating endpoint will succeed, but raise
                #    "Endpoint timeout" after UCX_UD_TIMEOUT seconds have
                #    passed. We need to keep progressing ucxx until timeout
                #    is raised.
                ep = await ucxx.create_endpoint_from_worker_address(remote_address)
        else:
            # Create endpoint to remote worker, and:
            #
            # 1. For timeout_am_send/timeout_send:
            #    - inform remote worker that local endpoint is ready for remote
            #      shutdown;
            #    - wait for remote worker to shutdown and confirm;
            #    - attempt to send message.
            #
            # 2. For timeout_am_recv/timeout_recv:
            #    - schedule ep.recv;
            #    - inform remote worker that local endpoint is ready for remote
            #      shutdown;
            #    - wait for it to shutdown and confirm
            #    - wait for recv message.
            ep = await ucxx.create_endpoint_from_worker_address(remote_address)
            if re.match("timeout.*send", error_type):
                q2.put("ready")
                # Wait for remote endpoint to disconnect; progress manually
                # unless the thread progress mode does it for us.
                while ep._ep.is_alive():
                    await asyncio.sleep(0)
                    if not ucxx.core._get_ctx().progress_mode.startswith("thread"):
                        ucxx.progress()
                # TCP generally raises `UCXConnectionResetError`, whereas InfiniBand
                # raises `UCXEndpointTimeoutError`
                with pytest.raises(
                    (
                        ucxx.exceptions.UCXConnectionResetError,
                        ucxx.exceptions.UCXEndpointTimeoutError,
                    )
                ):
                    if error_type == "timeout_am_send":
                        await asyncio.wait_for(ep.am_send(np.zeros(10)), timeout=1.0)
                    else:
                        await asyncio.wait_for(
                            ep.send(np.zeros(10), tag=0, force_tag=True), timeout=1.0
                        )
            else:
                # TCP generally raises `UCXConnectionResetError`, whereas InfiniBand
                # raises `UCXEndpointTimeoutError`
                with pytest.raises(
                    (
                        ucxx.exceptions.UCXConnectionResetError,
                        ucxx.exceptions.UCXEndpointTimeoutError,
                    )
                ):
                    if error_type == "timeout_am_recv":
                        task = asyncio.wait_for(ep.am_recv(), timeout=3.0)
                    else:
                        msg = np.empty(10)
                        task = asyncio.wait_for(
                            ep.recv(msg, tag=0, force_tag=True), timeout=3.0
                        )
                    q2.put("ready")
                    while ep._ep.is_alive():
                        await asyncio.sleep(0)
                        if not ucxx.core._get_ctx().progress_mode.startswith("thread"):
                            ucxx.progress()
                    await task

    loop = get_event_loop()
    loop.run_until_complete(run())
    ucxx.stop_notifier_thread()
    loop.close()
@pytest.mark.parametrize(
    "error_type",
    [
        "unreachable",
        "timeout_am_send",
        "timeout_am_recv",
        "timeout_send",
        "timeout_recv",
    ],
)
@patch.dict(
    os.environ,
    {
        "UCX_WARN_UNUSED_ENV_VARS": "n",
        # Set low timeouts to ensure tests quickly raise as expected
        "UCX_KEEPALIVE_INTERVAL": "100ms",
        "UCX_UD_TIMEOUT": "100ms",
    },
)
def test_from_worker_address_error(error_type):
    """Run the server/client error-path bodies in separate processes and
    check both exit cleanly for every failure scenario."""
    if error_type in ["timeout_am_send", "timeout_am_recv"]:
        pytest.skip("AM not implemented yet")

    q1 = mp.Queue()
    q2 = mp.Queue()
    server = mp.Process(
        target=_test_from_worker_address_error_server,
        args=(q1, q2, error_type),
    )
    server.start()
    client = mp.Process(
        target=_test_from_worker_address_error_client,
        args=(q1, q2, error_type),
    )
    client.start()
    server.join()
    client.join()
    assert not server.exitcode
    # Older UCX versions lack the fixes required for the timeout scenarios on
    # rc/ud transports; treat a failing client as an expected failure there.
    if ucxx.get_ucx_version() < (1, 12, 0) and client.exitcode == 1:
        if all(t in error_type for t in ["timeout", "send"]):
            pytest.xfail(
                "Requires https://github.com/openucx/ucx/pull/7527 with rc/ud."
            )
        elif all(t in error_type for t in ["timeout", "recv"]):
            pytest.xfail(
                "Requires https://github.com/openucx/ucx/pull/7531 with rc/ud."
            )
    assert not client.exitcode
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/conftest.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import os
import pytest
import ucxx
# Prevent calls such as `cudf = pytest.importorskip("cudf")` from initializing
# a CUDA context. Such calls may cause tests that must initialize the CUDA
# context on the appropriate device to fail.
# For example, without `RAPIDS_NO_INITIALIZE=True`, `test_benchmark_cluster`
# will succeed if running alone, but fails when all tests are run in batch.
os.environ["RAPIDS_NO_INITIALIZE"] = "True"
def pytest_addoption(parser):
    """Register the ``--runslow`` command-line flag (off by default)."""
    parser.addoption(
        "--runslow",
        action="store_true",
        default=False,
        help="run slow tests",
    )
def pytest_collection_modifyitems(config, items):
    """Skip tests marked ``slow`` unless ``--runslow`` was given."""
    if config.getoption("--runslow"):
        # --runslow given on the command line: run everything as collected.
        return
    slow_skip_marker = pytest.mark.skip(reason="need --runslow option to run")
    for test_item in items:
        if "slow" in test_item.keywords:
            test_item.add_marker(slow_skip_marker)
def handle_exception(loop, context):
    """asyncio exception handler that just prints the error (see event_loop)."""
    print(context.get("exception", context["message"]))
# Let's make sure that UCX gets time to cancel
# progress tasks before closing the event loop.
@pytest.fixture()
def event_loop():
    """Event loop fixture that resets UCX before and after each test.

    NOTE(review): this originally read ``def event_loop(scope="session")``.
    pytest ignores fixture parameters that have default values, so that
    argument was dead code (the fixture has always been function-scoped).
    If a session-scoped loop was actually intended it must be spelled
    ``@pytest.fixture(scope="session")`` — confirm intent before changing,
    as that would alter test isolation.
    """
    loop = asyncio.new_event_loop()
    try:
        loop.set_exception_handler(handle_exception)
        ucxx.reset()
        yield loop
        ucxx.reset()
        # Give pending callbacks one final chance to run before closing.
        loop.run_until_complete(asyncio.sleep(0))
    finally:
        loop.close()
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem: pytest.Function):
    """
    Add timeout for tests, and optionally rerun on failure.

    Add timeout for tests with `pytest.mark.asyncio_timeout` marker as specified by the
    decorator, otherwise a default timeout of 60 seconds for regular tests and 600
    seconds for tests marked slow.

    Optionally rerun the test if it failed, for that the test has to be marked with
    `pytest.mark.rerun_on_failure(reruns)`. This is similar to `pytest-rerunfailures`,
    but that module closes the event loop before this function has awaited, making the
    two incompatible.

    Fixes over the previous revision:
    - ``len(rerun_marker.args) >= 0`` was always true, so a bare
      ``@pytest.mark.rerun_on_failure()`` raised ``IndexError``; it now falls
      back to the default of a single attempt.
    - A failure before the last attempt executed ``break``, which abandoned
      the remaining attempts *and* swallowed the exception (the test passed
      silently); failures now trigger a retry as intended.
    """
    timeout_marker = pyfuncitem.get_closest_marker("asyncio_timeout")
    slow_marker = pyfuncitem.get_closest_marker("slow")
    rerun_marker = pyfuncitem.get_closest_marker("rerun_on_failure")

    default_timeout = 600.0 if slow_marker else 60.0
    timeout = float(timeout_marker.args[0]) if timeout_marker else default_timeout
    if timeout <= 0.0:
        raise ValueError("The `pytest.mark.asyncio_timeout` value must be positive.")

    if rerun_marker and len(rerun_marker.args) > 0:
        reruns = rerun_marker.args[0]
        if not isinstance(reruns, int) or reruns < 0:
            raise ValueError("The `pytest.mark.rerun` value must be a positive integer")
    else:
        reruns = 1

    if asyncio.iscoroutinefunction(pyfuncitem.obj) and timeout > 0.0:

        async def wrapped_obj(*args, **kwargs):
            for attempt in range(reruns):
                try:
                    try:
                        return await asyncio.wait_for(
                            inner_obj(*args, **kwargs), timeout=timeout
                        )
                    except (asyncio.CancelledError, asyncio.TimeoutError):
                        # pytest.fail raises an outcome exception that is not
                        # caught by the retry handler below.
                        pytest.fail(
                            f"{pyfuncitem.name} timed out after {timeout} seconds."
                        )
                except Exception:
                    if attempt == (reruns - 1):
                        raise
                    # Otherwise loop around and retry the test.

        inner_obj = pyfuncitem.obj
        pyfuncitem.obj = wrapped_obj

    yield
def pytest_configure(config: pytest.Config):
    """Register the custom markers used by this test suite."""
    marker_docs = (
        "asyncio_timeout(timeout): cancels the test execution after the specified "
        "number of seconds",
        "rerun_on_failure(reruns): reruns test if it fails for the specified number "
        "of reruns",
        "slow: mark test as slow to run",
    )
    for marker_doc in marker_docs:
        config.addinivalue_line("markers", marker_doc)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_tags.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import pytest
import ucxx as ucxx
@pytest.mark.asyncio
async def test_tag_match():
    """Receives must match on tag: a recv posted for ``"msg2"`` cannot
    complete while the server has only sent on tag ``"msg1"``."""
    msg1 = bytes("msg1", "utf-8")
    msg2 = bytes("msg2", "utf-8")

    async def server_node(ep):
        f1 = ep.send(msg1, tag="msg1")
        await asyncio.sleep(1)  # Let msg1 finish
        f2 = ep.send(msg2, tag="msg2")
        await asyncio.gather(f1, f2)

    lf = ucxx.create_listener(server_node)
    ep = await ucxx.create_endpoint(ucxx.get_address(), lf.port)
    m1, m2 = (bytearray(len(msg1)), bytearray(len(msg2)))

    # Fix: the original guarded on `hasattr(asyncio, "create_future")`, which
    # does not exist at module level, so the check was always False and the
    # `ensure_future` fallback always ran (the check was meant for
    # `create_task`, available since Python 3.7). The fallback is unnecessary
    # on supported Python versions.
    f2 = asyncio.create_task(ep.recv(m2, tag="msg2"))

    # At this point f2 shouldn't be able to finish because its
    # tag "msg2" doesn't match the servers send tag "msg1"
    done, pending = await asyncio.wait({f2}, timeout=0.01)
    assert f2 in pending

    # "msg1" should be ready
    await ep.recv(m1, tag="msg1")
    assert m1 == msg1
    await f2
    assert m2 == msg2
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_send_recv.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import functools
import pytest
from ucxx._lib_async.utils_test import wait_listener_client_handlers
import ucxx
np = pytest.importorskip("numpy")
# Message sizes from 1 byte up to 16 MiB in factor-of-16 steps.
msg_sizes = [2**i for i in range(0, 25, 4)]
# Unsigned byte, little-endian int64, and float64 element types.
dtypes = ["|u1", "<i8", "f8"]
def make_echo_server(create_empty_data):
    """
    Build an echo-server coroutine whose receive buffers are produced by
    ``create_empty_data(nbytes)``.

    The peer is expected to first send the payload size as a single uint64,
    then the payload itself, and finally receive the echoed payload back::

        >>> await ep.send(msg_size)     # size of the real message (in bytes)
        >>> await ep.send(msg)          # send the real message
        >>> await ep.recv(responds)     # receive the echo
    """

    async def echo_server(ep):
        """Receive one sized message, echo it back, then close the endpoint."""
        size_buf = np.empty(1, dtype=np.uint64)
        await ep.recv(size_buf)
        payload = create_empty_data(size_buf[0])
        await ep.recv(payload)
        await ep.send(payload)
        await ep.close()

    return echo_server
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
async def test_send_recv_bytes(size):
    """Round-trip a ``bytearray`` of ``size`` bytes through the echo server."""
    msg = bytearray(b"m" * size)
    msg_size = np.array([len(msg)], dtype=np.uint64)

    listener = ucxx.create_listener(make_echo_server(lambda n: bytearray(n)))
    client = await ucxx.create_endpoint(ucxx.get_address(), listener.port)
    # Protocol expected by the echo server: size first, then payload.
    await client.send(msg_size)
    await client.send(msg)
    resp = bytearray(size)
    await client.recv(resp)
    assert resp == msg
    await client.close()
    await wait_listener_client_handlers(listener)
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("dtype", dtypes)
async def test_send_recv_numpy(size, dtype):
    """Round-trip a NumPy array through the byte-oriented echo server.

    Fix: close the client endpoint once the echo has been verified, matching
    ``test_send_recv_bytes``; previously the endpoint was left open.
    """
    msg = np.arange(size, dtype=dtype)
    msg_size = np.array([msg.nbytes], dtype=np.uint64)

    listener = ucxx.create_listener(
        make_echo_server(lambda n: np.empty(n, dtype=np.uint8))
    )
    client = await ucxx.create_endpoint(ucxx.get_address(), listener.port)
    # Protocol expected by the echo server: size first, then payload.
    await client.send(msg_size)
    await client.send(msg)
    resp = np.empty_like(msg)
    await client.recv(resp)
    np.testing.assert_array_equal(resp, msg)
    await client.close()
    await wait_listener_client_handlers(listener)
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.rerun_on_failure(3)
async def test_send_recv_cupy(size, dtype):
    """Round-trip a CuPy (device) array through the echo server."""
    cupy = pytest.importorskip("cupy")
    msg = cupy.arange(size, dtype=dtype)
    msg_size = np.array([msg.nbytes], dtype=np.uint64)

    listener = ucxx.create_listener(
        make_echo_server(lambda n: cupy.empty((n,), dtype=np.uint8))
    )
    client = await ucxx.create_endpoint(ucxx.get_address(), listener.port)
    # Protocol expected by the echo server: size first, then payload.
    await client.send(msg_size)
    await client.send(msg)
    resp = cupy.empty_like(msg)
    await client.recv(resp)
    # Compare on the host to avoid device-side assertion machinery.
    np.testing.assert_array_equal(cupy.asnumpy(resp), cupy.asnumpy(msg))
    await wait_listener_client_handlers(listener)
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.rerun_on_failure(3)
async def test_send_recv_numba(size, dtype):
    """Round-trip a Numba CUDA device array through the echo server."""
    cuda = pytest.importorskip("numba.cuda")
    ary = np.arange(size, dtype=dtype)
    msg = cuda.to_device(ary)
    msg_size = np.array([msg.nbytes], dtype=np.uint64)

    listener = ucxx.create_listener(
        make_echo_server(lambda n: cuda.device_array((n,), dtype=np.uint8))
    )
    client = await ucxx.create_endpoint(ucxx.get_address(), listener.port)
    # Protocol expected by the echo server: size first, then payload.
    await client.send(msg_size)
    await client.send(msg)
    resp = cuda.device_array_like(msg)
    await client.recv(resp)
    # np.array(...) copies the device arrays back to the host for comparison.
    np.testing.assert_array_equal(np.array(resp), np.array(msg))
    await wait_listener_client_handlers(listener)
@pytest.mark.asyncio
@pytest.mark.skip(reason="See https://github.com/rapidsai/ucxx/issues/104")
async def test_send_recv_error():
    """Receiving into a buffer larger than the sent message must raise
    ``UCXMessageTruncatedError`` (currently skipped, see linked issue)."""

    async def say_hey_server(ep):
        await ep.send(bytearray(b"Hey"))
        await ep.close()

    listener = ucxx.create_listener(say_hey_server)
    client = await ucxx.create_endpoint(ucxx.get_address(), listener.port)

    # 100-byte buffer for a 3-byte message triggers the length mismatch.
    msg = bytearray(100)
    # TODO: remove "Message truncated" match when Python futures accept custom
    # exception messages.
    with pytest.raises(
        ucxx.exceptions.UCXMessageTruncatedError,
        match=r"length mismatch: 3 \(got\) != 100 \(expected\)|Message truncated",
    ):
        await client.recv(msg)
    await wait_listener_client_handlers(listener)
    await client.close()
    listener.close()
@pytest.mark.asyncio
async def test_send_recv_obj():
    """Round-trip an object through a ``recv_obj``/``send_obj`` echo server."""

    async def echo_obj_server(ep):
        received = await ep.recv_obj()
        await ep.send_obj(received)

    listener = ucxx.create_listener(echo_obj_server)
    client = await ucxx.create_endpoint(ucxx.get_address(), listener.port)

    payload = bytearray(b"hello")
    await client.send_obj(payload)
    echoed = await client.recv_obj()
    assert payload == echoed
    await wait_listener_client_handlers(listener)
@pytest.mark.asyncio
async def test_send_recv_obj_numpy():
    """Same as test_send_recv_obj, but receive into NumPy uint8 buffers."""
    allocator = functools.partial(np.empty, dtype=np.uint8)

    async def echo_obj_server(ep):
        received = await ep.recv_obj(allocator=allocator)
        await ep.send_obj(received)

    listener = ucxx.create_listener(echo_obj_server)
    client = await ucxx.create_endpoint(ucxx.get_address(), listener.port)

    payload = bytearray(b"hello")
    await client.send_obj(payload)
    echoed = await client.recv_obj(allocator=allocator)
    assert payload == echoed
    await wait_listener_client_handlers(listener)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_shutdown.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import logging
import sys
import numpy as np
import pytest
from ucxx._lib_async.utils_test import (
captured_logger,
wait_listener_client_handlers,
)
import ucxx as ucxx
async def _shutdown_send(ep, message_type):
    """Send a 10**6-element array over *ep* using tag or AM semantics."""
    payload = np.arange(10**6)
    if message_type == "tag":
        await ep.send(payload)
    else:
        await ep.am_send(payload)
async def _shutdown_recv(ep, message_type):
    """Receive a 10**6-element array from *ep* using tag or AM semantics."""
    if message_type == "tag":
        buf = np.empty(10**6)
        await ep.recv(buf)
    else:
        await ep.am_recv()
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_server_shutdown(message_type):
    """The server calls shutdown"""
    if message_type == "am":
        pytest.skip("AM not implemented yet")

    async def server_node(ep):
        # Closing while a recv is in flight must cancel that recv.
        with pytest.raises(ucxx.exceptions.UCXCanceledError):
            await asyncio.gather(_shutdown_recv(ep, message_type), ep.close())
        await ep.close()

    async def client_node(port):
        ep = await ucxx.create_endpoint(
            ucxx.get_address(),
            port,
        )
        # The peer closed, so the client's posted recv is canceled too.
        with pytest.raises(ucxx.exceptions.UCXCanceledError):
            await _shutdown_recv(ep, message_type)
        await ep.close()

    listener = ucxx.create_listener(
        server_node,
    )
    await client_node(listener.port)
    await wait_listener_client_handlers(listener)
    listener.close()
@pytest.mark.skipif(
    sys.version_info < (3, 7), reason="test currently fails for python3.6"
)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_client_shutdown(message_type):
    """The client calls shutdown"""
    if message_type == "am":
        pytest.skip("AM not implemented yet")

    async def client_node(port):
        ep = await ucxx.create_endpoint(
            ucxx.get_address(),
            port,
        )
        # Closing while a recv is in flight must cancel that recv.
        with pytest.raises(ucxx.exceptions.UCXCanceledError):
            await asyncio.gather(_shutdown_recv(ep, message_type), ep.close())
        await ep.close()

    async def server_node(ep):
        # The peer closed, so the server's posted recv is canceled too.
        with pytest.raises(ucxx.exceptions.UCXCanceledError):
            await _shutdown_recv(ep, message_type)
        await ep.close()

    listener = ucxx.create_listener(
        server_node,
    )
    await client_node(listener.port)
    await wait_listener_client_handlers(listener)
    listener.close()
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_listener_close(message_type):
    """The server close the listener"""
    if message_type == "am":
        pytest.skip("AM not implemented yet")

    async def client_node(listener):
        ep = await ucxx.create_endpoint(
            ucxx.get_address(),
            listener.port,
        )
        await _shutdown_recv(ep, message_type)
        await _shutdown_recv(ep, message_type)
        # Closing must be immediately observable through `closed()`.
        assert listener.closed() is False
        listener.close()
        assert listener.closed() is True

    async def server_node(ep):
        await _shutdown_send(ep, message_type)
        await _shutdown_send(ep, message_type)

    listener = ucxx.create_listener(
        server_node,
    )
    await client_node(listener)
    await wait_listener_client_handlers(listener)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_listener_del(message_type):
    """The client delete the listener"""
    if message_type == "am":
        pytest.skip("AM not implemented yet")

    async def server_node(ep):
        await _shutdown_send(ep, message_type)
        await _shutdown_send(ep, message_type)

    listener = ucxx.create_listener(
        server_node,
    )
    ep = await ucxx.create_endpoint(
        ucxx.get_address(),
        listener.port,
    )
    await _shutdown_recv(ep, message_type)
    assert listener.closed() is False
    root = logging.getLogger("ucx")
    # Destroying the listener while a client handler may still be running
    # should emit a warning rather than fail.
    with captured_logger(root, level=logging.WARN) as log:
        # Deleting the listener without waiting for all client handlers to complete
        # should be avoided in user code.
        del listener
    assert log.getvalue().startswith("Listener object is being destroyed")
    # The already-established endpoint keeps working after the listener died.
    await _shutdown_recv(ep, message_type)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_close_after_n_recv(message_type):
    """The Endpoint.close_after_n_recv()"""
    if message_type == "am":
        pytest.skip("AM not implemented yet")

    async def server_node(ep):
        # Each connection receives exactly 10 messages from the server.
        for _ in range(10):
            await _shutdown_send(ep, message_type)

    async def client_node(port):
        # Case 1: request auto-close after the next 10 receives.
        ep = await ucxx.create_endpoint(
            ucxx.get_address(),
            port,
        )
        ep.close_after_n_recv(10)
        for _ in range(10):
            await _shutdown_recv(ep, message_type)
        assert ep.closed()

        # Case 2: request auto-close mid-stream, counting future receives.
        ep = await ucxx.create_endpoint(
            ucxx.get_address(),
            port,
        )
        for _ in range(5):
            await _shutdown_recv(ep, message_type)
        ep.close_after_n_recv(5)
        for _ in range(5):
            await _shutdown_recv(ep, message_type)
        assert ep.closed()

        # Case 3: count from endpoint creation instead of from the call.
        ep = await ucxx.create_endpoint(
            ucxx.get_address(),
            port,
        )
        for _ in range(5):
            await _shutdown_recv(ep, message_type)
        ep.close_after_n_recv(10, count_from_ep_creation=True)
        for _ in range(5):
            await _shutdown_recv(ep, message_type)
        assert ep.closed()

        # Case 4: error paths — `n` in the past, and setting the limit twice.
        ep = await ucxx.create_endpoint(
            ucxx.get_address(),
            port,
        )
        for _ in range(10):
            await _shutdown_recv(ep, message_type)
        with pytest.raises(
            ucxx.exceptions.UCXError,
            match="`n` cannot be less than current recv_count",
        ):
            ep.close_after_n_recv(5, count_from_ep_creation=True)

        ep.close_after_n_recv(1)
        with pytest.raises(
            ucxx.exceptions.UCXError,
            match="close_after_n_recv has already been set to",
        ):
            ep.close_after_n_recv(1)

    listener = ucxx.create_listener(
        server_node,
    )
    await client_node(listener.port)
    await wait_listener_client_handlers(listener)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_reset.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from ucxx._lib_async.utils_test import wait_listener_client_handlers
import ucxx
class ResetAfterN:
    """Callable that triggers ``ucxx.reset()`` on its *n*-th invocation."""

    def __init__(self, n):
        # Invocation count at which reset fires.
        self.n = n
        # Number of calls made so far.
        self.count = 0

    def __call__(self):
        self.count += 1
        if self.count == self.n:
            ucxx.reset()
@pytest.mark.asyncio
async def test_reset():
    """reset() succeeds once all endpoints/listeners have been released."""
    reset = ResetAfterN(2)

    def server(ep):
        ep.abort()
        reset()

    lt = ucxx.create_listener(server)
    ep = await ucxx.create_endpoint(ucxx.get_address(), lt.port)
    await wait_listener_client_handlers(lt)
    # Drop both references so the second reset() call finds nothing alive.
    del lt
    del ep
    reset()
@pytest.mark.asyncio
async def test_lt_still_in_scope_error():
    """reset() must raise while a listener reference is still alive."""
    reset = ResetAfterN(2)

    def server(ep):
        ep.abort()
        reset()

    lt = ucxx.create_listener(server)
    ep = await ucxx.create_endpoint(ucxx.get_address(), lt.port)
    await wait_listener_client_handlers(lt)
    # Only the endpoint is released; `lt` keeps the listener alive.
    del ep
    with pytest.raises(
        ucxx.exceptions.UCXError,
        match="Trying to reset UCX but not all Endpoints and/or Listeners are closed()",
    ):
        reset()
    # Close the listener to leave a clean state behind.
    lt.close()
@pytest.mark.asyncio
async def test_ep_still_in_scope_error():
    """reset() must raise while an endpoint reference is still alive."""
    reset = ResetAfterN(2)

    def server(ep):
        ep.abort()
        reset()

    lt = ucxx.create_listener(server)
    ep = await ucxx.create_endpoint(ucxx.get_address(), lt.port)
    await wait_listener_client_handlers(lt)
    # Only the listener is released; `ep` keeps the endpoint alive.
    del lt
    with pytest.raises(
        ucxx.exceptions.UCXError,
        match="Trying to reset UCX but not all Endpoints and/or Listeners are closed()",
    ):
        reset()
    # Abort the endpoint to leave a clean state behind.
    ep.abort()
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_version.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import ucxx
def test_get_ucx_version():
    """Querying the UCX version returns a 3-tuple without initializing UCX."""
    ucx_version = ucxx.get_ucx_version()
    assert isinstance(ucx_version, tuple)
    assert len(ucx_version) == 3
    # The version query alone must not have created a UCX context.
    assert ucxx.core._ctx is None
def test_version_constant():
    """ucxx exposes its own version as a string constant."""
    assert isinstance(ucxx.__version__, str)
def test_ucx_version_constant():
    """ucxx exposes the underlying UCX version as a string constant."""
    assert isinstance(ucxx.__ucx_version__, str)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_send_recv_am.py | import asyncio
from functools import partial
import numpy as np
import pytest
from ucxx._lib_async.utils_test import wait_listener_client_handlers
import ucxx
# Zero plus sizes from 1 byte up to 16 MiB in factor-of-16 steps.
msg_sizes = [0] + [2**i for i in range(0, 25, 4)]
def _bytearray_assert_equal(a, b):
assert a == b
def get_data():
    """Return per-memory-type transfer descriptors for the AM tests.

    Each entry bundles an ``allocator`` (receive-buffer factory), a
    ``generator`` (payload factory), a ``validator`` (received-vs-expected
    comparison) and the ``memory_type`` the buffers live in. A CuPy entry is
    appended only when CuPy is importable.
    """
    descriptors = [
        {
            "allocator": bytearray,
            "generator": lambda n: bytearray(b"m" * n),
            "validator": lambda recv, exp: _bytearray_assert_equal(bytes(recv), exp),
            "memory_type": "host",
        },
        {
            "allocator": partial(np.ones, dtype=np.uint8),
            "generator": partial(np.arange, dtype=np.int64),
            "validator": lambda recv, exp: np.testing.assert_equal(
                recv.view(np.int64), exp
            ),
            "memory_type": "host",
        },
    ]

    try:
        import cupy as cp
    except ImportError:
        return descriptors

    descriptors.append(
        {
            "allocator": partial(cp.ones, dtype=np.uint8),
            "generator": partial(cp.arange, dtype=np.int64),
            "validator": lambda recv, exp: cp.testing.assert_array_equal(
                cp.asarray(recv).view(np.int64), exp
            ),
            "memory_type": "cuda",
        }
    )
    return descriptors
def simple_server(size, recv):
    """Return an AM echo handler.

    ``size`` and ``recv`` are kept for signature compatibility with callers
    but are unused.

    Fix: the handler previously rebound the ``recv`` parameter with its local
    receive buffer, shadowing (and never updating) the list the caller passed
    in; the local now has its own name.
    """

    async def server(ep):
        msg = await ep.am_recv()
        await ep.am_send(msg)
        await ep.close()

    return server
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("recv_wait", [True, False])
@pytest.mark.parametrize("data", get_data())
async def test_send_recv_am(size, recv_wait, data):
    """Round-trip an AM message of ``size`` elements through an echo server
    for each memory type provided by ``get_data()``."""
    # Force the eager/rendezvous protocol switch at a known threshold so the
    # CUDA eager branch below is deterministic.
    rndv_thresh = 8192
    ucxx.init(options={"RNDV_THRESH": str(rndv_thresh)})

    msg = data["generator"](size)

    recv = []
    listener = ucxx.create_listener(simple_server(size, recv))
    num_clients = 1
    clients = [
        await ucxx.create_endpoint(ucxx.get_address(), listener.port)
        for i in range(num_clients)
    ]
    if recv_wait:
        # By sleeping here we ensure that the listener's
        # ep.am_recv call will have to wait, rather than return
        # immediately as receive data is already available.
        await asyncio.sleep(1)
    await asyncio.gather(*(c.am_send(msg) for c in clients))
    recv_msgs = await asyncio.gather(*(c.am_recv() for c in clients))
    for recv_msg in recv_msgs:
        if data["memory_type"] == "cuda" and msg.nbytes < rndv_thresh:
            # Eager messages are always received on the host, if no custom host
            # allocator is registered, UCXX defaults to `np.array`.
            np.testing.assert_equal(recv_msg.view(np.int64), msg.get())
        else:
            data["validator"](recv_msg, msg)
    await asyncio.gather(*(c.close() for c in clients))
    await wait_listener_client_handlers(listener)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib_async | rapidsai_public_repos/ucxx/python/ucxx/_lib_async/tests/test_worker.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import os
from unittest.mock import patch
import pytest
import ucxx
@pytest.mark.asyncio
@pytest.mark.parametrize("enable_delayed_submission", [True, False])
@pytest.mark.parametrize("enable_python_future", [True, False])
async def test_worker_capabilities_args(
    enable_delayed_submission, enable_python_future
):
    """Check worker capabilities requested via `ucxx.init()` arguments."""
    progress_mode = os.getenv("UCXPY_PROGRESS_MODE", "thread")
    thread_progress = progress_mode.startswith("thread")
    if enable_delayed_submission and not thread_progress:
        # Delayed submission requires a thread progress mode; anything else
        # must be rejected at init time.
        with pytest.raises(ValueError, match="Delayed submission requested, but"):
            ucxx.init(
                enable_delayed_submission=enable_delayed_submission,
                enable_python_future=enable_python_future,
            )
        return
    ucxx.init(
        enable_delayed_submission=enable_delayed_submission,
        enable_python_future=enable_python_future,
    )
    worker = ucxx.core._get_ctx().worker
    assert worker.is_delayed_submission_enabled() is enable_delayed_submission
    # Python futures are only honored under thread progress modes.
    expected_python_future = enable_python_future if thread_progress else False
    assert worker.is_python_future_enabled() is expected_python_future
@pytest.mark.asyncio
@pytest.mark.parametrize("enable_delayed_submission", [True, False])
@pytest.mark.parametrize("enable_python_future", [True, False])
async def test_worker_capabilities_env(enable_delayed_submission, enable_python_future):
    """Check worker capabilities requested via environment variables.

    Mirrors ``test_worker_capabilities_args`` but drives the same options
    through ``UCXPY_ENABLE_DELAYED_SUBMISSION``/``UCXPY_ENABLE_PYTHON_FUTURE``
    instead of `ucxx.init()` keyword arguments.
    """
    # `patch.dict` restores the original environment after the test.
    with patch.dict(
        os.environ,
        {
            "UCXPY_ENABLE_DELAYED_SUBMISSION": "1"
            if enable_delayed_submission
            else "0",
            "UCXPY_ENABLE_PYTHON_FUTURE": "1" if enable_python_future else "0",
        },
    ):
        progress_mode = os.getenv("UCXPY_PROGRESS_MODE", "thread")
        if enable_delayed_submission and not progress_mode.startswith("thread"):
            # Delayed submission is only valid with thread progress modes.
            with pytest.raises(ValueError, match="Delayed submission requested, but"):
                ucxx.init()
        else:
            ucxx.init()
            worker = ucxx.core._get_ctx().worker
            assert worker.is_delayed_submission_enabled() is enable_delayed_submission
            # Python futures are only honored under thread progress modes.
            if progress_mode.startswith("thread"):
                assert worker.is_python_future_enabled() is enable_python_future
            else:
                assert worker.is_python_future_enabled() is False
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/benchmarks/cudf_merge.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
"""
Benchmark send receive on one machine
"""
import argparse
import asyncio
import cProfile
import gc
import io
import os
import pickle
import pstats
import sys
import tempfile
from time import monotonic as clock
import cupy
import numpy as np
from ucxx.benchmarks.asyncssh import run_ssh_cluster
from ucxx.benchmarks.utils import (
_run_cluster_server,
_run_cluster_workers,
run_cluster_server,
run_cluster_workers,
)
from ucxx.utils import (
format_bytes,
format_time,
hmean,
print_multi,
print_separator,
)
import ucxx
# Must be set _before_ importing RAPIDS libraries (cuDF, RMM)
os.environ["RAPIDS_NO_INITIALIZE"] = "True"
import cudf # noqa
import rmm # noqa
def sizeof_cudf_dataframe(df):
    """Total memory footprint of ``df`` (column data plus index), in bytes."""
    columns_nbytes = sum(col.memory_usage for col in df._data.columns)
    return int(columns_nbytes + df._index.memory_usage())
async def send_df(ep, df):
    """Serialize dataframe ``df`` and send it over endpoint ``ep``.

    Wire format: (1) uint64 header length, (2) pickled serialization header
    augmented with each frame's CUDA array interface, (3) the raw frames.
    """
    header, frames = df.serialize()
    header["frame_ifaces"] = [f.__cuda_array_interface__ for f in frames]
    pickled_header = pickle.dumps(header)
    await ep.send(np.array([len(pickled_header)], dtype=np.uint64))
    await ep.send(pickled_header)
    for frame in frames:
        await ep.send(frame)
async def recv_df(ep):
    """Receive a dataframe sent by :func:`send_df` and reconstruct it."""
    header_nbytes = np.empty((1,), dtype=np.uint64)
    await ep.recv(header_nbytes)
    header_buf = bytearray(header_nbytes[0])
    await ep.recv(header_buf)
    header = pickle.loads(header_buf)
    # Allocate one device buffer per incoming frame, using the shape/dtype
    # advertised in the header, then receive directly into each buffer.
    frames = [
        cupy.empty(iface["shape"], dtype=iface["typestr"])
        for iface in header["frame_ifaces"]
    ]
    for frame in frames:
        await ep.recv(frame)
    cudf_typ = pickle.loads(header["type-serialized"])
    return cudf_typ.deserialize(header, frames)
async def barrier(rank, eps):
    """Simple barrier rooted at rank 0.

    Rank 0 waits for one byte from every peer; every other rank sends one
    byte to rank 0.
    """
    if rank != 0:
        await eps[0].send(np.zeros(1, dtype="u1"))
    else:
        await asyncio.gather(*[ep.recv(np.empty(1, dtype="u1")) for ep in eps.values()])
async def send_bins(eps, bins):
    """Concurrently send each peer rank its bin (``bins`` is indexed by rank)."""
    await asyncio.gather(*(send_df(ep, bins[rank]) for rank, ep in eps.items()))
async def recv_bins(eps, bins):
    """Receive one dataframe from every peer and append them all to ``bins``."""
    received = await asyncio.gather(*(recv_df(ep) for ep in eps.values()))
    bins.extend(received)
async def exchange_and_concat_bins(rank, eps, bins, timings=None):
    """All-to-all exchange of hash bins; return this rank's concatenation.

    The result combines this rank's own bin with everything received from
    peers. When ``timings`` is given, append a ``(seconds, nbytes)`` sample
    covering the exchange, where ``nbytes`` counts every outgoing bin
    (i.e. all bins except this rank's own).
    """
    received = [bins[rank]]
    start = clock() if timings is not None else None
    await asyncio.gather(recv_bins(eps, received), send_bins(eps, bins))
    if timings is not None:
        elapsed = clock() - start
        sent_nbytes = sum(
            sizeof_cudf_dataframe(b) for i, b in enumerate(bins) if i != rank
        )
        timings.append((elapsed, sent_nbytes))
    return cudf.concat(received)
async def distributed_join(args, rank, eps, left_table, right_table, timings=None):
    """Hash-partition both tables on "key", exchange partitions all-to-all,
    and return this rank's local merge of the shuffled tables."""
    left_parts = left_table.partition_by_hash(["key"], args.n_chunks)
    right_parts = right_table.partition_by_hash(["key"], args.n_chunks)
    left_local = await exchange_and_concat_bins(rank, eps, left_parts, timings)
    right_local = await exchange_and_concat_bins(rank, eps, right_parts, timings)
    return left_local.merge(right_local, on="key")
def generate_chunk(i_chunk, local_size, num_chunks, chunk_type, frac_match):
    """Create one dataframe chunk for the merge benchmark.

    Parameters
    ----------
    i_chunk
        Index (rank) of this chunk.
    local_size
        Number of rows in the chunk.
    num_chunks
        Total number of chunks across the cluster.
    chunk_type
        "build" for the build-side table; anything else builds the probe
        ("other") table whose keys partially match the build keys.
    frac_match
        Fraction of probe keys that should match build keys.
    """
    # Fixed seed so every rank generates reproducible data.
    cupy.random.seed(42)
    if chunk_type == "build":
        # Build dataframe
        #
        # "key" column is a unique sample within [0, local_size * num_chunks)
        #
        # "shuffle" column is a random selection of partitions (used for shuffle)
        #
        # "payload" column is a random permutation of the chunk_size
        start = local_size * i_chunk
        stop = start + local_size
        df = cudf.DataFrame(
            {
                "key": cupy.arange(start, stop=stop, dtype="int64"),
                "payload": cupy.arange(local_size, dtype="int64"),
            }
        )
    else:
        # Other dataframe
        #
        # "key" column matches values from the build dataframe
        # for a fraction (`frac_match`) of the entries. The matching
        # entries are perfectly balanced across each partition of the
        # "base" dataframe.
        #
        # "payload" column is a random permutation of the chunk_size
        # Step 1. Choose values that DO match
        sub_local_size = local_size // num_chunks
        sub_local_size_use = max(int(sub_local_size * frac_match), 1)
        arrays = []
        for i in range(num_chunks):
            bgn = (local_size * i) + (sub_local_size * i_chunk)
            end = bgn + sub_local_size
            ar = cupy.arange(bgn, stop=end, dtype="int64")
            arrays.append(cupy.random.permutation(ar)[:sub_local_size_use])
        key_array_match = cupy.concatenate(tuple(arrays), axis=0)
        # Step 2. Add values that DON'T match
        missing_size = local_size - key_array_match.shape[0]
        start = local_size * num_chunks + local_size * i_chunk
        stop = start + missing_size
        key_array_no_match = cupy.arange(start, stop=stop, dtype="int64")
        # Step 3. Combine and create the final dataframe chunk
        key_array_combine = cupy.concatenate(
            (key_array_match, key_array_no_match), axis=0
        )
        df = cudf.DataFrame(
            {
                "key": cupy.random.permutation(key_array_combine),
                "payload": cupy.arange(local_size, dtype="int64"),
            }
        )
    return df
def _get_server_command(args, num_workers):
cmd_args = " ".join(
[
"--server",
f"--devs {args.devs}",
f"--chunks-per-dev {args.chunks_per_dev}",
f"--chunk-size {args.chunk_size}",
f"--frac-match {args.frac_match}",
f"--iter {args.iter}",
f"--warmup-iter {args.warmup_iter}",
f"--num-workers {num_workers}",
]
)
return f"{sys.executable} -m ucxx.benchmarks.cudf_merge {cmd_args}"
def _get_worker_command_without_address(
args,
num_workers,
node_idx,
):
cmd_list = [
f"--devs {args.devs}",
f"--chunks-per-dev {args.chunks_per_dev}",
f"--chunk-size {args.chunk_size}",
f"--frac-match {args.frac_match}",
f"--iter {args.iter}",
f"--warmup-iter {args.warmup_iter}",
f"--num-workers {num_workers}",
f"--node-idx {node_idx}",
]
if args.rmm_init_pool_size:
cmd_list.append(f"--rmm-init-pool-size {args.rmm_init_pool_size}")
if args.profile:
cmd_list.append(f"--profile {args.profile}")
if args.cuda_profile:
cmd_list.append("--cuda-profile")
if args.collect_garbage:
cmd_list.append("--collect-garbage")
cmd_args = " ".join(cmd_list)
return f"{sys.executable} -m ucxx.benchmarks.cudf_merge {cmd_args}"
def _get_worker_command(
    server_info,
    args,
    num_workers,
    node_idx,
):
    """Build the full worker command line, including the server address."""
    base_cmd = _get_worker_command_without_address(args, num_workers, node_idx)
    address = f"{server_info['address']}:{server_info['port']}"
    return f"{base_cmd} --server-address {address}"
async def worker(rank, eps, args):
    """Benchmark body run by each worker.

    Generates build/other dataframe chunks, runs ``args.warmup_iter`` warmup
    joins, then ``args.iter`` timed distributed joins, and returns aggregate
    and per-iteration bandwidth/wallclock/throughput statistics.

    Parameters
    ----------
    rank
        This worker's rank within the cluster.
    eps
        Dict mapping peer ranks to endpoints.
    args
        Parsed benchmark arguments (see ``parse_args``).
    """
    # Setting current device and make RMM use it
    from rmm.allocators.cupy import rmm_cupy_allocator
    rmm.reinitialize(pool_allocator=True, initial_pool_size=args.rmm_init_pool_size)
    # Make cupy use RMM
    cupy.cuda.set_allocator(rmm_cupy_allocator)
    df1 = generate_chunk(rank, args.chunk_size, args.n_chunks, "build", args.frac_match)
    df2 = generate_chunk(rank, args.chunk_size, args.n_chunks, "other", args.frac_match)
    # Let's warmup and sync before benchmarking
    for i in range(args.warmup_iter):
        await distributed_join(args, rank, eps, df1, df2)
        await barrier(rank, eps)
        if args.collect_garbage:
            gc.collect()
    if args.cuda_profile:
        cupy.cuda.profiler.start()
    if args.profile:
        pr = cProfile.Profile()
        pr.enable()
    iter_results = {"bw": [], "wallclock": [], "throughput": [], "data_processed": []}
    timings = []
    t1 = clock()
    for i in range(args.iter):
        iter_timings = []
        iter_t = clock()
        ret = await distributed_join(args, rank, eps, df1, df2, iter_timings)
        await barrier(rank, eps)
        iter_took = clock() - iter_t
        # Ensure the number of matches falls within `args.frac_match` +/- 2%.
        # Small chunk sizes may not have enough matches, skip check for chunks
        # smaller than 100k.
        if args.chunk_size >= 100_000:
            expected_len = args.chunk_size * args.frac_match
            expected_len_err = expected_len * 0.02
            assert abs(len(ret) - expected_len) <= expected_len_err
        if args.collect_garbage:
            gc.collect()
        # Bandwidth: bytes exchanged over time actually spent communicating.
        iter_bw = sum(t[1] for t in iter_timings) / sum(t[0] for t in iter_timings)
        iter_data_processed = len(df1) * sum([t.itemsize for t in df1.dtypes])
        iter_data_processed += len(df2) * sum([t.itemsize for t in df2.dtypes])
        iter_throughput = args.n_chunks * iter_data_processed / iter_took
        iter_results["bw"].append(iter_bw)
        iter_results["wallclock"].append(iter_took)
        iter_results["throughput"].append(iter_throughput)
        iter_results["data_processed"].append(iter_data_processed)
        timings += iter_timings
    took = clock() - t1
    if args.profile:
        pr.disable()
        s = io.StringIO()
        ps = pstats.Stats(pr, stream=s)
        ps.dump_stats("%s.%0d" % (args.profile, rank))
    if args.cuda_profile:
        cupy.cuda.profiler.stop()
    data_processed = len(df1) * sum([t.itemsize * args.iter for t in df1.dtypes])
    data_processed += len(df2) * sum([t.itemsize * args.iter for t in df2.dtypes])
    return {
        "bw": sum(t[1] for t in timings) / sum(t[0] for t in timings),
        "wallclock": took,
        "throughput": args.n_chunks * data_processed / took,
        "data_processed": data_processed,
        "iter_results": iter_results,
    }
def parse_args():
    """Parse and validate command-line arguments for the cuDF merge benchmark.

    Returns the parsed namespace, augmented with derived fields:
    ``devs`` (list of ints, non-SSH mode), ``num_node_workers``, ``n_chunks``,
    ``server_address`` (dict, when given) and ``server_info``.

    Raises
    ------
    RuntimeError
        On invalid combinations of multi-node options.
    """
    parser = argparse.ArgumentParser(description="")
    parser.add_argument(
        "--chunks-per-dev",
        metavar="N",
        default=1,
        type=int,
        help="Number of chunks per device",
    )
    parser.add_argument(
        "-d",
        "--devs",
        metavar="LIST",
        default="0",
        type=str,
        help='GPU devices to use (default "0").',
    )
    parser.add_argument(
        "-l",
        "--listen-address",
        metavar="ip",
        default=ucxx.utils.get_address(),
        type=str,
        help="Server listen address (default `ucxx.utils.get_address()`).",
    )
    parser.add_argument("-c", "--chunk-size", type=int, default=4, metavar="N")
    parser.add_argument(
        "--frac-match",
        metavar="FRAC",
        default=0.3,
        type=float,
        help="Fraction of rows that matches (default 0.3)",
    )
    parser.add_argument(
        "--profile",
        metavar="FILENAME",
        default=None,
        type=str,
        help="Write profile for each worker to `filename.RANK`",
    )
    parser.add_argument(
        "--cuda-profile",
        default=False,
        action="store_true",
        help="Enable CUDA profiling, use with `nvprof --profile-child-processes \
                --profile-from-start off`",
    )
    parser.add_argument(
        "--rmm-init-pool-size",
        metavar="BYTES",
        default=None,
        type=int,
        help="Initial RMM pool size (default 1/2 total GPU memory)",
    )
    parser.add_argument(
        "--collect-garbage",
        default=False,
        action="store_true",
        help="Trigger Python garbage collection after each iteration.",
    )
    parser.add_argument(
        "--iter",
        default=1,
        type=int,
        help="Number of benchmark iterations.",
    )
    parser.add_argument(
        "--warmup-iter",
        default=5,
        type=int,
        help="Number of warmup iterations.",
    )
    parser.add_argument(
        "--server",
        default=False,
        action="store_true",
        help="Run server only.",
    )
    parser.add_argument(
        "--server-file",
        type=str,
        help="File to store server's address (if `--server` is specified) or to "
        "read its address from otherwise.",
    )
    parser.add_argument(
        "--server-address",
        type=str,
        help="Address where server is listening, in the IP:PORT or HOST:PORT "
        "format. Only to be used to connect to a remote server started with "
        "`--server`.",
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        help="Number of workers in the entire cluster, mandatory when "
        "`--server` is specified. This number can be calculated as: "
        "`number_of_devices_per_node * number_of_nodes * chunks_per_device`.",
    )
    parser.add_argument(
        "--node-idx",
        type=int,
        help="On a multi-node setup, specify the index of the node that this "
        "process is running. Must be a unique number in the "
        "[0, `--n-workers` / `len(--devs)`) range.",
    )
    parser.add_argument(
        "--hosts",
        type=str,
        help="The list of hosts to use for a multi-node run. All hosts need "
        "to be reachable via SSH without a password (i.e., with a password-less "
        "key). Usage example: --hosts 'dgx12,dgx12,10.10.10.10,dgx13'. In the "
        "example, the benchmark is launched with server (manages workers "
        "synchronization) on dgx12 (first in the list), and then three workers "
        "on hosts 'dgx12', '10.10.10.10', 'dgx13'. "
        "This option cannot be used with `--server`, `--server-file`, "
        "`--num-workers `, or `--node-idx` which are all used for a "
        "manual multi-node setup.",
    )
    parser.add_argument(
        "--print-commands-only",
        default=False,
        action="store_true",
        help="Print commands for each node in case you don't want to or can't "
        "use SSH for launching a cluster. To be used together with `--hosts`, "
        "specifying this argument will list the commands that should be "
        "launched in each node. This is only a convenience function, and the "
        "user can write the same command lines by just following the guidance "
        "in this file's argument descriptions and existing documentation.",
    )
    args = parser.parse_args()
    if args.hosts:
        # SSH-driven multi-node mode: incompatible with the manual-setup flags.
        try:
            import asyncssh  # noqa
        except ImportError:
            raise RuntimeError(
                "The use of `--hosts` for SSH multi-node benchmarking requires "
                "`asyncssh` to be installed."
            )
        if any(
            arg
            for arg in [
                args.server,
                args.num_workers,
                args.node_idx,
            ]
        ):
            raise RuntimeError(
                "A multi-node setup using `--hosts` for automatic SSH configuration "
                "cannot be used together with `--server`, `--num-workers` or "
                "`--node-idx`."
            )
        elif args.server_file and not args.print_commands_only:
            raise RuntimeError(
                "Specifying `--server-file` together with `--hosts` is not "
                "allowed, except when used with `--print-commands-only`."
            )
    else:
        args.devs = [int(d) for d in args.devs.split(",")]
        args.num_node_workers = len(args.devs) * args.chunks_per_dev
        if any([args.server, args.server_file, args.server_address]):
            if args.server_address:
                server_host, server_port = args.server_address.split(":")
                args.server_address = {"address": server_host, "port": int(server_port)}
            args.server_info = args.server_file or args.server_address
            if args.num_workers is None:
                raise RuntimeError(
                    "A multi-node setup requires specifying `--num-workers`."
                )
            elif args.num_workers < 2:
                raise RuntimeError("A multi-node setup requires `--num-workers >= 2`.")
            if not args.server and args.node_idx is None:
                # BUGFIX: this message previously referenced a nonexistent
                # `--node-num` flag; the actual option is `--node-idx`.
                raise RuntimeError(
                    "Each worker on a multi-node is required to specify `--node-idx`."
                )
            args.n_chunks = args.num_workers
        else:
            args.n_chunks = args.num_node_workers
        if args.n_chunks < 2:
            raise RuntimeError(
                "Number of chunks must be greater than 1 (chunks-per-dev: "
                f"{args.chunks_per_dev}, devs: {args.devs})"
            )
    return args
def main():
    """Entry point: dispatch to SSH-cluster, server-only, worker-only, or
    local single-node mode, then print the aggregated benchmark report
    (only in the modes that collect `stats`)."""
    args = parse_args()
    if not args.server and not args.hosts:
        assert args.n_chunks > 1
        assert args.n_chunks % 2 == 0
    if args.hosts:
        # SSH mode: first host runs the server, the rest run workers.
        hosts = args.hosts.split(",")
        server_host, worker_hosts = hosts[0], hosts[1:]
        num_workers = (
            len(args.devs.split(",")) * len(worker_hosts) * args.chunks_per_dev
        )
        if args.print_commands_only:
            # Only print the per-node command lines; don't launch anything.
            server_cmd = _get_server_command(args, num_workers)
            print(f"[{server_host}] Server command line: {server_cmd}")
            for node_idx, worker_host in enumerate(worker_hosts):
                worker_cmd = _get_worker_command_without_address(
                    args, num_workers, node_idx
                )
                if args.server_file:
                    worker_cmd += f" --server-file '{args.server_file}'"
                else:
                    worker_cmd += " --server-address 'REPLACE WITH SERVER ADDRESS'"
                print(f"[{worker_host}] Worker command line: {worker_cmd}")
            return
        else:
            return run_ssh_cluster(
                args,
                server_host,
                worker_hosts,
                num_workers,
                _get_server_command,
                _get_worker_command,
            )
    elif args.server:
        # Manual multi-node mode, server side: blocks until results arrive.
        stats = run_cluster_server(
            args.server_file,
            args.n_chunks,
        )
    elif args.server_file or args.server_address:
        # Manual multi-node mode, worker side: results are reported to the
        # server, so there is nothing to print here.
        return run_cluster_workers(
            args.server_info,
            args.n_chunks,
            args.num_node_workers,
            args.node_idx,
            worker,
            worker_args=args,
            ensure_cuda_device=True,
        )
    else:
        # Local single-node mode: spawn both server and workers here.
        server_file = tempfile.NamedTemporaryFile()
        server_proc, server_queue = _run_cluster_server(
            server_file.name,
            args.n_chunks,
        )
        # Wait for server to become available
        with open(server_file.name, "r") as f:
            while len(f.read()) == 0:
                pass
        worker_procs = _run_cluster_workers(
            server_file.name,
            args.n_chunks,
            args.num_node_workers,
            0,
            worker,
            worker_args=args,
            ensure_cuda_device=True,
        )
        stats = [server_queue.get() for i in range(args.n_chunks)]
        [p.join() for p in worker_procs]
        server_proc.join()
    wc = stats[0]["wallclock"]
    bw = hmean(np.array([s["bw"] for s in stats]))
    tp = stats[0]["throughput"]
    dp = sum(s["data_processed"] for s in stats)
    dp_iter = sum(s["iter_results"]["data_processed"][0] for s in stats)
    print("cuDF merge benchmark")
    print_separator(separator="-", length=110)
    print_multi(values=["Device(s)", f"{args.devs}"])
    print_multi(values=["Chunks per device", f"{args.chunks_per_dev}"])
    print_multi(values=["Rows per chunk", f"{args.chunk_size}"])
    print_multi(values=["Total data processed", f"{format_bytes(dp)}"])
    print_multi(values=["Data processed per iter", f"{format_bytes(dp_iter)}"])
    print_multi(values=["Row matching fraction", f"{args.frac_match}"])
    print_separator(separator="=", length=110)
    print_multi(values=["Wall-clock", f"{format_time(wc)}"])
    print_multi(values=["Bandwidth", f"{format_bytes(bw)}/s"])
    print_multi(values=["Throughput", f"{format_bytes(tp)}/s"])
    print_separator(separator="=", length=110)
    print_multi(values=["Run", "Wall-clock", "Bandwidth", "Throughput"])
    for i in range(args.iter):
        iter_results = stats[0]["iter_results"]
        iter_wc = iter_results["wallclock"][i]
        iter_bw = hmean(np.array([s["iter_results"]["bw"][i] for s in stats]))
        iter_tp = iter_results["throughput"][i]
        print_multi(
            values=[
                i,
                f"{format_time(iter_wc)}",
                f"{format_bytes(iter_bw)}/s",
                f"{format_bytes(iter_tp)}/s",
            ]
        )
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/benchmarks/asyncssh.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import json
import logging
import os
import queue
import sys
from functools import partial
logger = logging.getLogger("ucx.asyncssh")
logger.setLevel(logging.getLevelName(os.getenv("UCXPY_ASYNCSSH_LOG_LEVEL", "WARNING")))
try:
import asyncssh
class SSHProc(asyncssh.SSHClientSession):
def __init__(self, out_queue):
assert isinstance(out_queue, queue.Queue)
self.out_queue = out_queue
def data_received(self, data, datatype):
logger.debug(f"SSHProc.data_received(): {data=}")
self.out_queue.put(data)
def connection_lost(self, exc):
if exc:
logger.error(f"SSH session error: {exc}", file=sys.stderr)
else:
logger.debug(
f"SSH connection terminated succesfully {self.out_queue.empty()=}"
)
class SSHServerProc(SSHProc):
address = None
port = None
def data_received(self, data, datatype):
if self.address is None and self.port is None:
logger.debug(f"SSHServerProc.data_received() address: {data=}")
server_info = json.loads(data)
self.address = server_info["address"]
self.port = server_info["port"]
self.out_queue.put(server_info)
else:
super().data_received(data, datatype)
async def _run_ssh_cluster(
args,
server_host,
worker_hosts,
num_workers,
get_server_command,
get_worker_command,
):
"""
Run benchmarks in an SSH cluster.
The results are printed to stdout.
At the moment, only `ucxx.benchmarks.cudf_merge` is supported.
Parameters
----------
args: Namespace
The arguments that were passed to `ucxx.benchmarks.cudf_merge`.
server_host: str
String containing hostname or IP address of node where the server
will run.
worker_hosts: list
List of strings containing hostnames or IP addresses of nodes where
workers will run.
num_workers: int
get_server_command: callable
Function returning the full command that the server node will run.
Must have signature `get_server_command(args, num_workers)`,
where:
- `args` is the parsed `argparse.Namespace` object as parsed by
the caller application;
- `num_workers` number of workers in the entire cluster.
get_worker_command: callable
Function returning the full command that each worker node will run.
Must have signature `get_worker_command(args, num_workers, node_idx)`,
where:
- `args` is the parsed `argparse.Namespace` object as parsed by
the caller application;
- `num_workers` number of workers in the entire cluster;
- `node_idx` index of the node that the process will launch.
"""
logger.debug(f"{server_host=}, {worker_hosts=}")
async with asyncssh.connect(server_host, known_hosts=None) as conn:
server_queue = queue.Queue()
server_cmd = (get_server_command(args, num_workers, logger=logger),)
logger.debug(f"[{server_host}] {server_cmd=}")
server_chan, _ = await conn.create_session(
partial(SSHServerProc, server_queue),
server_cmd,
)
while True:
try:
server_info = server_queue.get_nowait()
except queue.Empty:
await asyncio.sleep(0.01)
else:
break
logger.info(f"Server session created {server_info=}")
workers_conn = await asyncio.gather(
*[asyncssh.connect(host, known_hosts=None) for host in worker_hosts]
)
workers_chan, workers_queue = [], []
for node_idx, worker_conn in enumerate(workers_conn):
worker_queue = queue.Queue()
worker_cmd = get_worker_command(
server_info,
args,
num_workers,
node_idx,
logger=logger,
)
logger.debug(f"[{worker_hosts[node_idx]}] {worker_cmd=}")
worker_chan, _ = await worker_conn.create_session(
partial(SSHProc, worker_queue),
worker_cmd,
)
workers_chan.append(worker_chan)
workers_queue.append(worker_queue)
await asyncio.gather(*[chan.wait_closed() for chan in workers_chan])
await server_chan.wait_closed()
while not server_queue.empty():
print(server_queue.get())
for i, worker_queue in enumerate(workers_queue):
if not worker_queue.empty():
logger.warning(
f"Worker {worker_hosts[i]} stdout wasn't empty. This "
"likely indicates errors may have occurred. You may "
"run with `UCXPY_ASYNCSSH_LOG_LEVEL=DEBUG` to see the "
"full output."
)
while not worker_queue.empty():
logger.debug(worker_queue.get())
def run_ssh_cluster(
args,
server_host,
worker_hosts,
num_workers,
get_server_command,
get_worker_command,
):
"""
Same as `_run_ssh_cluster()` but running on event loop until completed.
"""
try:
asyncio.get_event_loop().run_until_complete(
_run_ssh_cluster(
args,
server_host,
worker_hosts,
num_workers,
get_server_command,
get_worker_command,
)
)
except (OSError, asyncssh.Error) as exc:
sys.exit(f"SSH connection failed: {exc}")
except ImportError:
SSHProc = None
SSHServerProce = None
run_ssh_cluster = None
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/benchmarks/__init__.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/benchmarks/utils.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import json
import logging
import multiprocessing as mp
import os
import pickle
import threading
from types import ModuleType
import numpy as np
from ucxx.utils import get_address
import ucxx
logger = logging.getLogger("ucx")
def _ensure_cuda_device(devs, rank):
    """Pin this process to one GPU (round-robin over ``devs`` by ``rank``)
    and eagerly create its CUDA context via Numba.

    Intended to run in a freshly-spawned worker process so that
    ``CUDA_VISIBLE_DEVICES`` takes effect before CUDA libraries initialize
    (see ``_run_cluster_workers``'s ``ensure_cuda_device`` documentation).
    """
    import numba.cuda
    dev_id = devs[rank % len(devs)]
    os.environ["CUDA_VISIBLE_DEVICES"] = str(dev_id)
    logger.debug(f"{dev_id=}, {rank=}")
    # Force creation of the CUDA context on the selected device.
    numba.cuda.current_context()
def get_allocator(
    object_type: str, rmm_init_pool_size: int, rmm_managed_memory: bool
) -> ModuleType:
    """
    Select and configure the array module used to allocate benchmark buffers.

    Parameters
    ----------
    object_type: str
        One of "numpy" (host memory), "cupy" (device memory with CuPy's
        default allocator) or "rmm" (device memory allocated from an RMM
        pool).
    rmm_init_pool_size: int
        Initial RMM pool size; only used when ``object_type`` is "rmm".
    rmm_managed_memory: bool
        Whether the RMM pool uses managed memory; only used when
        ``object_type`` is "rmm".

    Returns
    -------
    The ``numpy`` or ``cupy`` module; for "rmm", ``cupy`` is returned after
    being configured to allocate through RMM.
    """
    if object_type == "numpy":
        import numpy as xp

        return xp
    import cupy as xp

    if object_type != "cupy":
        # "rmm": route CuPy allocations through an RMM pool.
        import rmm
        from rmm.allocators.cupy import rmm_cupy_allocator

        rmm.reinitialize(
            pool_allocator=True,
            managed_memory=rmm_managed_memory,
            initial_pool_size=rmm_init_pool_size,
        )
        xp.cuda.set_allocator(rmm_cupy_allocator)
    return xp
async def send_pickled_msg(ep, obj):
    """Pickle ``obj`` and send it over ``ep`` as a single object message."""
    await ep.send_obj(pickle.dumps(obj))
async def recv_pickled_msg(ep):
    """Receive one object message from ``ep`` and unpickle it."""
    return pickle.loads(await ep.recv_obj())
def _server_process(
    q,
    server_file,
    n_workers,
    ucx_options_list,
):
    """Target for the synchronization-server process.

    Collects every worker's ``(rank, ip, port)``, broadcasts the full map
    once all ``n_workers`` have registered, then gathers each worker's
    results and pushes them (ordered by rank) onto ``q``.
    """
    if ucx_options_list is not None:
        # NOTE(review): the whole `ucx_options_list` is passed to `ucxx.init`
        # here, while `_worker_process` indexes it per-rank
        # (`ucx_options_list[rank]`) -- confirm this asymmetry is intended.
        ucxx.init(ucx_options_list)
    import sys
    async def run():
        lock = threading.Lock()
        eps = {}
        results = {}
        async def server_handler(ep):
            # One invocation per connecting worker: register it, wait until
            # everyone is known, broadcast the map, then collect its result.
            worker_rank, worker_ip, worker_port = await recv_pickled_msg(ep)
            with lock:
                eps[worker_rank] = (worker_ip, worker_port)
            while len(eps) != n_workers:
                await asyncio.sleep(0.1)
            await send_pickled_msg(ep, eps)
            worker_results = await recv_pickled_msg(ep)
            with lock:
                results[worker_rank] = worker_results
        lf = ucxx.create_listener(server_handler)
        # Publish the server's address/port, either to `server_file` or to
        # stdout (without closing the real stdout file descriptor).
        if server_file is None:
            fp = open(sys.stdout.fileno(), mode="w", closefd=False)
        else:
            fp = open(server_file, mode="w")
        with fp:
            json.dump({"address": get_address(), "port": lf.port}, fp)
        while len(results) != n_workers:
            await asyncio.sleep(0.1)
        return results
    loop = asyncio.new_event_loop()
    ret = loop.run_until_complete(run())
    # Deliver results in rank order.
    for rank in range(n_workers):
        q.put(ret[rank])
    ucxx.stop_notifier_thread()
def _run_cluster_server(
    server_file,
    n_workers,
    ucx_options_list=None,
):
    """
    Spawn (non-blocking) the process that synchronizes benchmark workers.

    The server waits for all ``n_workers`` to connect and communicate their
    endpoint information, broadcasts the aggregate back so the workers can
    form a fully-connected network, and finally collects each worker's
    result.

    Parameters
    ----------
    server_file: str or None
        Path of a file that will receive the server's address and port, or
        `None` to print that information to stdout.
    n_workers : int
        Number of workers in the entire network.
    ucx_options_list: list of dict
        Options to pass to UCX when initializing workers, one for each worker.

    Returns
    -------
    return : tuple
        The spawned `multiprocessing.Process` and the queue on which the
        per-worker results will eventually be delivered.
    """
    results_queue = mp.Queue()
    server_proc = mp.Process(
        target=_server_process,
        args=(results_queue, server_file, n_workers, ucx_options_list),
    )
    server_proc.start()
    return server_proc, results_queue
def run_cluster_server(
    server_file,
    n_workers,
    ucx_options_list=None,
):
    """
    Blocking version of `_run_cluster_server()`.

    Spawns the server process, drains one result per worker from the queue,
    then joins the process and asserts it exited cleanly. Results are
    returned as a list indexed by worker rank.
    """
    server_proc, results_queue = _run_cluster_server(
        server_file=server_file,
        n_workers=n_workers,
        ucx_options_list=ucx_options_list,
    )
    # Drain the queue *before* joining: joining first can hang when the queue
    # holds many results (reproducible for more than 32 workers).
    results = [results_queue.get() for _ in range(n_workers)]
    server_proc.join()
    assert not server_proc.exitcode
    return results
def _worker_process(
    queue,
    server_info,
    num_node_workers,
    rank,
    ucx_options_list,
    ensure_cuda_device,
    func,
    args,
):
    """Target for one spawned worker process.

    Registers with the synchronization server, builds endpoints to every
    other worker (fully-connected network), runs ``func(rank, eps, args)``
    and reports its result to the server; the result is also pushed onto
    ``queue`` for the local parent process.
    """
    if ensure_cuda_device is True:
        # Must happen before CUDA initialization in this process.
        _ensure_cuda_device(args.devs, rank % num_node_workers)
    if ucx_options_list is not None:
        ucxx.init(ucx_options_list[rank])
    async def run():
        eps = {}
        async def server_handler(ep):
            # Incoming connection from a lower-ranked worker: it announces
            # its rank as a single uint64.
            peer_rank = np.empty((1,), dtype=np.uint64)
            await ep.recv(peer_rank)
            assert peer_rank[0] not in eps
            eps[peer_rank[0]] = ep
        lf = ucxx.create_listener(server_handler)
        logger.debug(f"Sending message info to {server_info=}, {rank=}")
        server_ep = await ucxx.create_endpoint(
            server_info["address"], server_info["port"]
        )
        await send_pickled_msg(server_ep, (rank, get_address(), lf.port))
        logger.debug(f"Receiving network info from server {rank=}")
        workers_info = await recv_pickled_msg(server_ep)
        n_workers = len(workers_info)
        logger.debug(f"Creating endpoints to network {rank=}")
        # Connect only to higher ranks; lower ranks connect to us (via the
        # listener above), so each pair has exactly one connection.
        for i in range(rank + 1, n_workers):
            remote_worker_ip, remote_worker_port = workers_info[i]
            eps[i] = await ucxx.create_endpoint(remote_worker_ip, remote_worker_port)
            await eps[i].send(np.array([rank], dtype=np.uint64))
        while len(eps) != n_workers - 1:
            await asyncio.sleep(0.1)
        logger.debug(f"Running worker {rank=}")
        if asyncio.iscoroutinefunction(func):
            results = await func(rank, eps, args)
        else:
            results = func(rank, eps, args)
        await send_pickled_msg(server_ep, results)
    loop = asyncio.new_event_loop()
    ret = loop.run_until_complete(run())
    queue.put(ret)
    ucxx.stop_notifier_thread()
def _run_cluster_workers(
    server_info,
    num_workers,
    num_node_workers,
    node_idx,
    worker_func,
    worker_args=None,
    ucx_options_list=None,
    ensure_cuda_device=False,
):
    """
    Create `n_workers` UCX processes that each run `worker_func`.

    Each process will first connect to a server spawned with
    `run_cluster_server()` which will synchronize workers across the nodes.

    This function is non-blocking and the processes created by this function
    call are started but not joined, making this function non-blocking. It's the
    user's responsibility to join all processes in the returned list to ensure
    their completion.

    Parameters
    ----------
    server_info: str or dict
        A string containing the path to a file created by `run_cluster_server()`
        containing the address and port of the server. Alternatively, a
        dictionary containing keys `"address"` and `"port"` may be used the same
        way.
    num_workers : int
        Number of workers in the entire network. Every node must run the same
        number of workers, and thus this value should be equal to
        `node_num_workers * num_cluster_nodes`.
    num_node_workers: int
        Number of workers that this node will run.
    node_idx: int
        Index of the node in the entire cluster, within the range
        `[0..num_cluster_nodes)`. This value is used to calculate the rank
        of each worker. Each node must have a unique index.
    worker_func: callable (can be a coroutine)
        Function that each worker executes.
        Must have signature: `worker(rank, eps, args)` where
            - rank is the worker id
            - eps is a dict of ranks to ucx endpoints
            - args given here as `worker_args`
    worker_args: object
        The argument to pass to `worker_func`.
    ucx_options_list: list of dict
        Options to pass to UCX when initializing workers, one for each worker.
    ensure_cuda_device: bool
        If `True`, sets the `CUDA_VISIBLE_DEVICES` environment variable to match
        the proper CUDA device based on the worker's rank and create the CUDA
        context on the corresponding device before calling `import ucxx` for the
        first time on the newly-spawned worker process, otherwise continues
        without modifying `CUDA_VISIBLE_DEVICES` and creating a CUDA context.
        Please note that having this set to `False` may cause all workers to use
        device 0 and will not ensure proper InfiniBand<->GPU mapping on UCX,
        potentially leading to low performance as GPUDirectRDMA will not be
        active.

    Returns
    -------
    processes : list
        The list of processes spawned (one for each worker).
    """
    # NOTE(review): `num_workers` is not used in this function; worker ranks
    # derive from `node_idx` and `num_node_workers`. Kept for signature
    # symmetry with `run_cluster_workers`.
    if isinstance(server_info, str):
        with open(server_info, mode="r") as fp:
            server_info = json.load(fp)
    elif not isinstance(server_info, dict):
        raise ValueError(
            "server_info must be the path to a server file, or a dictionary "
            "with the unpacked values."
        )
    processes = []
    for worker_num in range(num_node_workers):
        # Global rank is unique across the whole cluster.
        rank = node_idx * num_node_workers + worker_num
        q = mp.Queue()
        p = mp.Process(
            target=_worker_process,
            args=(
                q,
                server_info,
                num_node_workers,
                rank,
                ucx_options_list,
                ensure_cuda_device,
                worker_func,
                worker_args,
            ),
        )
        p.start()
        processes.append(p)
    return processes
def run_cluster_workers(
    server_info,
    num_workers,
    num_node_workers,
    node_idx,
    worker_func,
    worker_args=None,
    ucx_options_list=None,
    ensure_cuda_device=False,
):
    """
    Blocking version of `_run_cluster_workers()`.

    Spawns worker processes exactly like `_run_cluster_workers()` and then
    joins each one, so this call only returns once every worker process has
    exited, asserting that none of them failed.
    """
    spawned = _run_cluster_workers(
        server_info=server_info,
        num_workers=num_workers,
        num_node_workers=num_node_workers,
        node_idx=node_idx,
        worker_func=worker_func,
        worker_args=worker_args,
        ucx_options_list=ucx_options_list,
        ensure_cuda_device=ensure_cuda_device,
    )
    # Reap every worker; a non-zero exit code means the worker failed.
    for worker_process in spawned:
        worker_process.join()
        assert not worker_process.exitcode
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/benchmarks/send_recv.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import asyncio
import multiprocessing as mp
import os
import numpy as np
from ucxx._lib_async.utils import get_event_loop
from ucxx.benchmarks.backends.ucxx_async import (
UCXPyAsyncClient,
UCXPyAsyncServer,
)
from ucxx.benchmarks.backends.ucxx_core import UCXPyCoreClient, UCXPyCoreServer
from ucxx.utils import (
format_bytes,
parse_bytes,
print_key_value,
print_separator,
)
import ucxx
mp = mp.get_context("spawn")
def _get_backend_implementation(backend):
if backend == "ucxx-async":
return {"client": UCXPyAsyncClient, "server": UCXPyAsyncServer}
elif backend == "ucxx-core":
return {"client": UCXPyCoreClient, "server": UCXPyCoreServer}
elif backend == "tornado":
from ucxx.benchmarks.backends.tornado import (
TornadoClient,
TornadoServer,
)
return {"client": TornadoClient, "server": TornadoServer}
raise ValueError(f"Unknown backend {backend}")
def _set_cuda_device(object_type, device):
if object_type in ["cupy", "rmm"]:
import numba.cuda
os.environ["CUDA_VISIBLE_DEVICES"] = str(device)
numba.cuda.current_context()
def server(queue, args):
    """Run the benchmark server in this (sub)process.

    Optionally pins the process to a CPU core, sets up the CUDA device for
    GPU object types, then instantiates the selected backend's server class
    and runs it (driving an event loop when ``run`` is a coroutine function).

    Parameters
    ----------
    queue: multiprocessing.Queue
        Queue the backend server uses to publish the port it listens on.
    args: argparse.Namespace
        Parsed command-line arguments (see ``parse_args``).
    """
    if args.server_cpu_affinity >= 0:
        os.sched_setaffinity(0, [args.server_cpu_affinity])
    _set_cuda_device(args.object_type, args.server_dev)
    # NOTE: the local name `server` shadows this module-level function below.
    server = _get_backend_implementation(args.backend)["server"](args, queue)
    if asyncio.iscoroutinefunction(server.run):
        loop = get_event_loop()
        loop.run_until_complete(server.run())
    else:
        server.run()
def client(queue, port, server_address, args):
    """Run the benchmark client and print the results report.

    Connects to the server, runs the backend client's transfer loop, then
    reads the per-iteration timings the client put on ``queue`` and prints
    bandwidth/latency statistics.

    Parameters
    ----------
    queue: multiprocessing.Queue
        Queue where the backend client puts its list of iteration timings.
    port: int
        Port the server is listening on.
    server_address: str
        Hostname or IP address of the server.
    args: argparse.Namespace
        Parsed command-line arguments (see ``parse_args``).
    """
    if args.client_cpu_affinity >= 0:
        os.sched_setaffinity(0, [args.client_cpu_affinity])
    _set_cuda_device(args.object_type, args.client_dev)
    # NOTE: the local name `client` shadows this module-level function below.
    client = _get_backend_implementation(args.backend)["client"](
        args, queue, server_address, port
    )
    if asyncio.iscoroutinefunction(client.run):
        loop = get_event_loop()
        loop.run_until_complete(client.run())
    else:
        client.run()
    times = queue.get()
    assert len(times) == args.n_iter
    # Factor 2 accounts for both directions of each roundtrip iteration.
    bw_avg = format_bytes(2 * args.n_iter * args.n_bytes * args.n_buffers / sum(times))
    bw_med = format_bytes(2 * args.n_bytes * args.n_buffers / np.median(times))
    # Latency reported in nanoseconds for a single direction (roundtrip / 2).
    lat_avg = int(sum(times) * 1e9 / (2 * args.n_iter))
    lat_med = int(np.median(times) * 1e9 / 2)
    print("Roundtrip benchmark")
    print_separator(separator="=")
    print_key_value(key="Iterations", value=f"{args.n_iter}")
    print_key_value(key="Bytes", value=f"{format_bytes(args.n_bytes)}")
    print_key_value(key="Number of buffers", value=f"{args.n_buffers}")
    print_key_value(key="Object type", value=f"{args.object_type}")
    print_key_value(key="Reuse allocation", value=f"{args.reuse_alloc}")
    client.print_backend_specific_config()
    print_separator(separator="=")
    if args.object_type == "numpy":
        print_key_value(key="Device(s)", value="CPU-only")
        s_aff = (
            args.server_cpu_affinity
            if args.server_cpu_affinity >= 0
            else "affinity not set"
        )
        c_aff = (
            args.client_cpu_affinity
            if args.client_cpu_affinity >= 0
            else "affinity not set"
        )
        print_key_value(key="Server CPU", value=f"{s_aff}")
        print_key_value(key="Client CPU", value=f"{c_aff}")
    else:
        print_key_value(key="Device(s)", value=f"{args.server_dev}, {args.client_dev}")
    print_separator(separator="=")
    print_key_value("Bandwidth (average)", value=f"{bw_avg}/s")
    print_key_value("Bandwidth (median)", value=f"{bw_med}/s")
    print_key_value("Latency (average)", value=f"{lat_avg} ns")
    print_key_value("Latency (median)", value=f"{lat_med} ns")
    if not args.no_detailed_report:
        print_separator(separator="=")
        print_key_value(key="Iterations", value="Bandwidth, Latency")
        print_separator(separator="-")
        for i, t in enumerate(times):
            ts = format_bytes(2 * args.n_bytes * args.n_buffers / t)
            lat = int(t * 1e9 / 2)
            print_key_value(key=i, value=f"{ts}/s, {lat}ns")
def parse_args():
    """Build and validate the benchmark's command-line arguments.

    Returns
    -------
    args: argparse.Namespace
        Parsed and validated arguments.

    Raises
    ------
    RuntimeError
        If a combination of options is inconsistent, e.g. CUDA profiling
        without a GPU object type, an invalid progress mode, or multi-buffer
        transfers on a backend other than 'ucxx-async'.
    """
    parser = argparse.ArgumentParser(description="Roundtrip benchmark")
    # `parse_bytes` may be unavailable (not callable) in minimal installs;
    # fall back to accepting a plain integer byte count.
    if callable(parse_bytes):
        parser.add_argument(
            "-n",
            "--n-bytes",
            metavar="BYTES",
            default="10 Mb",
            type=parse_bytes,
            help="Message size. Default '10 Mb'.",
        )
    else:
        parser.add_argument(
            "-n",
            "--n-bytes",
            metavar="BYTES",
            default=10_000_000,
            type=int,
            help="Message size in bytes. Default '10_000_000'.",
        )
    parser.add_argument(
        "-x",
        "--n-buffers",
        default="1",
        type=int,
        help="Number of buffers to transfer using the multi-buffer transfer API. "
        "All buffers will be of same size specified by --n-bytes and same type "
        "specified by --object_type. (default: 1, i.e., single-buffer transfer)",
    )
    parser.add_argument(
        "--n-iter",
        metavar="N",
        default=10,
        type=int,
        help="Number of send / recv iterations (default 10).",
    )
    parser.add_argument(
        "--n-warmup-iter",
        default=10,
        type=int,
        help="Number of send / recv warmup iterations (default 10).",
    )
    parser.add_argument(
        "-b",
        "--server-cpu-affinity",
        metavar="N",
        default=-1,
        type=int,
        help="CPU affinity for server process (default -1: not set).",
    )
    parser.add_argument(
        "-c",
        "--client-cpu-affinity",
        metavar="N",
        default=-1,
        type=int,
        help="CPU affinity for client process (default -1: not set).",
    )
    parser.add_argument(
        "-o",
        "--object_type",
        default="numpy",
        choices=["numpy", "cupy", "rmm"],
        help="In-memory array type.",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to print timings per iteration.",
    )
    parser.add_argument(
        "-s",
        "--server-address",
        metavar="ip",
        default=ucxx.utils.get_address(),
        type=str,
        help="Server address (default `ucxx.utils.get_address()`).",
    )
    parser.add_argument(
        "-d",
        "--server-dev",
        metavar="N",
        default=0,
        type=int,
        help="GPU device on server (default 0).",
    )
    parser.add_argument(
        "-e",
        "--client-dev",
        metavar="N",
        default=0,
        type=int,
        help="GPU device on client (default 0).",
    )
    parser.add_argument(
        "--reuse-alloc",
        default=False,
        action="store_true",
        help="Reuse memory allocations between communication.",
    )
    parser.add_argument(
        "--cuda-profile",
        default=False,
        action="store_true",
        help="Setting CUDA profiler.start()/stop() around send/recv "
        "typically used with `nvprof --profile-from-start off "
        "--profile-child-processes`",
    )
    parser.add_argument(
        "--rmm-init-pool-size",
        metavar="BYTES",
        default=None,
        type=int,
        help="Initial RMM pool size (default 1/2 total GPU memory)",
    )
    parser.add_argument(
        "--server-only",
        default=False,
        action="store_true",
        help="Start up only a server process (to be used with --client).",
    )
    parser.add_argument(
        "--client-only",
        default=False,
        action="store_true",
        help="Connect to solitary server process (to be user with --server-only)",
    )
    parser.add_argument(
        "-p",
        "--port",
        default=None,
        help="The port the server will bind to, if not specified, UCX will bind "
        "to a random port. Must be specified when --client-only is used.",
        type=int,
    )
    parser.add_argument(
        "--enable-am",
        default=False,
        action="store_true",
        help="Use Active Message API instead of TAG for transfers",
    )
    parser.add_argument(
        "--rmm-managed-memory",
        default=False,
        action="store_true",
        help="Use RMM managed memory (requires `--object-type rmm`)",
    )
    parser.add_argument(
        "--no-detailed-report",
        default=False,
        action="store_true",
        help="Disable detailed report per iteration.",
    )
    parser.add_argument(
        "-l",
        "--backend",
        default="ucxx-async",
        type=str,
        help="Backend Library (-l) to use, options are: 'ucxx-async' (default), "
        "'ucxx-core' and 'tornado'.",
    )
    parser.add_argument(
        "--progress-mode",
        default="thread",
        # Fix: the help text previously listed only 'thread' and 'blocking',
        # but the validation below accepts four modes.
        help="Progress mode for the UCP worker. Valid options are: "
        "'blocking', 'polling', 'thread' (default) and 'thread-polling'.",
        type=str,
    )
    parser.add_argument(
        "--asyncio-wait",
        default=False,
        action="store_true",
        help="Wait for transfer requests with Python's asyncio, requires"
        "`--progress-mode=thread`. (Default: disabled)",
    )
    parser.add_argument(
        "--delay-progress",
        default=False,
        action="store_true",
        help="Only applies to 'ucxx-core' backend: delay ucp_worker_progress calls "
        "until a minimum number of outstanding operations is reached, implies "
        "non-blocking send/recv. The --max-outstanding argument may be used to "
        "control number of maximum outstanding operations. (Default: disabled)",
    )
    parser.add_argument(
        "--max-outstanding",
        metavar="N",
        default=32,
        type=int,
        help="Only applies to 'ucxx-core' backend: number of maximum outstanding "
        "operations, see --delay-progress. (Default: 32)",
    )
    parser.add_argument(
        "--error-handling",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Enable endpoint error handling.",
    )
    args = parser.parse_args()
    # Cross-option validation: reject combinations the backends cannot run.
    if args.cuda_profile and args.object_type == "numpy":
        raise RuntimeError(
            "`--cuda-profile` requires `--object_type=cupy` or `--object_type=rmm`"
        )
    if args.rmm_managed_memory and args.object_type != "rmm":
        raise RuntimeError("`--rmm-managed-memory` requires `--object_type=rmm`")
    backend_impl = _get_backend_implementation(args.backend)
    if not (
        backend_impl["client"].has_cuda_support
        and backend_impl["server"].has_cuda_support
    ):
        if args.object_type in {"cupy", "rmm"}:
            raise RuntimeError(
                f"Backend '{args.backend}' does not support CUDA transfers"
            )
    if args.progress_mode not in ["blocking", "polling", "thread", "thread-polling"]:
        raise RuntimeError(f"Invalid `--progress-mode`: '{args.progress_mode}'")
    if args.progress_mode == "blocking" and args.backend == "ucxx-async":
        raise RuntimeError("Blocking progress mode not supported for ucxx-async yet")
    if args.asyncio_wait and not args.progress_mode.startswith("thread"):
        raise RuntimeError(
            "`--asyncio-wait` requires `--progress-mode=thread` or "
            "`--progress-mode=thread-polling`"
        )
    if args.n_buffers > 1 and args.backend != "ucxx-async":
        raise RuntimeError(
            "Multi-buffer transfer only support for `--backend=ucxx-async`."
        )
    if args.backend != "ucxx-core" and args.delay_progress:
        raise RuntimeError("`--delay-progress` requires `--backend=ucxx-core`")
    if args.enable_am:
        raise RuntimeError("AM not implemented yet")
    return args
def main():
    """Entry point: spawn server and/or client processes per CLI flags.

    Three modes, selected by ``--server-only``/``--client-only``:
    - neither flag: spawn both processes locally and wait for both;
    - ``--server-only``: run just the server and wait for it;
    - ``--client-only``: connect to an external server at ``--port``.
    """
    args = parse_args()
    server_address = args.server_address
    # if you are the server, only start the `server process`
    # if you are the client, only start the `client process`
    # otherwise, start everything
    if not args.client_only:
        # server process
        q1 = mp.Queue()
        p1 = mp.Process(target=server, args=(q1, args))
        p1.start()
        port = q1.get()
        print(f"Server Running at {server_address}:{port}")
    else:
        port = args.port
    if not args.server_only or args.client_only:
        # client process
        print(f"Client connecting to server at {server_address}:{port}")
        q2 = mp.Queue()
        p2 = mp.Process(target=client, args=(q2, port, server_address, args))
        p2.start()
        p2.join()
        assert not p2.exitcode
        # Fix: when both processes were spawned locally, also reap the server
        # process; previously it was left unjoined after the client finished.
        if not args.client_only:
            p1.join()
            assert not p1.exitcode
    else:
        p1.join()
        assert not p1.exitcode
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/benchmarks | rapidsai_public_repos/ucxx/python/ucxx/benchmarks/backends/ucxx_core.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
from argparse import Namespace
from queue import Queue
from time import monotonic, sleep
import ucxx._lib.libucxx as ucx_api
from ucxx._lib.arr import Array
from ucxx._lib_async.utils import get_event_loop
from ucxx.benchmarks.backends.base import BaseClient, BaseServer
from ucxx.benchmarks.utils import get_allocator
from ucxx.utils import print_key_value
import ucxx
WireupMessage = bytearray(b"wireup")
def _create_cuda_context(device):
    """Create a CUDA context on ``device`` in the calling thread.

    Used as the progress-thread start callback so the progress thread gets a
    CUDA context on the same device as the benchmark process it serves
    (callers pass ``args.server_dev`` / ``args.client_dev``).
    """
    import numba.cuda

    # Bug fix: the `device` argument was previously ignored (hard-coded 0),
    # which created every progress thread's context on device 0.
    numba.cuda.current_context(device)
def _transfer_wireup(ep, server):
    """Exchange a small wireup message to establish the endpoint.

    Submits a mirrored tag recv/send pair (server receives on tag=1 and sends
    on tag=0; the client does the opposite) and returns the two outstanding
    request objects for the caller to wait on.
    """
    import numpy as np
    # Using bytearray currently segfaults
    # TODO: fix
    # message = bytearray(b"wireup")
    message = np.array([1], dtype="u8")
    if server:
        message = Array(message)
        return [
            ep.tag_recv(message, tag=1),
            ep.tag_send(message, tag=0),
        ]
    else:
        message = Array(np.zeros_like(message))
        return [
            ep.tag_send(message, tag=1),
            ep.tag_recv(message, tag=0),
        ]
async def _wait_requests_async(worker, requests):
import asyncio
await asyncio.gather(*[r.wait_yield() for r in requests])
def _wait_requests(worker, progress_mode, requests):
while not all([r.is_completed() for r in requests]):
if progress_mode == "blocking":
worker.progress_worker_event()
if progress_mode == "polling":
worker.progress()
def register_am_allocators(args: Namespace, worker: ucx_api.UCXWorker):
    """
    Register Active Message allocator in worker to correct memory type if the
    benchmark is set to use the Active Message API.

    A host (NumPy) allocator is always registered; a CUDA allocator is added
    when the transferred objects live on the GPU ("cupy" or "rmm").  Does
    nothing unless the benchmark was started with ``--enable-am``.

    Parameters
    ----------
    args
        Parsed command-line arguments that will be used as parameters during to
        determine whether the caller is using the Active Message API and what
        memory type.
    worker
        UCX-Py core Worker object where to register the allocator.
    """
    if not args.enable_am:
        return
    import numpy as np
    worker.register_am_allocator(
        lambda n: np.empty(n, dtype=np.uint8), ucx_api.AllocatorType.HOST
    )
    if args.object_type == "cupy":
        import cupy as cp
        worker.register_am_allocator(
            lambda n: cp.empty(n, dtype=cp.uint8), ucx_api.AllocatorType.CUDA
        )
    elif args.object_type == "rmm":
        import rmm
        worker.register_am_allocator(
            lambda n: rmm.DeviceBuffer(size=n), ucx_api.AllocatorType.CUDA
        )
class UCXPyCoreServer(BaseServer):
    """Benchmark server implemented on the UCXX core (non-async) API."""

    has_cuda_support = True
    def __init__(
        self,
        args: Namespace,
        queue: Queue,
    ):
        self.args = args
        self.queue = queue
    def run(self):
        """Listen for one client, perform wireup, then echo tagged messages.

        Publishes the listener port on ``self.queue``, waits for a client
        connection, and for each iteration receives on tag=1 and sends the
        same buffer back on tag=0.
        """
        self.ep = None
        ctx = ucx_api.UCXContext(
            feature_flags=(
                ucx_api.Feature.AM if self.args.enable_am else ucx_api.Feature.TAG,
                ucx_api.Feature.WAKEUP,
            )
        )
        worker = ucx_api.UCXWorker(ctx)
        xp = get_allocator(
            self.args.object_type,
            self.args.rmm_init_pool_size,
            self.args.rmm_managed_memory,
        )
        register_am_allocators(self.args, worker)
        # Thread progress modes run progress in the background; the CUDA
        # context callback makes the progress thread use the server's device.
        if self.args.progress_mode.startswith("thread"):
            worker.set_progress_thread_start_callback(
                _create_cuda_context, cb_args=(self.args.server_dev,)
            )
            polling_mode = self.args.progress_mode == "thread-polling"
            worker.start_progress_thread(polling_mode=polling_mode)
        else:
            worker.init_blocking_progress_mode()
        # A reference to listener's endpoint is stored to prevent it from going
        # out of scope immediately after the listener callback terminates.
        global ep
        ep = None
        def _listener_handler(conn_request):
            global ep
            ep = listener.create_endpoint_from_conn_request(
                conn_request, endpoint_error_handling=self.args.error_handling
            )
        listener = ucx_api.UCXListener.create(
            worker=worker, port=self.args.port or 0, cb_func=_listener_handler
        )
        self.queue.put(listener.port)
        # Without this, q.get() in main() may sometimes hang indefinitely.
        # TODO: find root cause and fix.
        sleep(0.1)
        # Drive progress (when not in a thread mode) until the client connects.
        while ep is None:
            if self.args.progress_mode == "blocking":
                worker.progress_worker_event()
            elif self.args.progress_mode == "polling":
                worker.progress()
        # Wireup before starting to transfer data
        wireup_requests = _transfer_wireup(ep, server=True)
        _wait_requests(worker, self.args.progress_mode, wireup_requests)
        async def _transfer():
            # The same buffer is used for recv and the echoing send; with
            # --reuse-alloc it is allocated once for all iterations.
            if self.args.reuse_alloc:
                recv_msg = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
            for i in range(self.args.n_iter + self.args.n_warmup_iter):
                if not self.args.reuse_alloc:
                    recv_msg = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
                requests = [
                    ep.tag_recv(recv_msg, tag=1),
                    ep.tag_send(recv_msg, tag=0),
                ]
                if self.args.asyncio_wait:
                    await _wait_requests_async(worker, requests)
                else:
                    _wait_requests(worker, self.args.progress_mode, requests)
                # Check all requests completed successfully
                for r in requests:
                    r.check_error()
        loop = get_event_loop()
        loop.run_until_complete(_transfer())
class UCXPyCoreClient(BaseClient):
    """Benchmark client implemented on the UCXX core (non-async) API."""

    has_cuda_support = True
    def __init__(
        self,
        args: Namespace,
        queue: Queue,
        server_address: str,
        port: int,
    ):
        self.args = args
        self.queue = queue
        self.server_address = server_address
        self.port = port
    def run(self):
        """Connect to the server, run timed roundtrips, publish timings.

        Each iteration sends on tag=1 and receives the echo on tag=0; the
        per-iteration durations (warmup excluded) are put on ``self.queue``.
        """
        ctx = ucx_api.UCXContext(
            feature_flags=(
                ucx_api.Feature.AM
                if self.args.enable_am is True
                else ucx_api.Feature.TAG,
                ucx_api.Feature.WAKEUP,
            )
        )
        worker = ucx_api.UCXWorker(ctx)
        xp = get_allocator(
            self.args.object_type,
            self.args.rmm_init_pool_size,
            self.args.rmm_managed_memory,
        )
        register_am_allocators(self.args, worker)
        send_msg = Array(xp.arange(self.args.n_bytes, dtype="u1"))
        # Thread progress modes run progress in the background; the CUDA
        # context callback makes the progress thread use the client's device.
        if self.args.progress_mode.startswith("thread"):
            worker.set_progress_thread_start_callback(
                _create_cuda_context, cb_args=(self.args.client_dev,)
            )
            polling_mode = self.args.progress_mode == "thread-polling"
            worker.start_progress_thread(polling_mode=polling_mode)
        else:
            worker.init_blocking_progress_mode()
        ep = ucx_api.UCXEndpoint.create(
            worker,
            self.server_address,
            self.port,
            endpoint_error_handling=self.args.error_handling,
        )
        # Wireup before starting to transfer data
        wireup_requests = _transfer_wireup(ep, server=False)
        _wait_requests(worker, self.args.progress_mode, wireup_requests)
        times = []
        async def _transfer():
            if self.args.reuse_alloc:
                recv_msg = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
            if self.args.cuda_profile:
                xp.cuda.profiler.start()
            for i in range(self.args.n_iter + self.args.n_warmup_iter):
                start = monotonic()
                if not self.args.reuse_alloc:
                    recv_msg = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
                requests = [
                    ep.tag_send(send_msg, tag=1),
                    ep.tag_recv(recv_msg, tag=0),
                ]
                if self.args.asyncio_wait:
                    await _wait_requests_async(worker, requests)
                else:
                    _wait_requests(worker, self.args.progress_mode, requests)
                # Check all requests completed successfully
                for r in requests:
                    r.check_error()
                stop = monotonic()
                # Warmup iterations are executed but not reported.
                if i >= self.args.n_warmup_iter:
                    times.append(stop - start)
            if self.args.cuda_profile:
                xp.cuda.profiler.stop()
        loop = get_event_loop()
        loop.run_until_complete(_transfer())
        self.queue.put(times)
    def print_backend_specific_config(self):
        """Print UCXX-core specific settings for the results report."""
        delay_progress_str = (
            f"True ({self.args.max_outstanding})"
            if self.args.delay_progress is True
            else "False"
        )
        print_key_value(
            key="Transfer API", value=f"{'AM' if self.args.enable_am else 'TAG'}"
        )
        print_key_value(key="Progress mode", value=f"{self.args.progress_mode}")
        print_key_value(key="Asyncio wait", value=f"{self.args.asyncio_wait}")
        print_key_value(key="Delay progress", value=f"{delay_progress_str}")
        print_key_value(key="UCX_TLS", value=f"{ucxx.get_config()['TLS']}")
        print_key_value(
            key="UCX_NET_DEVICES", value=f"{ucxx.get_config()['NET_DEVICES']}"
        )
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/benchmarks | rapidsai_public_repos/ucxx/python/ucxx/benchmarks/backends/tornado.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
from time import monotonic
import numpy as np
from tornado.iostream import StreamClosedError
from tornado.tcpclient import TCPClient
from tornado.tcpserver import TCPServer
from ucxx.benchmarks.backends.base import BaseClient, BaseServer
class TornadoServer(BaseServer):
    """TCP echo benchmark server built on tornado (CPU-only baseline)."""

    has_cuda_support = False
    def __init__(self, args, queue):
        self.args = args
        self.queue = queue
    def _start_listener(self, server, port):
        """Listen on ``port``, or scan 10000-59999 for a free port.

        Returns the port actually bound.
        """
        if port is not None:
            server.listen(port)
        else:
            for i in range(10000, 60000):
                try:
                    server.listen(i)
                except OSError:
                    # Port in use (or otherwise unavailable); try the next one.
                    continue
                else:
                    port = i
                    break
        return port
    async def run(self):
        """Serve a single benchmark session: echo every received buffer."""
        args = self.args
        # Set once the full iteration count has been served, ending `run`.
        event = asyncio.Event()
        class TransferServer(TCPServer):
            async def handle_stream(self, stream, address):
                if args.reuse_alloc:
                    recv_msg = np.zeros(args.n_bytes, dtype="u1")
                    assert recv_msg.nbytes == args.n_bytes
                for i in range(args.n_iter + args.n_warmup_iter):
                    if not args.reuse_alloc:
                        recv_msg = np.zeros(args.n_bytes, dtype="u1")
                    try:
                        await stream.read_into(recv_msg.data)
                        await stream.write(recv_msg.data)
                    except StreamClosedError as e:
                        print(e)
                        break
                event.set()
        # Set max_buffer_size to 1 GiB for now
        server = TransferServer(max_buffer_size=1024**3)
        port = self._start_listener(server, args.port)
        self.queue.put(port)
        await event.wait()
class TornadoClient(BaseClient):
    """TCP benchmark client built on tornado (CPU-only baseline)."""

    has_cuda_support = False
    def __init__(self, args, queue, server_address, port):
        self.args = args
        self.queue = queue
        self.server_address = server_address
        self.port = port
    async def run(self) -> bool:
        """Run timed write/read roundtrips and publish the timings.

        Per-iteration durations (warmup excluded) are put on ``self.queue``.
        """
        client = TCPClient()
        # Set max_buffer_size to 1 GiB for now
        stream = await client.connect(
            self.server_address, self.port, max_buffer_size=1024**3
        )
        send_msg = np.arange(self.args.n_bytes, dtype="u1")
        assert send_msg.nbytes == self.args.n_bytes
        if self.args.reuse_alloc:
            recv_msg = np.zeros(self.args.n_bytes, dtype="u1")
            assert recv_msg.nbytes == self.args.n_bytes
        times = []
        for i in range(self.args.n_iter + self.args.n_warmup_iter):
            start = monotonic()
            if not self.args.reuse_alloc:
                recv_msg = np.zeros(self.args.n_bytes, dtype="u1")
            await stream.write(send_msg.data)
            await stream.read_into(recv_msg.data)
            stop = monotonic()
            # Warmup iterations are executed but not reported.
            if i >= self.args.n_warmup_iter:
                times.append(stop - start)
        self.queue.put(times)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/benchmarks | rapidsai_public_repos/ucxx/python/ucxx/benchmarks/backends/ucxx_async.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
from argparse import Namespace
from queue import Queue
from time import monotonic
from ucxx._lib.arr import Array
from ucxx.benchmarks.backends.base import BaseClient, BaseServer
from ucxx.benchmarks.utils import get_allocator
from ucxx.utils import print_key_value
import ucxx
def register_am_allocators(args: Namespace):
    """Install Active Message receive allocators for this benchmark run.

    Does nothing unless the benchmark was started with ``--enable-am``.  A
    host (NumPy) allocator is always registered; a CUDA allocator is added on
    top when the transferred objects live on the GPU ("cupy" or "rmm").

    Parameters
    ----------
    args
        Parsed command-line arguments; only ``enable_am`` and ``object_type``
        are consulted.
    """
    if not args.enable_am:
        return

    import numpy as np

    register = ucxx.register_am_allocator
    register(lambda n: np.empty(n, dtype=np.uint8), "host")

    if args.object_type == "cupy":
        import cupy as cp

        register(lambda n: cp.empty(n, dtype=cp.uint8), "cuda")
    elif args.object_type == "rmm":
        import rmm

        register(lambda n: rmm.DeviceBuffer(size=n), "cuda")
class UCXPyAsyncServer(BaseServer):
    """Benchmark server implemented on the UCXX async (high-level) API."""

    has_cuda_support = True
    def __init__(
        self,
        args: Namespace,
        queue: Queue,
    ):
        self.args = args
        self.queue = queue
    async def run(self):
        """Listen for one client and echo every transfer back to it.

        Publishes the listener port on ``self.queue`` and serves until the
        handler closes the listener after the full iteration count.
        """
        ucxx.init(progress_mode=self.args.progress_mode)
        xp = get_allocator(
            self.args.object_type,
            self.args.rmm_init_pool_size,
            self.args.rmm_managed_memory,
        )
        register_am_allocators(self.args)
        async def server_handler(ep):
            # With --reuse-alloc a single recv buffer serves all iterations
            # (single-buffer, TAG path only).
            if not self.args.enable_am:
                if self.args.reuse_alloc and self.args.n_buffers == 1:
                    reuse_msg = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
            for i in range(self.args.n_iter + self.args.n_warmup_iter):
                if self.args.enable_am:
                    recv = await ep.am_recv()
                    await ep.am_send(recv)
                else:
                    if self.args.n_buffers == 1:
                        msg = (
                            reuse_msg
                            if self.args.reuse_alloc
                            else xp.zeros(self.args.n_bytes, dtype="u1")
                        )
                        assert msg.nbytes == self.args.n_bytes
                        await ep.recv(msg)
                        await ep.send(msg)
                    else:
                        msgs = await ep.recv_multi()
                        await ep.send_multi(msgs)
            await ep.close()
            # Closing the listener ends the wait loop in `run` below.
            lf.close()
        lf = ucxx.create_listener(
            server_handler,
            port=self.args.port,
            endpoint_error_handling=self.args.error_handling,
        )
        self.queue.put(lf.port)
        while not lf.closed():
            await asyncio.sleep(0.5)
        ucxx.stop_notifier_thread()
class UCXPyAsyncClient(BaseClient):
    """Benchmark client implemented on the UCXX async (high-level) API."""

    has_cuda_support = True
    def __init__(
        self,
        args: Namespace,
        queue: Queue,
        server_address: str,
        port: int,
    ):
        self.args = args
        self.queue = queue
        self.server_address = server_address
        self.port = port
    async def run(self):
        """Connect to the server, run timed roundtrips, publish timings.

        Supports AM, single-buffer TAG, and multi-buffer transfer paths; the
        per-iteration durations (warmup excluded) are put on ``self.queue``.
        """
        ucxx.init(progress_mode=self.args.progress_mode)
        xp = get_allocator(
            self.args.object_type,
            self.args.rmm_init_pool_size,
            self.args.rmm_managed_memory,
        )
        register_am_allocators(self.args)
        ep = await ucxx.create_endpoint(
            self.server_address,
            self.port,
            endpoint_error_handling=self.args.error_handling,
        )
        if self.args.enable_am:
            msg = xp.arange(self.args.n_bytes, dtype="u1")
        else:
            # With --reuse-alloc the same buffer(s) serve all iterations; in
            # the multi-buffer case the single send buffer is reused for
            # every one of the n_buffers slots.
            if self.args.reuse_alloc:
                reuse_msg_send = Array(xp.arange(self.args.n_bytes, dtype="u1"))
                if self.args.n_buffers == 1:
                    reuse_msg_recv = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
                else:
                    reuse_msg_send = [reuse_msg_send] * self.args.n_buffers
        if self.args.cuda_profile:
            xp.cuda.profiler.start()
        times = []
        for i in range(self.args.n_iter + self.args.n_warmup_iter):
            start = monotonic()
            if self.args.enable_am:
                await ep.am_send(msg)
                await ep.am_recv()
            else:
                if self.args.n_buffers == 1:
                    if self.args.reuse_alloc:
                        msg_send = reuse_msg_send
                        msg_recv = reuse_msg_recv
                    else:
                        msg_send = Array(xp.arange(self.args.n_bytes, dtype="u1"))
                        msg_recv = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
                    await ep.send(msg_send)
                    await ep.recv(msg_recv)
                else:
                    if self.args.reuse_alloc:
                        msg_send = reuse_msg_send
                    else:
                        msg_send = [
                            Array(xp.arange(self.args.n_bytes, dtype="u1"))
                            for i in range(self.args.n_buffers)
                        ]
                    await ep.send_multi(msg_send)
                    msg_recv = await ep.recv_multi()
            stop = monotonic()
            # Warmup iterations are executed but not reported.
            if i >= self.args.n_warmup_iter:
                times.append(stop - start)
        if self.args.cuda_profile:
            xp.cuda.profiler.stop()
        self.queue.put(times)
        ucxx.stop_notifier_thread()
    def print_backend_specific_config(self):
        """Print UCXX-async specific settings for the results report."""
        print_key_value(
            key="Transfer API", value=f"{'AM' if self.args.enable_am else 'TAG'}"
        )
        print_key_value(key="Progress mode", value=f"{self.args.progress_mode}")
        print_key_value(key="UCX_TLS", value=f"{ucxx.get_config()['TLS']}")
        print_key_value(
            key="UCX_NET_DEVICES", value=f"{ucxx.get_config()['NET_DEVICES']}"
        )
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/benchmarks | rapidsai_public_repos/ucxx/python/ucxx/benchmarks/backends/__init__.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/benchmarks | rapidsai_public_repos/ucxx/python/ucxx/benchmarks/backends/base.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
from abc import ABC, abstractmethod
from argparse import Namespace
from queue import Queue
class BaseServer(ABC):
    """Abstract interface implemented by each benchmark server backend."""

    @abstractmethod
    def __init__(self, args: Namespace, queue: Queue):
        """
        Benchmark server.

        Parameters
        ----------
        args: argparse.Namespace
            Parsed command-line arguments that will be used as parameters during
            the `run` method.
        queue: Queue
            Queue object where server will put the port it is listening at.
        """
        pass

    @property
    @abstractmethod
    # Fix: the abstract property previously lacked `self`; concrete backends
    # override it with a plain class attribute, so callers are unaffected.
    def has_cuda_support(self) -> bool:
        """
        Check whether server implementation supports CUDA memory transfers.

        Returns
        -------
        ret: bool
            `True` if CUDA is supported, `False` otherwise.
        """
        return False

    @abstractmethod
    def run(self):
        """
        Run the benchmark server.

        The server is executed as follows:

        1. Start the listener and put port where it is listening into the queue
           registered in constructor;
        2. Setup any additional context (Active Message registration, memory
           buffers to reuse, etc.);
        3. Transfer data back-and-forth with client;
        4. Shutdown server.
        """
        pass
class BaseClient(ABC):
    """Abstract interface implemented by each benchmark client backend."""

    @abstractmethod
    def __init__(self, args: Namespace, queue: Queue, server_address: str, port: int):
        """
        Benchmark client.

        Parameters
        ----------
        args
            Parsed command-line arguments that will be used as parameters during
            the `run` method.
        queue
            Queue object where to put timing results.
        server_address
            Hostname or IP address where server is listening at.
        port
            Port where server is listening at.
        """
        pass

    @property
    @abstractmethod
    # Fix: the abstract property previously lacked `self`; concrete backends
    # override it with a plain class attribute, so callers are unaffected.
    def has_cuda_support(self) -> bool:
        """
        Check whether client implementation supports CUDA memory transfers.

        Returns
        -------
        ret: bool
            `True` if CUDA is supported, `False` otherwise.
        """
        return False

    @abstractmethod
    def run(self):
        """
        Run the benchmark client.

        The client is executed as follows:

        1. Connects to listener;
        2. Setup any additional context (Active Message registration, memory
           buffers to reuse, etc.);
        3. Transfer data back-and-forth with server;
        4. Shutdown client;
        5. Put timing results into the queue registered in constructor.
        """
        pass

    def print_backend_specific_config(self):
        """
        Pretty print configuration specific to backend implementation.
        """
        pass
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib/CMakeLists.txt | # =================================================================================
# SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD 3-Clause License
# =================================================================================
# Build the ucxx._lib Cython extension modules and link them against the
# ucxx C++/Python libraries.
set(cython_sources arr.pyx libucxx.pyx)
set(linked_libraries ucxx::ucxx ucxx::python Python3::Python)
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" ASSOCIATED_TARGETS ucxx
)
# TODO: Finding NumPy currently requires finding Development due to a bug in CMake. This bug was
# fixed in https://gitlab.kitware.com/cmake/cmake/-/merge_requests/7410 and will be available in
# CMake 3.24, so we can remove the Development component once we upgrade to CMake 3.24.
# find_package(Python REQUIRED COMPONENTS Development NumPy)
# Note: The bug noted above prevents us from finding NumPy successfully using FindPython.cmake
# inside the manylinux images used to build wheels because manylinux images do not contain
# libpython.so and therefore Development cannot be found. Until we upgrade to CMake 3.24, we should
# use FindNumpy.cmake instead (provided by scikit-build). When we switch to 3.24 we can try
# switching back, but it may not work if that implicitly still requires Python libraries. In that
# case we'll need to follow up with the CMake team to remove that dependency. The stopgap solution
# is to unpack the static lib tarballs in the wheel building jobs so that there are at least static
# libs to be found, but that should be a last resort since it implies a dependency that isn't really
# necessary. The relevant command is tar -xf /opt/_internal/static-libs-for-embedding-only.tar.xz -C
# /opt/_internal"
find_package(NumPy REQUIRED)
# Only libucxx uses the NumPy C API and therefore needs its headers.
set(targets_using_numpy libucxx)
foreach(target IN LISTS targets_using_numpy)
  target_include_directories(${target} PRIVATE "${NumPy_INCLUDE_DIRS}")
  # Switch to the line below when we switch back to FindPython.cmake in CMake 3.24.
  # target_include_directories(${target} PRIVATE "${Python_NumPy_INCLUDE_DIRS}")
  target_compile_definitions(${target} PRIVATE NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION)
endforeach()
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib/libucxx.pyx | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import enum
import functools
import logging
from cpython.buffer cimport PyBUF_FORMAT, PyBUF_ND, PyBUF_WRITABLE
from cpython.ref cimport PyObject
from cython.operator cimport dereference as deref
from libc.stdint cimport uintptr_t
from libcpp cimport nullptr
from libcpp.functional cimport function
from libcpp.map cimport map as cpp_map
from libcpp.memory cimport (
dynamic_pointer_cast,
make_shared,
make_unique,
shared_ptr,
unique_ptr,
)
from libcpp.string cimport string
from libcpp.unordered_map cimport unordered_map
from libcpp.utility cimport move
from libcpp.vector cimport vector
import numpy as np
from rmm._lib.device_buffer cimport DeviceBuffer
from . cimport ucxx_api
from .arr cimport Array
from .ucxx_api cimport *
logger = logging.getLogger("ucx")
np.import_array()
cdef ptr_to_ndarray(void* ptr, np.npy_intp N):
    """Wrap ``ptr`` as a 1-D uint8 ndarray of length ``N`` that owns ``ptr``."""
    cdef np.ndarray[np.uint8_t, ndim=1, mode="c"] arr = (
        np.PyArray_SimpleNewFromData(1, &N, np.NPY_UINT8, <np.uint8_t*>ptr)
    )
    # Transfer ownership: NumPy frees the buffer when `arr` is collected.
    PyArray_ENABLEFLAGS(arr, NPY_ARRAY_OWNDATA)
    return arr
def _get_rmm_buffer(uintptr_t recv_buffer_ptr):
    """Take ownership of the C++ ``RMMBuffer`` at ``recv_buffer_ptr`` and
    return it as an RMM ``DeviceBuffer``."""
    cdef RMMBuffer* rmm_buffer = <RMMBuffer*>recv_buffer_ptr
    return DeviceBuffer.c_from_unique_ptr(move(rmm_buffer.release()))
def _get_host_buffer(uintptr_t recv_buffer_ptr):
    # Release the host allocation from the C++ HostBuffer and wrap it in an
    # owning uint8 ndarray (see ptr_to_ndarray).
    cdef HostBuffer* host_buffer = <HostBuffer*>recv_buffer_ptr
    cdef size_t size = host_buffer.getSize()
    return ptr_to_ndarray(host_buffer.release(), size)
cdef shared_ptr[Buffer] _rmm_am_allocator(size_t length):
    """Allocate an RMM-backed buffer of ``length`` bytes.

    Registered as the CUDA active-message allocator in UCXWorker.__init__.
    """
    cdef shared_ptr[RMMBuffer] rmm_buffer = make_shared[RMMBuffer](length)
    return dynamic_pointer_cast[Buffer, RMMBuffer](rmm_buffer)
###############################################################################
# Exceptions #
###############################################################################
# Placeholders for the exception types defined on the C++ side. They remain
# None until _create_exceptions() is called, which rebinds each of them to
# the corresponding ucxx::python exception object.
UCXError = None
UCXNoMessageError = None
UCXNoResourceError = None
UCXIOError = None
UCXNoMemoryError = None
UCXInvalidParamError = None
UCXUnreachableError = None
UCXInvalidAddrError = None
UCXNotImplementedError = None
UCXMessageTruncatedError = None
UCXNoProgressError = None
UCXBufferTooSmallError = None
UCXNoElemError = None
UCXSomeConnectsFailedError = None
UCXNoDeviceError = None
UCXBusyError = None
UCXCanceledError = None
UCXShmemSegmentError = None
UCXAlreadyExistsError = None
UCXOutOfRangeError = None
UCXTimedOutError = None
UCXExceedsLimitError = None
UCXUnsupportedError = None
UCXRejectedError = None
UCXNotConnectedError = None
UCXConnectionResetError = None
UCXFirstLinkFailureError = None
UCXLastLinkFailureError = None
UCXFirstEndpointFailureError = None
UCXEndpointTimeoutError = None
UCXLastEndpointFailureError = None
UCXCloseError = None
UCXConfigError = None
# Legacy names
UCXCanceled = None
UCXMsgTruncated = None
def _create_exceptions():
    """Create the C++-side exception types and bind them to module globals.

    Must be called once before any of the ``UCX*Error`` names above are
    usable; until then they are all None.
    """
    global UCXError
    global UCXNoMessageError
    global UCXNoResourceError
    global UCXIOError
    global UCXNoMemoryError
    global UCXInvalidParamError
    global UCXUnreachableError
    global UCXInvalidAddrError
    global UCXNotImplementedError
    global UCXMessageTruncatedError
    global UCXNoProgressError
    global UCXBufferTooSmallError
    global UCXNoElemError
    global UCXSomeConnectsFailedError
    global UCXNoDeviceError
    global UCXBusyError
    global UCXCanceledError
    global UCXShmemSegmentError
    global UCXAlreadyExistsError
    global UCXOutOfRangeError
    global UCXTimedOutError
    global UCXExceedsLimitError
    global UCXUnsupportedError
    global UCXRejectedError
    global UCXNotConnectedError
    global UCXConnectionResetError
    global UCXFirstLinkFailureError
    global UCXLastLinkFailureError
    global UCXFirstEndpointFailureError
    global UCXEndpointTimeoutError
    global UCXLastEndpointFailureError
    global UCXCloseError
    global UCXConfigError
    # Legacy names
    global UCXCanceled
    global UCXMsgTruncated
    # Instantiate the exception objects on the C++ side; this populates the
    # UCXX* PyObject pointers imported from ucxx_api, which are cast to
    # Python objects below.
    create_exceptions()
    UCXError = <object>UCXXError
    UCXNoMessageError = <object>UCXXNoMessageError
    UCXNoResourceError = <object>UCXXNoResourceError
    UCXIOError = <object>UCXXIOError
    UCXNoMemoryError = <object>UCXXNoMemoryError
    UCXInvalidParamError = <object>UCXXInvalidParamError
    UCXUnreachableError = <object>UCXXUnreachableError
    UCXInvalidAddrError = <object>UCXXInvalidAddrError
    UCXNotImplementedError = <object>UCXXNotImplementedError
    UCXMessageTruncatedError = <object>UCXXMessageTruncatedError
    UCXNoProgressError = <object>UCXXNoProgressError
    UCXBufferTooSmallError = <object>UCXXBufferTooSmallError
    UCXNoElemError = <object>UCXXNoElemError
    UCXSomeConnectsFailedError = <object>UCXXSomeConnectsFailedError
    UCXNoDeviceError = <object>UCXXNoDeviceError
    UCXBusyError = <object>UCXXBusyError
    UCXCanceledError = <object>UCXXCanceledError
    UCXShmemSegmentError = <object>UCXXShmemSegmentError
    UCXAlreadyExistsError = <object>UCXXAlreadyExistsError
    UCXOutOfRangeError = <object>UCXXOutOfRangeError
    UCXTimedOutError = <object>UCXXTimedOutError
    UCXExceedsLimitError = <object>UCXXExceedsLimitError
    UCXUnsupportedError = <object>UCXXUnsupportedError
    UCXRejectedError = <object>UCXXRejectedError
    UCXNotConnectedError = <object>UCXXNotConnectedError
    UCXConnectionResetError = <object>UCXXConnectionResetError
    UCXFirstLinkFailureError = <object>UCXXFirstLinkFailureError
    UCXLastLinkFailureError = <object>UCXXLastLinkFailureError
    UCXFirstEndpointFailureError = <object>UCXXFirstEndpointFailureError
    UCXEndpointTimeoutError = <object>UCXXEndpointTimeoutError
    UCXLastEndpointFailureError = <object>UCXXLastEndpointFailureError
    UCXCloseError = <object>UCXXCloseError
    UCXConfigError = <object>UCXXConfigError
    # Define legacy names
    # TODO: Deprecate and remove
    UCXCanceled = UCXCanceledError
    UCXMsgTruncated = UCXMessageTruncatedError
###############################################################################
# Types #
###############################################################################
class Feature(enum.Enum):
    """UCP context feature flags, mirroring the ``UCP_FEATURE_*`` constants.

    Members may be OR-ed together (via ``.value``) when creating a UCXContext.
    """
    TAG = UCP_FEATURE_TAG
    RMA = UCP_FEATURE_RMA
    AMO32 = UCP_FEATURE_AMO32
    AMO64 = UCP_FEATURE_AMO64
    WAKEUP = UCP_FEATURE_WAKEUP
    STREAM = UCP_FEATURE_STREAM
    AM = UCP_FEATURE_AM
class PythonRequestNotifierWaitState(enum.Enum):
    """Python-visible mirror of the C++ ``RequestNotifierWaitState`` enum.

    Returned by ``UCXWorker.wait_request_notifier``.
    """
    Ready = RequestNotifierWaitState.Ready
    Timeout = RequestNotifierWaitState.Timeout
    Shutdown = RequestNotifierWaitState.Shutdown
###############################################################################
# Classes #
###############################################################################
cdef class UCXConfig():
    """Python wrapper around the C++ ``Config`` holding UCX configuration."""
    cdef:
        unique_ptr[Config] _config
        bint _enable_python_future
        dict _cb_data
    def __init__(self, ConfigMap user_options=ConfigMap()):
        # TODO: Replace unique_ptr by stack object. Rule-of-five is not allowed
        # by Config, and Cython seems not to handle constructors without moving
        # in `__init__`.
        self._config = move(make_unique[Config](user_options))
    def __dealloc__(self):
        with nogil:
            self._config.reset()
    def get(self):
        """Return the full configuration as a ``dict[str, str]``."""
        cdef ConfigMap config_map = self._config.get().get()
        return {
            item.first.decode("utf-8"): item.second.decode("utf-8")
            for item in config_map
        }
cdef class UCXContext():
    """Python representation of `ucp_context_h`
    Parameters
    ----------
    config_dict: Mapping[str, str]
        UCX options such as "MEMTYPE_CACHE=n" and "SEG_SIZE=3M"
    feature_flags: Iterable[Feature]
        Tuple of UCX feature flags
    """
    cdef:
        shared_ptr[Context] _context
        dict _config
    def __init__(
        self,
        config_dict={},
        feature_flags=(
            Feature.TAG,
            Feature.WAKEUP,
            Feature.STREAM,
            Feature.AM,
            Feature.RMA
        )
    ):
        cdef ConfigMap cpp_config_in, cpp_config_out
        cdef dict context_config
        # Convert the Python config mapping into a C++ string map.
        for k, v in config_dict.items():
            cpp_config_in[k.encode("utf-8")] = v.encode("utf-8")
        # Fold the Feature enum members into a single UCP feature bitmask.
        cdef uint64_t feature_flags_uint = functools.reduce(
            lambda x, y: x | y.value, feature_flags, 0
        )
        with nogil:
            self._context = createContext(cpp_config_in, feature_flags_uint)
            cpp_config_out = self._context.get().getConfig()
        context_config = cpp_config_out
        # Cache the effective configuration as decoded Python strings.
        self._config = {
            k.decode("utf-8"): v.decode("utf-8") for k, v in context_config.items()
        }
        logger.info("UCP initiated using config: ")
        for k, v in self._config.items():
            logger.info(f"  {k}, {v}")
    def __dealloc__(self):
        with nogil:
            self._context.reset()
    cpdef dict get_config(self):
        # Effective configuration captured at construction time.
        return self._config
    @property
    def feature_flags(self):
        # UCP feature bitmask this context was created with.
        return int(self._context.get().getFeatureFlags())
    @property
    def cuda_support(self):
        return bool(self._context.get().hasCudaSupport())
    @property
    def handle(self):
        # Raw ucp_context_h as an integer, for interop with other libraries.
        cdef ucp_context_h handle
        with nogil:
            handle = self._context.get().getHandle()
        return int(<uintptr_t>handle)
    @property
    def info(self):
        # Human-readable context description produced by UCX.
        cdef Context* ucxx_context
        cdef string info
        with nogil:
            ucxx_context = self._context.get()
            info = ucxx_context.getInfo()
        return info.decode("utf-8")
cdef class UCXAddress():
    """Python wrapper around a UCX worker address.

    Supports the buffer protocol (read-only) so the raw address bytes can be
    shipped to a peer, and pickling via ``__reduce__``.
    """
    cdef:
        shared_ptr[Address] _address
        size_t _length
        ucp_address_t *_handle
        string _string
    def __init__(self, uintptr_t shared_ptr_address):
        # `shared_ptr_address` is the address of a shared_ptr[Address]; a copy
        # of the shared_ptr is taken so this object shares ownership.
        self._address = deref(<shared_ptr[Address] *> shared_ptr_address)
        with nogil:
            self._handle = self._address.get().getHandle()
            self._length = self._address.get().getLength()
            self._string = self._address.get().getString()
    def __dealloc__(self):
        with nogil:
            self._handle = NULL
            self._address.reset()
    @classmethod
    def create_from_worker(cls, UCXWorker worker):
        """Create the address of the given local worker."""
        cdef shared_ptr[Address] address
        with nogil:
            address = worker._worker.get().getAddress()
        return cls(<uintptr_t><void*>&address)
    @classmethod
    def create_from_string(cls, address_str):
        """Create an address from its raw serialized (bytes) form."""
        cdef shared_ptr[Address] address
        cdef string cpp_address_str = address_str
        with nogil:
            address = createAddressFromString(cpp_address_str)
        return cls(<uintptr_t><void*>&address)
    @classmethod
    def create_from_buffer(cls, buffer):
        """Create an address from any C-contiguous buffer-protocol object."""
        cdef string address_str
        buf = Array(buffer)
        assert buf.c_contiguous
        address_str = string(<char*>buf.ptr, <size_t>buf.nbytes)
        return UCXAddress.create_from_string(address_str)
    # For old UCX-Py API compatibility
    @classmethod
    def from_worker(cls, UCXWorker worker):
        return cls.create_from_worker(worker)
    @property
    def address(self):
        # Raw ucp_address_t* as an integer.
        return int(<uintptr_t>self._handle)
    @property
    def length(self):
        # Size of the serialized address in bytes.
        return int(self._length)
    @property
    def string(self):
        # Serialized address as bytes, suitable for create_from_string.
        return bytes(self._string)
    def __getbuffer__(self, Py_buffer *buffer, int flags):
        # Expose the raw address bytes as a read-only, 1-D, C-contiguous view.
        cdef Address* address_ptr = self._address.get()
        if bool(flags & PyBUF_WRITABLE):
            raise BufferError("Requested writable view on readonly data")
        buffer.buf = self._handle
        buffer.len = self._length
        buffer.obj = self
        buffer.readonly = True
        buffer.itemsize = 1
        if bool(flags & PyBUF_FORMAT):
            buffer.format = b"B"
        else:
            buffer.format = NULL
        buffer.ndim = 1
        if bool(flags & PyBUF_ND):
            buffer.shape = &buffer.len
        else:
            buffer.shape = NULL
        buffer.strides = NULL
        buffer.suboffsets = NULL
        buffer.internal = NULL
    def __releasebuffer__(self, Py_buffer *buffer):
        pass
    def __reduce__(self):
        # Pickle as the serialized bytes; reconstructed via create_from_buffer.
        return (UCXAddress.create_from_buffer, (self.string,))
    def __hash__(self):
        return hash(bytes(self.string))
cdef void _generic_callback(void *args) with gil:
    """Invoke a user-provided Python callback from C++.

    Installed via ``UCXWorker.set_progress_thread_start_callback``; ``args``
    is the dict built there, holding the callable and its arguments. Any
    exception is logged rather than propagated, since this runs from C++
    where a Python exception cannot be raised.
    """
    cdef dict cb_data = <dict> args
    try:
        cb_data['cb_func'](
            *cb_data['cb_args'],
            **cb_data['cb_kwargs']
        )
    except Exception as e:
        # Log instead of silently swallowing, consistent with
        # _endpoint_close_callback.
        logger.error(f"{type(e)} when calling callback: {e}")
cdef class UCXWorker():
    """Python representation of `ucp_worker_h`"""
    cdef:
        shared_ptr[Worker] _worker
        dict _progress_thread_start_cb_data
        bint _enable_delayed_submission
        bint _enable_python_future
        uint64_t _context_feature_flags
    def __init__(
        self,
        UCXContext context,
        enable_delayed_submission=False,
        enable_python_future=False,
    ):
        cdef bint ucxx_enable_delayed_submission = enable_delayed_submission
        cdef bint ucxx_enable_python_future = enable_python_future
        cdef AmAllocatorType rmm_am_allocator
        self._context_feature_flags = <uint64_t>(context.feature_flags)
        with nogil:
            self._worker = createPythonWorker(
                context._context,
                ucxx_enable_delayed_submission,
                ucxx_enable_python_future,
            )
            # The C++ worker may override the requested settings, so read the
            # effective values back rather than caching the arguments.
            self._enable_delayed_submission = self._worker.get().isDelayedRequestSubmissionEnabled()
            self._enable_python_future = self._worker.get().isFutureEnabled()
        if self._context_feature_flags & UCP_FEATURE_AM:
            # Active-message CUDA receives are allocated through RMM.
            rmm_am_allocator = <AmAllocatorType>(&_rmm_am_allocator)
            self._worker.get().registerAmAllocator(UCS_MEMORY_TYPE_CUDA, rmm_am_allocator)
    def __dealloc__(self):
        with nogil:
            self._worker.reset()
    @property
    def handle(self):
        # Raw ucp_worker_h as an integer.
        cdef ucp_worker_h handle
        with nogil:
            handle = self._worker.get().getHandle()
        return int(<uintptr_t>handle)
    @property
    def info(self):
        # Human-readable worker description produced by UCX.
        cdef Worker* ucxx_worker
        cdef string info
        with nogil:
            ucxx_worker = self._worker.get()
            info = ucxx_worker.getInfo()
        return info.decode("utf-8")
    def get_address(self):
        """Return this worker's UCXAddress for out-of-band exchange."""
        return UCXAddress.create_from_worker(self)
    def create_endpoint_from_hostname(
        self,
        str ip_address,
        uint16_t port,
        bint endpoint_error_handling
    ):
        return UCXEndpoint.create(self, ip_address, port, endpoint_error_handling)
    def create_endpoint_from_worker_address(
        self,
        UCXAddress address,
        bint endpoint_error_handling
    ):
        return UCXEndpoint.create_from_worker_address(
            self, address, endpoint_error_handling
        )
    def init_blocking_progress_mode(self):
        with nogil:
            self._worker.get().initBlockingProgressMode()
    def progress(self):
        """Progress the worker until no more immediate work remains."""
        with nogil:
            self._worker.get().progress()
    def progress_once(self):
        """Make a single progress pass; returns whether progress was made."""
        cdef bint progress_made
        with nogil:
            progress_made = self._worker.get().progressOnce()
        return progress_made
    def progress_worker_event(self, epoll_timeout=-1):
        # Blocking progress driven by the worker's event file descriptor;
        # -1 waits indefinitely.
        cdef int ucxx_epoll_timeout = epoll_timeout
        with nogil:
            self._worker.get().progressWorkerEvent(ucxx_epoll_timeout)
    def start_progress_thread(self, bint polling_mode=False, epoll_timeout=-1):
        cdef int ucxx_epoll_timeout = epoll_timeout
        with nogil:
            self._worker.get().startProgressThread(polling_mode, epoll_timeout=ucxx_epoll_timeout)
    def stop_progress_thread(self):
        with nogil:
            self._worker.get().stopProgressThread()
    def cancel_inflight_requests(self):
        """Cancel all in-flight requests; returns how many were canceled."""
        cdef size_t num_canceled
        with nogil:
            num_canceled = self._worker.get().cancelInflightRequests()
        return num_canceled
    def tag_probe(self, size_t tag):
        """Return whether a message matching ``tag`` has already arrived."""
        cdef bint tag_matched
        with nogil:
            tag_matched = self._worker.get().tagProbe(tag)
        return tag_matched
    def set_progress_thread_start_callback(
        self, cb_func, tuple cb_args=None, dict cb_kwargs=None
    ):
        """Register a Python callback invoked when the progress thread starts."""
        if cb_args is None:
            cb_args = ()
        if cb_kwargs is None:
            cb_kwargs = {}
        # Keep a reference on self: the C++ side only stores a raw pointer to
        # this dict, so it must outlive the registration.
        self._progress_thread_start_cb_data = {
            "cb_func": cb_func,
            "cb_args": cb_args,
            "cb_kwargs": cb_kwargs,
        }
        cdef function[void(void*)]* func_generic_callback = (
            new function[void(void*)](_generic_callback)
        )
        with nogil:
            self._worker.get().setProgressThreadStartCallback(
                deref(func_generic_callback), <void*>self._progress_thread_start_cb_data
            )
        del func_generic_callback
    def stop_request_notifier_thread(self):
        with nogil:
            self._worker.get().stopRequestNotifierThread()
    def wait_request_notifier(self, period_ns=0):
        # Block up to `period_ns` nanoseconds (0 = no timeout) and report the
        # notifier state as a PythonRequestNotifierWaitState.
        cdef RequestNotifierWaitState state
        cdef uint64_t p = period_ns
        with nogil:
            state = self._worker.get().waitRequestNotifier(p)
        return PythonRequestNotifierWaitState(state)
    def run_request_notifier(self):
        with nogil:
            self._worker.get().runRequestNotifier()
    def populate_python_futures_pool(self):
        with nogil:
            self._worker.get().populateFuturesPool()
    def is_delayed_submission_enabled(self):
        return self._enable_delayed_submission
    def is_python_future_enabled(self):
        return self._enable_python_future
    def tag_recv(self, Array arr, size_t tag):
        """Post a tagged receive into ``arr``; returns a UCXRequest."""
        cdef void* buf = <void*>arr.ptr
        cdef size_t nbytes = arr.nbytes
        cdef shared_ptr[Request] req
        if not self._context_feature_flags & Feature.TAG.value:
            raise ValueError("UCXContext must be created with `Feature.TAG`")
        with nogil:
            req = self._worker.get().tagRecv(
                buf,
                nbytes,
                tag,
                self._enable_python_future
            )
        return UCXRequest(<uintptr_t><void*>&req, self._enable_python_future)
cdef class UCXRequest():
    """Python wrapper around a single in-flight C++ ``Request``.

    Completion can be awaited either via a Python future (when enabled on
    the worker) or by polling with ``wait_yield``.
    """
    cdef:
        shared_ptr[Request] _request
        bint _enable_python_future
        bint _is_completed
    def __init__(self, uintptr_t shared_ptr_request, bint enable_python_future):
        # `shared_ptr_request` is the address of a shared_ptr[Request]; a copy
        # is taken so this object shares ownership of the request.
        self._request = deref(<shared_ptr[Request] *> shared_ptr_request)
        self._enable_python_future = enable_python_future
        self._is_completed = False
    def __dealloc__(self):
        with nogil:
            self._request.reset()
    def is_completed(self):
        cdef bint is_completed
        # Completion is sticky, so short-circuit once observed.
        if self._is_completed is True:
            return True
        with nogil:
            is_completed = self._request.get().isCompleted()
        return is_completed
    def get_status(self):
        cdef ucs_status_t status
        with nogil:
            status = self._request.get().getStatus()
        return status
    def check_error(self):
        """Raise the corresponding UCX exception if the request failed."""
        with nogil:
            self._request.get().checkError()
    async def wait_yield(self):
        # Cooperative polling loop used when Python futures are disabled.
        while True:
            if self.is_completed():
                return self.check_error()
            await asyncio.sleep(0)
    def get_future(self):
        # Python future created on the C++ side for this request.
        cdef PyObject* future_ptr
        with nogil:
            future_ptr = <PyObject*>self._request.get().getFuture()
        return <object>future_ptr
    async def wait(self):
        """Await completion via future or polling, depending on worker config."""
        if self._enable_python_future:
            await self.get_future()
        else:
            await self.wait_yield()
    def get_recv_buffer(self):
        cdef shared_ptr[Buffer] buf
        cdef BufferType bufType
        with nogil:
            buf = self._request.get().getRecvBuffer()
            bufType = buf.get().getType() if buf != nullptr else BufferType.Invalid
        # If buf == NULL, it's not allocated by the request but rather the user
        if buf == NULL:
            return None
        elif bufType == BufferType.RMM:
            return _get_rmm_buffer(<uintptr_t><void*>buf.get())
        elif bufType == BufferType.Host:
            return _get_host_buffer(<uintptr_t><void*>buf.get())
cdef class UCXBufferRequest:
    """One element of a multi-buffer transfer: a request plus its buffer."""
    cdef:
        BufferRequestPtr _buffer_request
        bint _enable_python_future
    def __init__(self, uintptr_t shared_ptr_buffer_request, bint enable_python_future):
        self._buffer_request = deref(<BufferRequestPtr *> shared_ptr_buffer_request)
        self._enable_python_future = enable_python_future
    def __dealloc__(self):
        with nogil:
            self._buffer_request.reset()
    def get_request(self):
        """Return the underlying transfer as a UCXRequest."""
        return UCXRequest(
            <uintptr_t><void*>&self._buffer_request.get().request,
            self._enable_python_future,
        )
    def get_py_buffer(self):
        """Return the received data as a Python object, or None for headers."""
        cdef shared_ptr[Buffer] buf
        cdef BufferType bufType
        with nogil:
            buf = self._buffer_request.get().buffer
            bufType = buf.get().getType() if buf != nullptr else BufferType.Invalid
        # If buf == NULL, it holds a header
        if buf == NULL:
            return None
        elif bufType == BufferType.RMM:
            return _get_rmm_buffer(<uintptr_t><void*>buf.get())
        elif bufType == BufferType.Host:
            return _get_host_buffer(<uintptr_t><void*>buf.get())
cdef class UCXBufferRequests:
    """Aggregate of the requests making up a multi-buffer tag transfer.

    Wraps the C++ ``RequestTagMulti``; individual transfers become available
    lazily (once the multi-request is "filled") via ``_populate_requests``.
    """
    cdef:
        RequestTagMultiPtr _ucxx_request_tag_multi
        bint _enable_python_future
        bint _is_completed
        tuple _buffer_requests
        tuple _requests
    def __init__(self, uintptr_t unique_ptr_buffer_requests, bint enable_python_future):
        cdef RequestTagMulti ucxx_buffer_requests
        self._enable_python_future = enable_python_future
        self._is_completed = False
        self._requests = tuple()
        self._ucxx_request_tag_multi = (
            deref(<RequestTagMultiPtr *> unique_ptr_buffer_requests)
        )
    def __dealloc__(self):
        with nogil:
            self._ucxx_request_tag_multi.reset()
    def _populate_requests(self):
        # Lazily build the UCXBufferRequest/UCXRequest tuples from the C++
        # multi-request; only valid once the multi-request is filled.
        cdef vector[BufferRequestPtr] requests
        if len(self._requests) == 0:
            requests = deref(self._ucxx_request_tag_multi)._bufferRequests
            total_requests = requests.size()
            self._buffer_requests = tuple([
                UCXBufferRequest(
                    <uintptr_t><void*>&(requests[i]),
                    self._enable_python_future
                )
                for i in range(total_requests)
            ])
            self._requests = tuple([br.get_request() for br in self._buffer_requests])
    def is_completed_all(self):
        """Return True when every individual transfer has completed."""
        if self._is_completed is False:
            if self._ucxx_request_tag_multi.get()._isFilled is False:
                return False
            self._populate_requests()
            self._is_completed = all(
                [r.is_completed() for r in self._requests]
            )
        return self._is_completed
    def is_completed(self):
        """Return True when the multi-request as a whole has completed."""
        cdef bint is_completed
        if self._is_completed is False:
            with nogil:
                is_completed = self._ucxx_request_tag_multi.get().isCompleted()
            self._is_completed = is_completed
        return self._is_completed
    def check_error(self):
        """Raise the corresponding UCX exception if the transfer failed."""
        with nogil:
            self._ucxx_request_tag_multi.get().checkError()
    def get_status(self):
        cdef ucs_status_t status
        with nogil:
            status = self._ucxx_request_tag_multi.get().getStatus()
        return status
    async def wait_yield(self):
        # Cooperative polling loop used when Python futures are disabled.
        while True:
            if self.is_completed():
                for r in self._requests:
                    r.check_error()
                return
            await asyncio.sleep(0)
    async def _generate_future(self):
        # Wait until the multi-request is filled, then gather the futures of
        # all individual transfers.
        if self._is_completed is False:
            while self._ucxx_request_tag_multi.get()._isFilled is False:
                await asyncio.sleep(0)
            self._populate_requests()
            futures = [r.get_future() for r in self._requests]
            await asyncio.gather(*futures)
            self._is_completed = True
        return self._is_completed
    def get_generator_future(self):
        return self._generate_future()
    def get_future(self):
        cdef PyObject* future_ptr
        with nogil:
            future_ptr = <PyObject*>self._ucxx_request_tag_multi.get().getFuture()
        return <object>future_ptr
    async def wait(self):
        """Await completion via future or polling, depending on worker config."""
        if self._enable_python_future:
            await self.get_future()
        else:
            await self.wait_yield()
    def get_requests(self):
        self._populate_requests()
        return self._requests
    def get_py_buffers(self):
        """Return the received data buffers (headers are filtered out)."""
        if not self.is_completed():
            raise RuntimeError("Some requests are not completed yet")
        self._populate_requests()
        py_buffers = [br.get_py_buffer() for br in self._buffer_requests]
        # PyBuffers that are None are headers
        return [b for b in py_buffers if b is not None]
cdef void _endpoint_close_callback(void *args) with gil:
    """Callback function called when UCXEndpoint closes or errors"""
    # `args` is the dict built by UCXEndpoint.set_close_callback; errors are
    # logged rather than propagated since this is invoked from C++.
    cdef dict cb_data = <dict> args
    try:
        cb_data['cb_func'](
            *cb_data['cb_args'],
            **cb_data['cb_kwargs']
        )
    except Exception as e:
        logger.error(f"{type(e)} when calling endpoint close callback: {e}")
cdef class UCXEndpoint():
    """Python representation of a UCX endpoint (`ucp_ep_h`).

    Construct via the ``create*`` classmethods, not directly; ``__init__``
    expects the address of an existing shared_ptr[Endpoint].
    """
    cdef:
        shared_ptr[Endpoint] _endpoint
        uint64_t _context_feature_flags
        bint _cuda_support
        bint _enable_python_future
        dict _close_cb_data
    def __init__(
        self,
        uintptr_t shared_ptr_endpoint,
        bint enable_python_future,
        uint64_t context_feature_flags,
        bint cuda_support,
    ):
        self._endpoint = deref(<shared_ptr[Endpoint] *> shared_ptr_endpoint)
        self._enable_python_future = enable_python_future
        self._context_feature_flags = context_feature_flags
        self._cuda_support = cuda_support
    def __dealloc__(self):
        with nogil:
            self._endpoint.reset()
    @classmethod
    def create(
        cls,
        UCXWorker worker,
        str ip_address,
        uint16_t port,
        bint endpoint_error_handling
    ):
        """Connect to a remote listener at ``ip_address:port``."""
        cdef shared_ptr[Context] context
        cdef shared_ptr[Endpoint] endpoint
        cdef string addr = ip_address.encode("utf-8")
        cdef uint64_t context_feature_flags
        cdef bint cuda_support
        with nogil:
            endpoint = worker._worker.get().createEndpointFromHostname(
                addr, port, endpoint_error_handling
            )
            # Feature flags and CUDA support come from the owning context.
            context = dynamic_pointer_cast[Context, Component](
                worker._worker.get().getParent()
            )
            context_feature_flags = context.get().getFeatureFlags()
            cuda_support = context.get().hasCudaSupport()
        return cls(
            <uintptr_t><void*>&endpoint,
            worker.is_python_future_enabled(),
            context_feature_flags,
            cuda_support,
        )
    @classmethod
    def create_from_conn_request(
        cls,
        UCXListener listener,
        uintptr_t conn_request,
        bint endpoint_error_handling
    ):
        """Accept an incoming connection request received by ``listener``."""
        cdef shared_ptr[Context] context
        cdef shared_ptr[Worker] worker
        cdef shared_ptr[Endpoint] endpoint
        cdef uint64_t context_feature_flags
        cdef bint cuda_support
        with nogil:
            endpoint = listener._listener.get().createEndpointFromConnRequest(
                <ucp_conn_request_h>conn_request, endpoint_error_handling
            )
            # Walk listener -> worker -> context to read context properties.
            worker = dynamic_pointer_cast[Worker, Component](
                listener._listener.get().getParent()
            )
            context = dynamic_pointer_cast[Context, Component](worker.get().getParent())
            context_feature_flags = context.get().getFeatureFlags()
            cuda_support = context.get().hasCudaSupport()
        return cls(
            <uintptr_t><void*>&endpoint,
            listener.is_python_future_enabled(),
            context_feature_flags,
            cuda_support,
        )
    @classmethod
    def create_from_worker_address(
        cls,
        UCXWorker worker,
        UCXAddress address,
        bint endpoint_error_handling
    ):
        """Connect to a remote worker given its serialized address."""
        cdef shared_ptr[Context] context
        cdef shared_ptr[Endpoint] endpoint
        cdef shared_ptr[Address] ucxx_address = address._address
        cdef uint64_t context_feature_flags
        cdef bint cuda_support
        with nogil:
            endpoint = worker._worker.get().createEndpointFromWorkerAddress(
                ucxx_address, endpoint_error_handling
            )
            context = dynamic_pointer_cast[Context, Component](
                worker._worker.get().getParent()
            )
            context_feature_flags = context.get().getFeatureFlags()
            cuda_support = context.get().hasCudaSupport()
        return cls(
            <uintptr_t><void*>&endpoint,
            worker.is_python_future_enabled(),
            context_feature_flags,
            cuda_support,
        )
    @property
    def handle(self):
        # Raw ucp_ep_h as an integer.
        cdef ucp_ep_h handle
        with nogil:
            handle = self._endpoint.get().getHandle()
        return int(<uintptr_t>handle)
    def close(self):
        with nogil:
            self._endpoint.get().close()
    def am_probe(self):
        """Return whether an active message from this endpoint has arrived."""
        cdef ucp_ep_h handle
        cdef shared_ptr[Worker] worker
        cdef bint ep_matched
        with nogil:
            handle = self._endpoint.get().getHandle()
            worker = self._endpoint.get().getWorker()
            ep_matched = worker.get().amProbe(handle)
        return ep_matched
    def am_send(self, Array arr):
        """Send ``arr`` as an active message; returns a UCXRequest."""
        cdef void* buf = <void*>arr.ptr
        cdef size_t nbytes = arr.nbytes
        cdef bint cuda_array = arr.cuda
        cdef shared_ptr[Request] req
        if not self._context_feature_flags & Feature.AM.value:
            raise ValueError("UCXContext must be created with `Feature.AM`")
        with nogil:
            req = self._endpoint.get().amSend(
                buf,
                nbytes,
                UCS_MEMORY_TYPE_CUDA if cuda_array else UCS_MEMORY_TYPE_HOST,
                self._enable_python_future
            )
        return UCXRequest(<uintptr_t><void*>&req, self._enable_python_future)
    def am_recv(self):
        """Receive an active message; the buffer is allocated by the request."""
        cdef shared_ptr[Request] req
        if not self._context_feature_flags & Feature.AM.value:
            raise ValueError("UCXContext must be created with `Feature.AM`")
        with nogil:
            req = self._endpoint.get().amRecv(self._enable_python_future)
        return UCXRequest(<uintptr_t><void*>&req, self._enable_python_future)
    def stream_send(self, Array arr):
        """Send ``arr`` over the stream API; returns a UCXRequest."""
        cdef void* buf = <void*>arr.ptr
        cdef size_t nbytes = arr.nbytes
        cdef shared_ptr[Request] req
        if not self._context_feature_flags & Feature.STREAM.value:
            raise ValueError("UCXContext must be created with `Feature.STREAM`")
        if arr.cuda and not self._cuda_support:
            raise ValueError(
                "UCX is not configured with CUDA support, please ensure that the "
                "available UCX on your environment is built against CUDA and that "
                "`cuda` or `cuda_copy` are present in `UCX_TLS` or that it is using "
                "the default `UCX_TLS=all`."
            )
        with nogil:
            req = self._endpoint.get().streamSend(
                buf,
                nbytes,
                self._enable_python_future
            )
        return UCXRequest(<uintptr_t><void*>&req, self._enable_python_future)
    def stream_recv(self, Array arr):
        """Receive into ``arr`` over the stream API; returns a UCXRequest."""
        cdef void* buf = <void*>arr.ptr
        cdef size_t nbytes = arr.nbytes
        cdef shared_ptr[Request] req
        if not self._context_feature_flags & Feature.STREAM.value:
            raise ValueError("UCXContext must be created with `Feature.STREAM`")
        if arr.cuda and not self._cuda_support:
            raise ValueError(
                "UCX is not configured with CUDA support, please ensure that the "
                "available UCX on your environment is built against CUDA and that "
                "`cuda` or `cuda_copy` are present in `UCX_TLS` or that it is using "
                "the default `UCX_TLS=all`."
            )
        with nogil:
            req = self._endpoint.get().streamRecv(
                buf,
                nbytes,
                self._enable_python_future
            )
        return UCXRequest(<uintptr_t><void*>&req, self._enable_python_future)
    def tag_send(self, Array arr, size_t tag):
        """Send ``arr`` with the given tag; returns a UCXRequest."""
        cdef void* buf = <void*>arr.ptr
        cdef size_t nbytes = arr.nbytes
        cdef shared_ptr[Request] req
        if not self._context_feature_flags & Feature.TAG.value:
            raise ValueError("UCXContext must be created with `Feature.TAG`")
        if arr.cuda and not self._cuda_support:
            raise ValueError(
                "UCX is not configured with CUDA support, please ensure that the "
                "available UCX on your environment is built against CUDA and that "
                "`cuda` or `cuda_copy` are present in `UCX_TLS` or that it is using "
                "the default `UCX_TLS=all`."
            )
        with nogil:
            req = self._endpoint.get().tagSend(
                buf,
                nbytes,
                tag,
                self._enable_python_future
            )
        return UCXRequest(<uintptr_t><void*>&req, self._enable_python_future)
    def tag_recv(self, Array arr, size_t tag):
        """Receive into ``arr`` a message with the given tag."""
        cdef void* buf = <void*>arr.ptr
        cdef size_t nbytes = arr.nbytes
        cdef shared_ptr[Request] req
        if not self._context_feature_flags & Feature.TAG.value:
            raise ValueError("UCXContext must be created with `Feature.TAG`")
        if arr.cuda and not self._cuda_support:
            raise ValueError(
                "UCX is not configured with CUDA support, please ensure that the "
                "available UCX on your environment is built against CUDA and that "
                "`cuda` or `cuda_copy` are present in `UCX_TLS` or that it is using "
                "the default `UCX_TLS=all`."
            )
        with nogil:
            req = self._endpoint.get().tagRecv(
                buf,
                nbytes,
                tag,
                self._enable_python_future
            )
        return UCXRequest(<uintptr_t><void*>&req, self._enable_python_future)
    def tag_send_multi(self, tuple arrays, size_t tag):
        """Send multiple arrays in one tagged multi-transfer."""
        cdef vector[void*] v_buffer
        cdef vector[size_t] v_size
        cdef vector[int] v_is_cuda
        cdef shared_ptr[Request] ucxx_buffer_requests
        # Validate all inputs before pushing anything to the C++ vectors.
        for arr in arrays:
            if not isinstance(arr, Array):
                raise ValueError(
                    "All elements of the `arrays` should be of `Array` type"
                )
            if arr.cuda and not self._cuda_support:
                raise ValueError(
                    "UCX is not configured with CUDA support, please ensure that the "
                    "available UCX on your environment is built against CUDA and that "
                    "`cuda` or `cuda_copy` are present in `UCX_TLS` or that it is using "
                    "the default `UCX_TLS=all`."
                )
        for arr in arrays:
            v_buffer.push_back(<void*><uintptr_t>arr.ptr)
            v_size.push_back(arr.nbytes)
            v_is_cuda.push_back(arr.cuda)
        with nogil:
            ucxx_buffer_requests = self._endpoint.get().tagMultiSend(
                v_buffer,
                v_size,
                v_is_cuda,
                tag,
                self._enable_python_future,
            )
        return UCXBufferRequests(
            <uintptr_t><void*>&ucxx_buffer_requests, self._enable_python_future,
        )
    def tag_recv_multi(self, size_t tag):
        """Receive a tagged multi-transfer; buffers are allocated on arrival."""
        cdef shared_ptr[Request] ucxx_buffer_requests
        with nogil:
            ucxx_buffer_requests = self._endpoint.get().tagMultiRecv(
                tag, self._enable_python_future
            )
        return UCXBufferRequests(
            <uintptr_t><void*>&ucxx_buffer_requests, self._enable_python_future,
        )
    def is_alive(self):
        cdef bint is_alive
        with nogil:
            is_alive = self._endpoint.get().isAlive()
        return is_alive
    def raise_on_error(self):
        with nogil:
            self._endpoint.get().raiseOnError()
    def set_close_callback(self, cb_func, tuple cb_args=None, dict cb_kwargs=None):
        """Register a Python callback invoked when this endpoint closes/errors."""
        if cb_args is None:
            cb_args = ()
        if cb_kwargs is None:
            cb_kwargs = {}
        # Keep a reference on self: the C++ side only stores a raw pointer to
        # this dict, so it must outlive the registration.
        self._close_cb_data = {
            "cb_func": cb_func,
            "cb_args": cb_args,
            "cb_kwargs": cb_kwargs,
        }
        cdef function[void(void*)]* func_close_callback = (
            new function[void(void*)](_endpoint_close_callback)
        )
        with nogil:
            self._endpoint.get().setCloseCallback(
                deref(func_close_callback), <void*>self._close_cb_data
            )
        del func_close_callback
cdef void _listener_callback(ucp_conn_request_h conn_request, void *args) with gil:
    """Callback function used by UCXListener"""
    # `args` is the dict built by UCXListener.create. If the listener opted
    # in to endpoint delivery, the connection request is turned into a
    # UCXEndpoint before invoking the user callback; otherwise the raw
    # conn_request pointer is passed as an int. Errors are logged rather
    # than propagated since this is invoked from C++.
    cdef dict cb_data = <dict> args
    try:
        cb_data['cb_func'](
            (
                cb_data['listener'].create_endpoint_from_conn_request(
                    int(<uintptr_t>conn_request), True
                ) if 'listener' in cb_data else
                int(<uintptr_t>conn_request)
            ),
            *cb_data['cb_args'],
            **cb_data['cb_kwargs']
        )
    except Exception as e:
        logger.error(f"{type(e)} when calling listener callback: {e}")
cdef class UCXListener():
    """Python representation of a UCX listener accepting incoming connections.

    Construct via ``UCXListener.create``; ``__init__`` expects the address of
    an existing shared_ptr[Listener].
    """
    cdef:
        shared_ptr[Listener] _listener
        bint _enable_python_future
        dict _cb_data
    def __init__(
        self,
        uintptr_t shared_ptr_listener,
        dict cb_data,
        bint enable_python_future,
    ):
        self._listener = deref(<shared_ptr[Listener] *> shared_ptr_listener)
        # Hold a reference to cb_data: the C++ listener only stores a raw
        # pointer to it.
        self._cb_data = cb_data
        self._enable_python_future = enable_python_future
    def __dealloc__(self):
        with nogil:
            self._listener.reset()
    @classmethod
    def create(
        cls,
        UCXWorker worker,
        uint16_t port,
        cb_func,
        tuple cb_args=None,
        dict cb_kwargs=None,
        bint deliver_endpoint=False,
    ):
        """Start listening on ``port``; ``cb_func`` runs per connection.

        With ``deliver_endpoint=True`` the callback receives a ready
        UCXEndpoint, otherwise the raw connection request as an int.
        """
        if cb_args is None:
            cb_args = ()
        if cb_kwargs is None:
            cb_kwargs = {}
        cdef shared_ptr[Listener] ucxx_listener
        cdef ucp_listener_conn_callback_t listener_cb = (
            <ucp_listener_conn_callback_t>_listener_callback
        )
        cdef dict cb_data = {
            "cb_func": cb_func,
            "cb_args": cb_args,
            "cb_kwargs": cb_kwargs,
        }
        with nogil:
            ucxx_listener = worker._worker.get().createListener(
                port, listener_cb, <void*>cb_data
            )
        listener = cls(
            <uintptr_t><void*>&ucxx_listener,
            cb_data,
            worker.is_python_future_enabled(),
        )
        if deliver_endpoint is True:
            # Let _listener_callback build endpoints on the listener's behalf.
            cb_data["listener"] = listener
        return listener
    @property
    def port(self):
        cdef uint16_t port
        with nogil:
            port = self._listener.get().getPort()
        return port
    @property
    def ip(self):
        cdef string ip
        with nogil:
            ip = self._listener.get().getIp()
        return ip.decode("utf-8")
    def create_endpoint_from_conn_request(
        self,
        uintptr_t conn_request,
        bint endpoint_error_handling
    ):
        return UCXEndpoint.create_from_conn_request(
            self, conn_request, endpoint_error_handling,
        )
    def is_python_future_enabled(self):
        return self._enable_python_future
def get_current_options():
    """Return the UCX options that would take effect if UCX were
    initialized right now, as a dict of option name to value.
    """
    config = UCXConfig()
    return config.get()
def get_ucx_version():
    """Return the linked UCX library version as a (major, minor, release) tuple."""
    cdef unsigned int a, b, c
    ucp_get_version(&a, &b, &c)
    return (a, b, c)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib/arr.pxd | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
from libc.stdint cimport uintptr_t
cdef class Array:
    # Declaration of a lightweight view over a host or CUDA array-like object
    # (implementation lives in arr.pyx).
    cdef readonly uintptr_t ptr        # raw data pointer
    cdef readonly bint readonly        # True if the underlying buffer is read-only
    cdef readonly object obj           # the wrapped Python object (keeps it alive)
    cdef readonly Py_ssize_t itemsize  # bytes per element
    cdef readonly Py_ssize_t ndim      # number of dimensions
    cdef Py_ssize_t[::1] shape_mv      # shape, one entry per dimension
    cdef Py_ssize_t[::1] strides_mv    # strides in bytes, one entry per dimension
    cdef readonly bint cuda            # True if the data lives in CUDA memory
    cpdef bint _c_contiguous(self)
    cpdef bint _f_contiguous(self)
    cpdef bint _contiguous(self)
    cpdef Py_ssize_t _nbytes(self)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib/arr.pyi | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
from typing import Tuple
# Type stub for the Cython Array extension type (see arr.pyx/arr.pxd).
class Array:
    def __init__(self, obj: object): ...
    @property
    def c_contiguous(self) -> bool: ...
    @property
    def f_contiguous(self) -> bool: ...
    @property
    def contiguous(self) -> bool: ...
    @property
    def nbytes(self) -> int: ...
    @property
    def shape(self) -> Tuple[int]: ...
    @property
    def strides(self) -> Tuple[int]: ...
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib/ucxx_api.pxd | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
from posix cimport fcntl
cimport numpy as np
from libc.stdint cimport int64_t, uint16_t, uint64_t # noqa: E402
from libcpp cimport bool as cpp_bool # noqa: E402
from libcpp.functional cimport function # noqa: E402
from libcpp.memory cimport shared_ptr, unique_ptr # noqa: E402
from libcpp.string cimport string # noqa: E402
from libcpp.unordered_map cimport ( # noqa: E402
unordered_map as cpp_unordered_map,
)
from libcpp.vector cimport vector # noqa: E402
cdef extern from "Python.h" nogil:
    # Opaque CPython object struct; used for the exception type handles below.
    ctypedef struct PyObject


cdef extern from "numpy/arrayobject.h" nogil:
    # Lets NumPy arrays take ownership of externally allocated memory.
    void PyArray_ENABLEFLAGS(np.ndarray arr, int flags)
    enum:
        NPY_ARRAY_OWNDATA


# Minimal subset of the UCP C API required by these bindings.
cdef extern from "ucp/api/ucp.h" nogil:
    # Typedefs
    ctypedef struct ucp_config_t:
        pass
    ctypedef struct ucp_context:
        pass
    ctypedef ucp_context* ucp_context_h
    ctypedef struct ucp_worker:
        pass
    ctypedef ucp_worker* ucp_worker_h
    ctypedef struct ucp_ep:
        pass
    ctypedef ucp_ep* ucp_ep_h
    ctypedef ucp_conn_request* ucp_conn_request_h
    ctypedef struct ucp_conn_request:
        pass
    ctypedef void(*ucp_listener_conn_callback_t)(ucp_conn_request_h request, void *arg)
    ctypedef struct ucp_listener_conn_handler_t:
        ucp_listener_conn_callback_t cb
        void *arg
    ctypedef struct ucp_address_t:
        pass
    ctypedef uint64_t ucp_tag_t
    ctypedef enum ucs_status_t:
        pass
    ctypedef enum ucs_memory_type_t:
        pass

    # Constants
    ucs_status_t UCS_OK
    ucs_memory_type_t UCS_MEMORY_TYPE_HOST
    ucs_memory_type_t UCS_MEMORY_TYPE_CUDA

    int UCP_FEATURE_TAG
    int UCP_FEATURE_WAKEUP
    int UCP_FEATURE_STREAM
    int UCP_FEATURE_RMA
    int UCP_FEATURE_AMO32
    int UCP_FEATURE_AMO64
    int UCP_FEATURE_AM

    # Functions
    const char *ucs_status_string(ucs_status_t status)
    void ucp_get_version(unsigned * major_version,
                         unsigned *minor_version,
                         unsigned *release_number)
cdef extern from "rmm/device_buffer.hpp" namespace "rmm" nogil:
    # Opaque here; only passed around via unique_ptr by RMMBuffer.release().
    cdef cppclass device_buffer:
        pass


# Python exception types created by the C++ layer, one per UCS status code.
cdef extern from "<ucxx/python/exception.h>" namespace "ucxx::python" nogil:
    cdef PyObject* UCXXError

    cdef PyObject* UCXXNoMessageError
    cdef PyObject* UCXXNoResourceError
    cdef PyObject* UCXXIOError
    cdef PyObject* UCXXNoMemoryError
    cdef PyObject* UCXXInvalidParamError
    cdef PyObject* UCXXUnreachableError
    cdef PyObject* UCXXInvalidAddrError
    cdef PyObject* UCXXNotImplementedError
    cdef PyObject* UCXXMessageTruncatedError
    cdef PyObject* UCXXNoProgressError
    cdef PyObject* UCXXBufferTooSmallError
    cdef PyObject* UCXXNoElemError
    cdef PyObject* UCXXSomeConnectsFailedError
    cdef PyObject* UCXXNoDeviceError
    cdef PyObject* UCXXBusyError
    cdef PyObject* UCXXCanceledError
    cdef PyObject* UCXXShmemSegmentError
    cdef PyObject* UCXXAlreadyExistsError
    cdef PyObject* UCXXOutOfRangeError
    cdef PyObject* UCXXTimedOutError
    cdef PyObject* UCXXExceedsLimitError
    cdef PyObject* UCXXUnsupportedError
    cdef PyObject* UCXXRejectedError
    cdef PyObject* UCXXNotConnectedError
    cdef PyObject* UCXXConnectionResetError
    cdef PyObject* UCXXFirstLinkFailureError
    cdef PyObject* UCXXLastLinkFailureError
    cdef PyObject* UCXXFirstEndpointFailureError
    cdef PyObject* UCXXEndpointTimeoutError
    cdef PyObject* UCXXLastEndpointFailureError
    cdef PyObject* UCXXCloseError
    cdef PyObject* UCXXConfigError

    cdef void create_exceptions()
    cdef void raise_py_error()


cdef extern from "<ucxx/python/api.h>" namespace "ucxx::python" nogil:
    # Worker factory with Python-future notification support enabled.
    shared_ptr[Worker] createPythonWorker "ucxx::python::createWorker"(
        shared_ptr[Context] context,
        bint enableDelayedSubmission,
        bint enableFuture
    ) except +raise_py_error


cdef extern from "<ucxx/buffer.h>" namespace "ucxx" nogil:
    cdef enum class BufferType:
        Host
        RMM
        Invalid
    cdef cppclass Buffer:
        Buffer(const BufferType bufferType, const size_t size_t)
        BufferType getType()
        size_t getSize()
    cdef cppclass HostBuffer:
        HostBuffer(const size_t size_t)
        BufferType getType()
        size_t getSize()
        void* release() except +raise_py_error
        void* data() except +raise_py_error
    cdef cppclass RMMBuffer:
        RMMBuffer(const size_t size_t)
        BufferType getType()
        size_t getSize()
        unique_ptr[device_buffer] release() except +raise_py_error
        void* data() except +raise_py_error


cdef extern from "<ucxx/notifier.h>" namespace "ucxx" nogil:
    cdef enum class RequestNotifierWaitState:
        Ready
        Timeout
        Shutdown
# Core ucxx C++ API surface consumed by the Cython bindings.
cdef extern from "<ucxx/api.h>" namespace "ucxx" nogil:
    # Using function[Buffer] here doesn't seem possible due to Cython bugs/limitations. The
    # workaround is to use a raw C function pointer and let it be parsed by the compiler.
    # See https://github.com/cython/cython/issues/2041 and
    # https://github.com/cython/cython/issues/3193
    ctypedef shared_ptr[Buffer] (*AmAllocatorType)(size_t)

    ctypedef cpp_unordered_map[string, string] ConfigMap

    shared_ptr[Context] createContext(
        ConfigMap ucx_config, uint64_t feature_flags
    ) except +raise_py_error

    shared_ptr[Address] createAddressFromWorker(shared_ptr[Worker] worker)
    shared_ptr[Address] createAddressFromString(string address_string)

    cdef cppclass Config:
        Config()
        Config(ConfigMap user_options) except +raise_py_error
        ConfigMap get() except +raise_py_error
        ucp_config_t* getHandle()

    cdef cppclass Component:
        shared_ptr[Component] getParent()

    cdef cppclass Context(Component):
        shared_ptr[Worker] createWorker(
            bint enableDelayedSubmission,
            bint enableFuture,
        ) except +raise_py_error
        ConfigMap getConfig() except +raise_py_error
        ucp_context_h getHandle()
        string getInfo() except +raise_py_error
        uint64_t getFeatureFlags()
        bint hasCudaSupport()

    cdef cppclass Worker(Component):
        ucp_worker_h getHandle()
        string getInfo() except +raise_py_error
        shared_ptr[Address] getAddress() except +raise_py_error
        shared_ptr[Endpoint] createEndpointFromHostname(
            string ip_address, uint16_t port, bint endpoint_error_handling
        ) except +raise_py_error
        shared_ptr[Endpoint] createEndpointFromWorkerAddress(
            shared_ptr[Address] address, bint endpoint_error_handling
        ) except +raise_py_error
        shared_ptr[Listener] createListener(
            uint16_t port, ucp_listener_conn_callback_t callback, void *callback_args
        ) except +raise_py_error
        void initBlockingProgressMode() except +raise_py_error
        void progress()
        bint progressOnce()
        void progressWorkerEvent(int epoll_timeout)
        void startProgressThread(
            bint pollingMode, int epoll_timeout
        ) except +raise_py_error
        void stopProgressThread() except +raise_py_error
        size_t cancelInflightRequests() except +raise_py_error
        bint tagProbe(const ucp_tag_t) const
        void setProgressThreadStartCallback(
            function[void(void*)] callback, void* callbackArg
        )
        void stopRequestNotifierThread() except +raise_py_error
        RequestNotifierWaitState waitRequestNotifier(
            uint64_t periodNs
        ) except +raise_py_error
        void runRequestNotifier() except +raise_py_error
        void populateFuturesPool() except +raise_py_error
        shared_ptr[Request] tagRecv(
            void* buffer, size_t length, ucp_tag_t tag, bint enable_python_future
        ) except +raise_py_error
        bint isDelayedRequestSubmissionEnabled() const
        bint isFutureEnabled() const
        bint amProbe(ucp_ep_h) const
        void registerAmAllocator(ucs_memory_type_t memoryType, AmAllocatorType allocator)

    cdef cppclass Endpoint(Component):
        ucp_ep_h getHandle()
        void close()
        shared_ptr[Request] amSend(
            void* buffer, size_t length, ucs_memory_type_t memory_type, bint enable_python_future
        ) except +raise_py_error
        shared_ptr[Request] amRecv(
            bint enable_python_future
        ) except +raise_py_error
        shared_ptr[Request] streamSend(
            void* buffer, size_t length, bint enable_python_future
        ) except +raise_py_error
        shared_ptr[Request] streamRecv(
            void* buffer, size_t length, bint enable_python_future
        ) except +raise_py_error
        shared_ptr[Request] tagSend(
            void* buffer, size_t length, ucp_tag_t tag, bint enable_python_future
        ) except +raise_py_error
        shared_ptr[Request] tagRecv(
            void* buffer, size_t length, ucp_tag_t tag, bint enable_python_future
        ) except +raise_py_error
        shared_ptr[Request] tagMultiSend(
            const vector[void*]& buffer,
            const vector[size_t]& length,
            const vector[int]& isCUDA,
            ucp_tag_t tag,
            bint enable_python_future
        ) except +raise_py_error
        shared_ptr[Request] tagMultiRecv(
            ucp_tag_t tag, bint enable_python_future
        ) except +raise_py_error
        bint isAlive()
        void raiseOnError() except +raise_py_error
        void setCloseCallback(
            function[void(void*)] close_callback, void* close_callback_arg
        )
        shared_ptr[Worker] getWorker()

    cdef cppclass Listener(Component):
        shared_ptr[Endpoint] createEndpointFromConnRequest(
            ucp_conn_request_h conn_request, bint endpoint_error_handling
        ) except +raise_py_error
        uint16_t getPort()
        string getIp()

    cdef cppclass Address(Component):
        ucp_address_t* getHandle()
        size_t getLength()
        string getString()

    cdef cppclass Request(Component):
        cpp_bool isCompleted()
        ucs_status_t getStatus()
        void checkError() except +raise_py_error
        void* getFuture() except +raise_py_error
        shared_ptr[Buffer] getRecvBuffer() except +raise_py_error


cdef extern from "<ucxx/request_tag_multi.h>" namespace "ucxx" nogil:
    ctypedef struct BufferRequest:
        shared_ptr[Request] request
        shared_ptr[string] stringBuffer
        shared_ptr[Buffer] buffer
    ctypedef shared_ptr[BufferRequest] BufferRequestPtr
    ctypedef shared_ptr[RequestTagMulti] RequestTagMultiPtr

    # Multi-buffer tag transfer bookkeeping (one BufferRequest per frame).
    cdef cppclass RequestTagMulti:
        vector[BufferRequestPtr] _bufferRequests
        bint _isFilled
        shared_ptr[Endpoint] _endpoint
        ucp_tag_t _tag
        bint _send

        cpp_bool isCompleted()
        ucs_status_t getStatus()
        void checkError() except +raise_py_error
        void* getFuture() except +raise_py_error


cdef extern from "<ucxx/utils/python.h>" namespace "ucxx::utils" nogil:
    cpp_bool isPythonAvailable()
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib/__init__.pxd | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib/arr.pyx | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
from cpython.array cimport array, newarrayobject
from cpython.buffer cimport PyBuffer_IsContiguous
from cpython.memoryview cimport (
PyMemoryView_FromObject,
PyMemoryView_GET_BUFFER,
)
from cpython.object cimport PyObject
from cpython.ref cimport Py_INCREF
from cpython.tuple cimport PyTuple_New, PyTuple_SET_ITEM
from cython cimport (
auto_pickle,
boundscheck,
initializedcheck,
nonecheck,
wraparound,
)
from libc.stdint cimport uintptr_t
from libc.string cimport memcpy
try:
from numpy import dtype as numpy_dtype
except ImportError:
numpy_dtype = None
# Mapping from array-interface typestr codes to element size in bytes.
# Keys are interned so lookups in `Array.__cinit__` hit the identity
# fast-path for typestrs coming from interned interface dicts.
cdef dict itemsize_mapping = {
    intern("|b1"): 1,
    intern("|i1"): 1,
    intern("|u1"): 1,
    intern("<i2"): 2,
    intern(">i2"): 2,
    intern("<u2"): 2,
    intern(">u2"): 2,
    intern("<i4"): 4,
    intern(">i4"): 4,
    intern("<u4"): 4,
    intern(">u4"): 4,
    intern("<i8"): 8,
    intern(">i8"): 8,
    intern("<u8"): 8,
    intern(">u8"): 8,
    intern("<f2"): 2,
    intern(">f2"): 2,
    intern("<f4"): 4,
    intern(">f4"): 4,
    intern("<f8"): 8,
    intern(">f8"): 8,
    intern("<f16"): 16,
    intern(">f16"): 16,
    intern("<c8"): 8,
    intern(">c8"): 8,
    intern("<c16"): 16,
    intern(">c16"): 16,
    intern("<c32"): 32,
    intern(">c32"): 32,
}
# Template array.array("q") used to clone typed Py_ssize_t arrays cheaply.
cdef array array_Py_ssize_t = array("q")


cdef inline Py_ssize_t[::1] new_Py_ssize_t_array(Py_ssize_t n):
    # Allocate an uninitialized length-n array.array("q") through the
    # CPython C API, avoiding Python-level constructor overhead.
    return newarrayobject(
        (<PyObject*>array_Py_ssize_t).ob_type, n, array_Py_ssize_t.ob_descr
    )
@auto_pickle(False)
cdef class Array:
    """ An efficient wrapper for host and device array-like objects

    Parameters
    ----------
    obj: Object exposing the buffer protocol or __cuda_array_interface__
        A host and device array-like object
    """
    def __cinit__(self, obj):
        # Device arrays advertise themselves via __cuda_array_interface__;
        # anything else must support the CPython buffer protocol.
        cdef dict iface = getattr(obj, "__cuda_array_interface__", None)
        self.cuda = (iface is not None)

        cdef const Py_buffer* pybuf
        cdef str typestr
        cdef tuple data, shape, strides
        cdef Py_ssize_t i
        if self.cuda:
            if iface.get("mask") is not None:
                raise NotImplementedError("mask attribute not supported")

            self.obj = obj
            data = iface["data"]
            self.ptr, self.readonly = data

            typestr = iface["typestr"]
            if typestr is None:
                raise ValueError("Expected `str`, but got `None`")
            elif typestr == "":
                raise ValueError("Got unexpected empty `str`")
            else:
                try:
                    # Fast path: common fixed-size dtypes resolved locally.
                    self.itemsize = itemsize_mapping[typestr]
                except KeyError:
                    # Fall back to NumPy for typestrs not in the table.
                    if numpy_dtype is not None:
                        self.itemsize = numpy_dtype(typestr).itemsize
                    else:
                        raise ValueError(
                            f"Unexpected data type, '{typestr}'."
                            " Please install NumPy to handle this format."
                        )

            shape = iface["shape"]
            strides = iface.get("strides")
            self.ndim = len(shape)
            if self.ndim > 0:
                self.shape_mv = new_Py_ssize_t_array(self.ndim)
                for i in range(self.ndim):
                    self.shape_mv[i] = shape[i]

                if strides is not None:
                    if len(strides) != self.ndim:
                        raise ValueError(
                            "The length of shape and strides must be equal"
                        )
                    self.strides_mv = new_Py_ssize_t_array(self.ndim)
                    for i in range(self.ndim):
                        self.strides_mv[i] = strides[i]
                else:
                    # strides=None in the interface means C-contiguous;
                    # None is kept as the C-contiguity sentinel.
                    self.strides_mv = None
            else:
                # 0-d array: no shape/strides storage needed.
                self.shape_mv = None
                self.strides_mv = None
        else:
            mv = PyMemoryView_FromObject(obj)
            pybuf = PyMemoryView_GET_BUFFER(mv)
            if pybuf.suboffsets != NULL:
                raise NotImplementedError("Suboffsets are not supported")

            self.ptr = <uintptr_t>pybuf.buf
            self.obj = pybuf.obj
            self.readonly = <bint>pybuf.readonly
            self.ndim = <Py_ssize_t>pybuf.ndim
            self.itemsize = <Py_ssize_t>pybuf.itemsize

            if self.ndim > 0:
                self.shape_mv = new_Py_ssize_t_array(self.ndim)
                memcpy(
                    &self.shape_mv[0],
                    pybuf.shape,
                    self.ndim * sizeof(Py_ssize_t)
                )
                if not PyBuffer_IsContiguous(pybuf, b"C"):
                    self.strides_mv = new_Py_ssize_t_array(self.ndim)
                    memcpy(
                        &self.strides_mv[0],
                        pybuf.strides,
                        self.ndim * sizeof(Py_ssize_t)
                    )
                else:
                    # C-contiguous buffers store no strides (None sentinel).
                    self.strides_mv = None
            else:
                self.shape_mv = None
                self.strides_mv = None

    cpdef bint _c_contiguous(self):
        return _c_contiguous(
            self.itemsize, self.ndim, self.shape_mv, self.strides_mv
        )

    @property
    def c_contiguous(self):
        # True when the data is laid out in row-major (C) order.
        return self._c_contiguous()

    cpdef bint _f_contiguous(self):
        return _f_contiguous(
            self.itemsize, self.ndim, self.shape_mv, self.strides_mv
        )

    @property
    def f_contiguous(self):
        # True when the data is laid out in column-major (Fortran) order.
        return self._f_contiguous()

    cpdef bint _contiguous(self):
        return _contiguous(
            self.itemsize, self.ndim, self.shape_mv, self.strides_mv
        )

    @property
    def contiguous(self):
        # True when the data is either C- or F-contiguous.
        return self._contiguous()

    cpdef Py_ssize_t _nbytes(self):
        return _nbytes(self.itemsize, self.ndim, self.shape_mv)

    @property
    def nbytes(self):
        # Total size of the data in bytes (itemsize * product of shape).
        return self._nbytes()

    @property
    @boundscheck(False)
    @initializedcheck(False)
    @nonecheck(False)
    @wraparound(False)
    def shape(self):
        """Per-dimension extents as a tuple."""
        cdef tuple shape = PyTuple_New(self.ndim)
        cdef Py_ssize_t i
        cdef object o
        for i in range(self.ndim):
            o = self.shape_mv[i]
            Py_INCREF(o)
            PyTuple_SET_ITEM(shape, i, o)
        return shape

    @property
    @boundscheck(False)
    @initializedcheck(False)
    @nonecheck(False)
    @wraparound(False)
    def strides(self):
        """Per-dimension strides as a tuple.

        When no strides were stored (C-contiguous sentinel), default
        row-major strides are reconstructed from shape, filling the tuple
        from the innermost dimension outwards.
        """
        cdef tuple strides = PyTuple_New(self.ndim)
        cdef Py_ssize_t i, s
        cdef object o
        if self.strides_mv is not None:
            for i from self.ndim > i >= 0 by 1:
                o = self.strides_mv[i]
                Py_INCREF(o)
                PyTuple_SET_ITEM(strides, i, o)
        else:
            s = self.itemsize
            for i from self.ndim > i >= 0 by 1:
                o = s
                Py_INCREF(o)
                PyTuple_SET_ITEM(strides, i, o)
                s *= self.shape_mv[i]
        return strides
@boundscheck(False)
@initializedcheck(False)
@nonecheck(False)
@wraparound(False)
cdef inline bint _c_contiguous(Py_ssize_t itemsize,
                               Py_ssize_t ndim,
                               Py_ssize_t[::1] shape_mv,
                               Py_ssize_t[::1] strides_mv) nogil:
    # Row-major check: walking dimensions from innermost to outermost, each
    # stride must equal itemsize times the product of the trailing extents.
    # A None strides memoryview is the C-contiguous sentinel and passes.
    cdef Py_ssize_t i, s
    if strides_mv is not None:
        s = itemsize
        for i from ndim > i >= 0 by 1:
            if s != strides_mv[i]:
                return False
            s *= shape_mv[i]
    return True
@boundscheck(False)
@initializedcheck(False)
@nonecheck(False)
@wraparound(False)
cdef inline bint _f_contiguous(Py_ssize_t itemsize,
                               Py_ssize_t ndim,
                               Py_ssize_t[::1] shape_mv,
                               Py_ssize_t[::1] strides_mv) nogil:
    # Column-major check: strides grow from the first dimension onwards.
    cdef Py_ssize_t i, s
    if strides_mv is not None:
        s = itemsize
        for i from 0 <= i < ndim by 1:
            if s != strides_mv[i]:
                return False
            s *= shape_mv[i]
    elif ndim > 1:
        # None strides means C-contiguous, which is only also F-contiguous
        # for 0-d/1-d arrays.
        return False
    return True
cdef inline bint _contiguous(Py_ssize_t itemsize,
                             Py_ssize_t ndim,
                             Py_ssize_t[::1] shape_mv,
                             Py_ssize_t[::1] strides_mv) nogil:
    # Contiguous in either C or Fortran order.
    cdef bint r = _c_contiguous(itemsize, ndim, shape_mv, strides_mv)
    if not r:
        r = _f_contiguous(itemsize, ndim, shape_mv, strides_mv)
    return r
@boundscheck(False)
@initializedcheck(False)
@nonecheck(False)
@wraparound(False)
cdef inline Py_ssize_t _nbytes(Py_ssize_t itemsize,
                               Py_ssize_t ndim,
                               Py_ssize_t[::1] shape_mv) nogil:
    # Total byte count: itemsize times the product of all extents
    # (equals itemsize for 0-d arrays, where the loop body never runs).
    cdef Py_ssize_t i, nbytes = itemsize
    for i in range(ndim):
        nbytes *= shape_mv[i]
    return nbytes
| 0 |
rapidsai_public_repos/ucxx/python/ucxx | rapidsai_public_repos/ucxx/python/ucxx/_lib/__init__.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
from .libucxx import _create_exceptions

# Create the Python-level UCX exception classes at import time so that
# errors raised from the C++ layer map to proper Python exceptions.
_create_exceptions()
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib | rapidsai_public_repos/ucxx/python/ucxx/_lib/tests/test_cancel.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import multiprocessing as mp
import pytest
import ucxx._lib.libucxx as ucx_api
from ucxx._lib.arr import Array
from ucxx.testing import terminate_process
mp = mp.get_context("spawn")
def _server_cancel(queue):
    """Server that establishes an endpoint to client and immediately closes
    it, triggering received messages to be canceled on the client.
    """
    ctx = ucx_api.UCXContext(
        feature_flags=(ucx_api.Feature.TAG, ucx_api.Feature.WAKEUP)
    )
    worker = ucx_api.UCXWorker(ctx)

    # Keep endpoint to be used from outside the listener callback
    ep = [None]

    def _listener_handler(conn_request):
        ep[0] = listener.create_endpoint_from_conn_request(conn_request, True)

    listener = ucx_api.UCXListener.create(
        worker=worker, port=0, cb_func=_listener_handler
    )
    # Publish the OS-assigned port for the client process.
    queue.put(listener.port)

    # Progress until the connection request has been accepted; returning
    # afterwards drops all references, closing the endpoint and causing the
    # client's outstanding receive to be canceled.
    while ep[0] is None:
        worker.progress()
def _client_cancel(queue):
    """Client that connects to server and waits for messages to be received,
    because the server closes without sending anything, the messages will
    trigger cancelation.
    """
    ctx = ucx_api.UCXContext(
        feature_flags=(ucx_api.Feature.TAG, ucx_api.Feature.WAKEUP)
    )
    worker = ucx_api.UCXWorker(ctx)

    port = queue.get()
    ep = ucx_api.UCXEndpoint.create(
        worker,
        "127.0.0.1",
        port,
        endpoint_error_handling=True,
    )
    assert ep.is_alive()

    # Post a receive that will never be matched: the server sends nothing
    # and closes, so completing the request must raise a canceled error.
    msg = Array(bytearray(1))
    request = ep.tag_recv(msg, tag=0)

    while not request.is_completed():
        worker.progress()

    with pytest.raises(ucx_api.UCXCanceledError):
        request.check_error()

    # Drain progress until the endpoint close is fully processed.
    while ep.is_alive():
        worker.progress()
def test_message_probe():
    # NOTE(review): despite its name this exercises request *cancelation*
    # (see _server_cancel/_client_cancel) — presumably a copy-paste from
    # test_probe.py; consider renaming to test_message_cancel.
    queue = mp.Queue()

    server = mp.Process(target=_server_cancel, args=(queue,))
    server.start()

    client = mp.Process(target=_client_cancel, args=(queue,))
    client.start()

    for proc in (client, server):
        proc.join(timeout=10)
    for proc in (client, server):
        terminate_process(proc)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib | rapidsai_public_repos/ucxx/python/ucxx/_lib/tests/test_arr.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import array
import functools
import io
import mmap
import operator
import pytest
from ucxx._lib.arr import Array
# Assortment of host objects implementing the buffer protocol: empty and
# non-empty, mutable and immutable, several item types, and one 2-D case.
builtin_buffers = [
    b"",
    b"abcd",
    array.array("i", []),
    array.array("i", [0, 1, 2, 3]),
    array.array("I", [0, 1, 2, 3]),
    array.array("f", []),
    array.array("f", [0, 1, 2, 3]),
    array.array("d", [0, 1, 2, 3]),
    memoryview(array.array("B", [0, 1, 2, 3, 4, 5])).cast("B", (3, 2)),
    memoryview(b"abcd"),
    memoryview(bytearray(b"abcd")),
    io.BytesIO(b"abcd").getbuffer(),
    mmap.mmap(-1, 5),
]
@pytest.mark.parametrize("buffer", builtin_buffers)
def test_Array_ptr_builtins(buffer):
    """A wrapped host buffer exposes a non-NULL data pointer."""
    assert Array(buffer).ptr != 0


@pytest.mark.parametrize("buffer", builtin_buffers)
def test_Array_readonly_builtins(buffer):
    """The readonly flag agrees with an equivalent memoryview's."""
    view = memoryview(buffer)
    assert Array(buffer).readonly == view.readonly


@pytest.mark.parametrize("buffer", builtin_buffers)
def test_Array_obj_builtins(buffer):
    """The owning object is the one the buffer protocol reports."""
    view = memoryview(buffer)
    assert Array(buffer).obj is view.obj


@pytest.mark.parametrize("buffer", builtin_buffers)
def test_Array_itemsize_builtins(buffer):
    """Element size agrees with an equivalent memoryview's."""
    view = memoryview(buffer)
    assert Array(buffer).itemsize == view.itemsize
@pytest.mark.parametrize("buffer", builtin_buffers)
def test_Array_ndim_builtins(buffer):
    """Dimensionality agrees with an equivalent memoryview's."""
    view = memoryview(buffer)
    assert Array(buffer).ndim == view.ndim


@pytest.mark.parametrize("buffer", builtin_buffers)
def test_Array_shape_builtins(buffer):
    """Shape tuple agrees with an equivalent memoryview's."""
    view = memoryview(buffer)
    assert Array(buffer).shape == view.shape


@pytest.mark.parametrize("buffer", builtin_buffers)
def test_Array_strides_builtins(buffer):
    """Strides tuple agrees with an equivalent memoryview's."""
    view = memoryview(buffer)
    assert Array(buffer).strides == view.strides


@pytest.mark.parametrize("buffer", builtin_buffers)
def test_Array_nbytes_builtins(buffer):
    """Total byte count agrees with an equivalent memoryview's."""
    view = memoryview(buffer)
    assert Array(buffer).nbytes == view.nbytes
@pytest.mark.parametrize("buffer", builtin_buffers)
def test_Array_contiguous_builtins(buffer):
    """Contiguity flags agree with memoryview's for the full buffer and,
    when non-empty, for a step-2 strided slice of it."""
    view = memoryview(buffer)
    wrapped = Array(buffer)
    for attr in ("c_contiguous", "f_contiguous", "contiguous"):
        assert getattr(wrapped, attr) == getattr(view, attr)

    strided = memoryview(buffer)[::2]
    if strided:
        strided_wrapped = Array(strided)
        for attr in ("c_contiguous", "f_contiguous", "contiguous"):
            assert getattr(strided_wrapped, attr) == getattr(strided, attr)
# (shape, dtype, strides) combinations for ndarray tests; the last entry
# (strides (8, 16) for an f8 (2, 3) array) is Fortran-ordered.
array_params = [
    ((2, 3), "i4", (12, 4)),
    ((2, 3), "u4", (12, 4)),
    ((2, 3), "f4", (12, 4)),
    ((2, 3), "f8", (24, 8)),
    ((2, 3), "f8", (8, 16)),
]
def create_array(xp, shape, dtype, strides):
    """Build an ``xp`` ndarray and return ``(module, array, interface_dict)``.

    Parameters
    ----------
    xp: str
        Array module name, "cupy" (device) or "numpy" (host); the module is
        imported via ``pytest.importorskip`` so tests skip when unavailable.
    shape, dtype, strides:
        Forwarded to ``xp.ndarray`` to control the result's layout.
    """
    if xp == "cupy":
        iface_prop = "__cuda_array_interface__"
    elif xp == "numpy":
        iface_prop = "__array_interface__"
    else:
        # Fail fast with a clear message instead of raising NameError on
        # `iface_prop` further down.
        raise ValueError(f"Unsupported array module: {xp!r}")

    xp = pytest.importorskip(xp)

    nelem = functools.reduce(operator.mul, shape, 1)
    data = xp.arange(nelem, dtype=dtype)
    arr = xp.ndarray(shape, dtype, data.data, strides=strides)
    iface = getattr(arr, iface_prop)
    return xp, arr, iface
@pytest.mark.parametrize("xp", ["cupy", "numpy"])
@pytest.mark.parametrize("shape, dtype, strides", array_params)
def test_Array_ndarray_ptr(xp, shape, dtype, strides):
    """The wrapper reports the pointer advertised by the array interface."""
    _, ndarr, iface = create_array(xp, shape, dtype, strides)
    assert Array(ndarr).ptr == iface["data"][0]


@pytest.mark.parametrize("xp", ["cupy", "numpy"])
@pytest.mark.parametrize("shape, dtype, strides", array_params)
def test_Array_ndarray_is_cuda(xp, shape, dtype, strides):
    """The cuda flag is set exactly for CuPy (device) arrays."""
    xp, ndarr, _ = create_array(xp, shape, dtype, strides)
    assert Array(ndarr).cuda == (xp.__name__ == "cupy")


@pytest.mark.parametrize("xp", ["cupy", "numpy"])
@pytest.mark.parametrize("shape, dtype, strides", array_params)
def test_Array_ndarray_nbytes(xp, shape, dtype, strides):
    """Byte count matches the ndarray's nbytes."""
    _, ndarr, _ = create_array(xp, shape, dtype, strides)
    assert Array(ndarr).nbytes == ndarr.nbytes


@pytest.mark.parametrize("xp", ["cupy", "numpy"])
@pytest.mark.parametrize("shape, dtype, strides", array_params)
def test_Array_ndarray_shape(xp, shape, dtype, strides):
    """Shape matches the ndarray's shape."""
    _, ndarr, _ = create_array(xp, shape, dtype, strides)
    assert Array(ndarr).shape == ndarr.shape


@pytest.mark.parametrize("xp", ["cupy", "numpy"])
@pytest.mark.parametrize("shape, dtype, strides", array_params)
def test_Array_ndarray_strides(xp, shape, dtype, strides):
    """Strides match the ndarray's strides."""
    _, ndarr, _ = create_array(xp, shape, dtype, strides)
    assert Array(ndarr).strides == ndarr.strides


@pytest.mark.parametrize("xp", ["cupy", "numpy"])
@pytest.mark.parametrize("shape, dtype, strides", array_params)
def test_Array_ndarray_contiguous(xp, shape, dtype, strides):
    """Contiguity flags agree with the ndarray's layout flags."""
    _, ndarr, _ = create_array(xp, shape, dtype, strides)
    flags = ndarr.flags
    wrapped = Array(ndarr)
    assert wrapped.c_contiguous == flags.c_contiguous
    assert wrapped.f_contiguous == flags.f_contiguous
    assert wrapped.contiguous == (flags.c_contiguous or flags.f_contiguous)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib | rapidsai_public_repos/ucxx/python/ucxx/_lib/tests/test_probe.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import multiprocessing as mp
import pytest
from ucxx._lib import libucxx as ucx_api
from ucxx._lib.arr import Array
from ucxx.testing import terminate_process, wait_requests
mp = mp.get_context("spawn")
WireupMessage = bytearray(b"wireup")
DataMessage = bytearray(b"0" * 10)
def _server_probe(queue, transfer_api):
    """Server that probes and receives message after client disconnected.

    Note that since it is illegal to call progress() in callback functions,
    we keep a reference to the endpoint after the listener callback has
    terminated, this way we can progress even after Python blocking calls.
    """
    feature_flags = (
        ucx_api.Feature.AM if transfer_api == "am" else ucx_api.Feature.TAG,
    )
    ctx = ucx_api.UCXContext(feature_flags=feature_flags)
    worker = ucx_api.UCXWorker(ctx)

    # Keep endpoint to be used from outside the listener callback
    ep = [None]

    def _listener_handler(conn_request):
        ep[0] = listener.create_endpoint_from_conn_request(
            conn_request, endpoint_error_handling=True
        )

    listener = ucx_api.UCXListener.create(
        worker=worker, port=0, cb_func=_listener_handler
    )
    # Publish the OS-assigned port for the client process.
    queue.put(listener.port)

    while ep[0] is None:
        worker.progress()

    ep = ep[0]

    # Ensure wireup and inform client before it can disconnect
    if transfer_api == "am":
        wireup_req = ep.am_recv()
        wait_requests(worker, "blocking", wireup_req)
        wireup = bytes(wireup_req.get_recv_buffer())
    else:
        wireup = bytearray(len(WireupMessage))
        wait_requests(worker, "blocking", ep.tag_recv(Array(wireup), tag=0))
    queue.put("wireup completed")

    # Ensure client has disconnected -- endpoint is not alive anymore
    while ep.is_alive() is True:
        worker.progress()

    # Probe/receive message even after the remote endpoint has disconnected
    if transfer_api == "am":
        while ep.am_probe() is False:
            worker.progress()
        recv_req = ep.am_recv()
        wait_requests(worker, "blocking", recv_req)
        received = bytes(recv_req.get_recv_buffer())
    else:
        while worker.tag_probe(0) is False:
            worker.progress()
        received = bytearray(len(DataMessage))
        wait_requests(worker, "blocking", ep.tag_recv(Array(received), tag=0))

    # Both messages must have arrived intact despite the disconnect.
    assert wireup == WireupMessage
    assert received == DataMessage
def _client_probe(queue, transfer_api):
    """Client that sends wireup and data messages, then disconnects.

    The disconnect happens only after the server has confirmed wireup, so
    the data message is guaranteed to be in flight (or buffered) when the
    endpoint closes.
    """
    feature_flags = (
        ucx_api.Feature.AM if transfer_api == "am" else ucx_api.Feature.TAG,
    )
    ctx = ucx_api.UCXContext(feature_flags=feature_flags)
    worker = ucx_api.UCXWorker(ctx)
    port = queue.get()
    ep = ucx_api.UCXEndpoint.create(
        worker,
        "127.0.0.1",
        port,
        endpoint_error_handling=True,
    )

    if transfer_api == "am":
        requests = [
            ep.am_send(Array(WireupMessage)),
            ep.am_send(Array(DataMessage)),
        ]
    else:
        requests = [
            ep.tag_send(Array(WireupMessage), tag=0),
            ep.tag_send(Array(DataMessage), tag=0),
        ]
    wait_requests(worker, "blocking", requests)

    # Wait for wireup before disconnecting
    assert queue.get() == "wireup completed"
@pytest.mark.parametrize("transfer_api", ["am", "tag"])
def test_message_probe(transfer_api):
    """Run the probe server/client pair over the chosen transfer API."""
    queue = mp.Queue()

    server = mp.Process(target=_server_probe, args=(queue, transfer_api))
    server.start()

    client = mp.Process(target=_client_probe, args=(queue, transfer_api))
    client.start()

    for proc in (client, server):
        proc.join(timeout=10)
    for proc in (client, server):
        terminate_process(proc)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib | rapidsai_public_repos/ucxx/python/ucxx/_lib/tests/test_address_object.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import multiprocessing as mp
import pickle
import ucxx._lib.libucxx as ucx_api
mp = mp.get_context("spawn")
def test_ucx_address_string():
    """A worker address round-trips through its string representation."""
    context = ucx_api.UCXContext()
    worker = ucx_api.UCXWorker(context)
    address = worker.get_address()
    address_str = address.string
    rebuilt = ucx_api.UCXAddress.create_from_string(address_str)
    rebuilt_str = rebuilt.string
    assert hash(address) == hash(rebuilt)
    assert bytes(address_str) == bytes(rebuilt_str)
def test_pickle_ucx_address():
    """Pickling preserves an address's hash, string form and raw bytes."""
    context = ucx_api.UCXContext()
    worker = ucx_api.UCXWorker(context)
    original = worker.get_address()
    original_str = original.string
    original_hash = hash(original)
    payload = pickle.dumps(original)
    # Capture the raw bytes before unpickling, mirroring the original test.
    original_bytes = bytes(original)
    restored = pickle.loads(payload)
    restored_str = restored.string
    assert original_hash == hash(restored)
    assert bytes(original_str) == bytes(restored_str)
    assert bytes(original_bytes) == bytes(restored)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib | rapidsai_public_repos/ucxx/python/ucxx/_lib/tests/test_config.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import os
from unittest.mock import patch
import pytest
import ucxx._lib.libucxx as ucx_api
from ucxx._lib.arr import Array
from ucxx._lib.libucxx import UCXInvalidParamError
def test_get_config():
    """UCX_TLS must default to "all" when the variable is unset.

    Any user-defined UCX_TLS is cached and restored in a ``finally`` block
    so a failing assertion cannot leak the modified environment into other
    tests (the original only restored it on the success path).
    """
    # Cache user-defined UCX_TLS and unset it to test default value
    tls = os.environ.pop("UCX_TLS", None)
    try:
        ctx = ucx_api.UCXContext()
        config = ctx.get_config()
        assert isinstance(config, dict)
        assert config["TLS"] == "all"
    finally:
        # Restore user-defined UCX_TLS even if an assertion above failed
        if tls is not None:
            os.environ["UCX_TLS"] = tls
def test_set_env():
    """A UCX_-prefixed environment variable is picked up by the context."""
    # patch.dict restores the environment on exit; the original left
    # UCX_SEG_SIZE set permanently, leaking into subsequent tests.
    with patch.dict(os.environ, {"UCX_SEG_SIZE": "2M"}):
        ctx = ucx_api.UCXContext()
        config = ctx.get_config()
        assert config["SEG_SIZE"] == os.environ["UCX_SEG_SIZE"]
def test_init_options():
    """Explicit constructor options take precedence over the environment."""
    options = {"SEG_SIZE": "3M"}
    # UCX_SEG_SIZE should be ignored in favor of `options`; patch.dict
    # undoes the environment change on exit instead of leaking it.
    with patch.dict(os.environ, {"UCX_SEG_SIZE": "2M"}):
        ctx = ucx_api.UCXContext(options)
        config = ctx.get_config()
        assert config["SEG_SIZE"] == options["SEG_SIZE"]
@pytest.mark.skipif(
    ucx_api.get_ucx_version() >= (1, 12, 0),
    reason="Beginning with UCX >= 1.12, it's only possible to validate "
    "UCP options but not options from other modules such as UCT. "
    "See https://github.com/openucx/ucx/issues/7519.",
)
def test_init_unknown_option():
    """An unknown option name must be rejected at context creation."""
    options = {"UNKNOWN_OPTION": "3M"}
    with pytest.raises(UCXInvalidParamError):
        ucx_api.UCXContext(options)


def test_init_invalid_option():
    """A known option with an unparsable value must be rejected."""
    options = {"SEG_SIZE": "invalid-size"}
    with pytest.raises(UCXInvalidParamError):
        ucx_api.UCXContext(options)
@pytest.mark.parametrize("feature_flag", [ucx_api.Feature.TAG, ucx_api.Feature.STREAM])
def test_feature_flags_mismatch(feature_flag):
    """Transfers must be rejected when the matching feature flag is absent."""
    ctx = ucx_api.UCXContext(feature_flags=(feature_flag,))
    worker = ucx_api.UCXWorker(ctx)
    addr = worker.get_address()
    # Loopback endpoint to this worker; error handling disabled because it
    # is unsupported for worker-address endpoints here.
    ep = ucx_api.UCXEndpoint.create_from_worker_address(
        worker, addr, endpoint_error_handling=False
    )
    msg = Array(bytearray(10))
    if feature_flag != ucx_api.Feature.TAG:
        with pytest.raises(
            ValueError, match="UCXContext must be created with `Feature.TAG`"
        ):
            ep.tag_send(msg, 0)
        with pytest.raises(
            ValueError, match="UCXContext must be created with `Feature.TAG`"
        ):
            ep.tag_recv(msg, 0)
    if feature_flag != ucx_api.Feature.STREAM:
        with pytest.raises(
            ValueError, match="UCXContext must be created with `Feature.STREAM`"
        ):
            ep.stream_send(msg)
        with pytest.raises(
            ValueError, match="UCXContext must be created with `Feature.STREAM`"
        ):
            ep.stream_recv(msg)
@patch.dict(os.environ, {"UCX_TLS": "^cuda"})
def test_no_cuda_support():
    """Excluding cuda transports via UCX_TLS must disable CUDA support."""
    context = ucx_api.UCXContext(feature_flags=(ucx_api.Feature.TAG,))
    assert context.cuda_support is False
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib | rapidsai_public_repos/ucxx/python/ucxx/_lib/tests/test_endpoint.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import multiprocessing as mp
import os
import pytest
import ucxx._lib.libucxx as ucx_api
from ucxx._lib.arr import Array
from ucxx.testing import terminate_process, wait_requests
mp = mp.get_context("spawn")
WireupMessageSize = 10
def _close_callback(closed):
closed[0] = True
def _server(queue, server_close_callback):
    """Server that send received message back to the client

    Notice, since it is illegal to call progress() in call-back functions,
    we use a "chain" of call-back functions.
    """
    ctx = ucx_api.UCXContext(feature_flags=(ucx_api.Feature.TAG,))
    worker = ucx_api.UCXWorker(ctx)

    listener_finished = [False]
    closed = [False]

    # A reference to listener's endpoint is stored to prevent it from going
    # out of scope too early.
    ep = [None]

    def _listener_handler(conn_request):
        ep[0] = listener.create_endpoint_from_conn_request(conn_request, True)
        if server_close_callback is True:
            ep[0].set_close_callback(_close_callback, cb_args=(closed,))
        listener_finished[0] = True

    listener = ucx_api.UCXListener.create(
        worker=worker, port=0, cb_func=_listener_handler
    )
    # Publish the OS-assigned port for the client process.
    queue.put(listener.port)
    while ep[0] is None:
        worker.progress()

    # Exchange a wireup message so the connection is fully established
    # before either side waits for the close.
    wireup_msg = Array(bytearray(WireupMessageSize))
    wireup_request = ep[0].tag_recv(wireup_msg, tag=0)
    wait_requests(worker, "blocking", wireup_request)

    if server_close_callback is True:
        # Wait until the client disconnect triggers our close callback.
        while closed[0] is False:
            worker.progress()
        assert closed[0] is True
    else:
        while listener_finished[0] is False:
            worker.progress()
def _client(port, server_close_callback):
    """Connect to the server at ``port``, send a wireup message, and — when the
    server side did not register a close callback — register one locally and
    wait for the endpoint to be closed by the peer.
    """
    ctx = ucx_api.UCXContext(feature_flags=(ucx_api.Feature.TAG,))
    worker = ucx_api.UCXWorker(ctx)
    ep = ucx_api.UCXEndpoint.create(
        worker,
        "127.0.0.1",
        port,
        endpoint_error_handling=True,
    )
    worker.progress()
    wireup_msg = Array(bytes(os.urandom(WireupMessageSize)))
    wireup_request = ep.tag_send(wireup_msg, tag=0)
    wait_requests(worker, "blocking", wireup_request)
    if server_close_callback is False:
        # The client observes the close in this mode: register the callback
        # and progress the worker until it fires.
        closed = [False]
        ep.set_close_callback(_close_callback, cb_args=(closed,))
        while closed[0] is False:
            worker.progress()
@pytest.mark.parametrize("server_close_callback", [True, False])
def test_close_callback(server_close_callback):
    """Exercise endpoint close callbacks registered on either side of the connection."""
    queue = mp.Queue()
    server_proc = mp.Process(
        target=_server,
        args=(queue, server_close_callback),
    )
    server_proc.start()
    server_port = queue.get()
    client_proc = mp.Process(
        target=_client,
        args=(server_port, server_close_callback),
    )
    client_proc.start()
    client_proc.join(timeout=10)
    server_proc.join(timeout=10)
    terminate_process(client_proc)
    terminate_process(server_proc)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib | rapidsai_public_repos/ucxx/python/ucxx/_lib/tests/test_listener.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import ucxx._lib.libucxx as ucx_api
def test_listener_ip_port():
    """A listener bound to port 0 must report a non-empty IP string and a valid port."""
    context = ucx_api.UCXContext()
    worker = ucx_api.UCXWorker(context)

    listener = ucx_api.UCXListener.create(
        worker=worker, port=0, cb_func=lambda conn_request: None
    )

    assert isinstance(listener.ip, str) and listener.ip
    assert isinstance(listener.port, int)
    assert 0 <= listener.port <= 65535
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib | rapidsai_public_repos/ucxx/python/ucxx/_lib/tests/test_server_client.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import multiprocessing as mp
import os
from queue import Empty as QueueIsEmpty
import pytest
import ucxx._lib.libucxx as ucx_api
from ucxx._lib.arr import Array
from ucxx.testing import terminate_process, wait_requests
mp = mp.get_context("spawn")
WireupMessageSize = 10
def _send(ep, api, message):
if api == "am":
return ep.am_send(message)
elif api == "stream":
return ep.stream_send(message)
else:
return ep.tag_send(message, tag=0)
def _recv(ep, api, message):
if api == "am":
return ep.am_recv()
elif api == "stream":
return ep.stream_recv(message)
else:
return ep.tag_recv(message, tag=0)
def _echo_server(get_queue, put_queue, transfer_api, msg_size, progress_mode):
    """Server that send received message back to the client

    Notice, since it is illegal to call progress() in call-back functions,
    we keep a reference to the listener's endpoint and execute transfers
    outside of the callback function.

    Parameters
    ----------
    get_queue:
        Queue polled at the end; any item signals the server may exit.
    put_queue:
        Queue used to report the listener port to the parent process.
    transfer_api:
        "am", "stream" or "tag" — selects both the feature flag and the
        send/recv helpers used.
    msg_size:
        Size in bytes of the echoed message buffer.
    progress_mode:
        "blocking" (manual progress) or "thread" (background progress thread).
    """
    # Request exactly one transfer feature alongside WAKEUP.
    feature_flags = [ucx_api.Feature.WAKEUP]
    if transfer_api == "am":
        feature_flags.append(ucx_api.Feature.AM)
    elif transfer_api == "stream":
        feature_flags.append(ucx_api.Feature.STREAM)
    else:
        feature_flags.append(ucx_api.Feature.TAG)
    ctx = ucx_api.UCXContext(feature_flags=tuple(feature_flags))
    worker = ucx_api.UCXWorker(ctx)
    if progress_mode == "blocking":
        worker.init_blocking_progress_mode()
    else:
        worker.start_progress_thread()

    # A reference to listener's endpoint is stored to prevent it from going
    # out of scope too early and allow transfers outside of the listener's
    # callback even after it has terminated.
    ep = [None]

    def _listener_handler(conn_request):
        ep[0] = listener.create_endpoint_from_conn_request(conn_request, True)

    listener = ucx_api.UCXListener.create(
        worker=worker, port=0, cb_func=_listener_handler
    )
    # Port 0 lets UCX pick a free port; report the actual one to the parent.
    put_queue.put(listener.port)

    # In thread mode the progress thread advances the worker for us.
    while ep[0] is None:
        if progress_mode == "blocking":
            worker.progress()

    wireup_msg = Array(bytearray(WireupMessageSize))
    wireup_request = _recv(ep[0], transfer_api, wireup_msg)
    wait_requests(worker, progress_mode, wireup_request)

    msg = Array(bytearray(msg_size))

    # We reuse the message buffer, so we must receive, wait, and then send
    # it back again.
    requests = [_recv(ep[0], transfer_api, msg)]
    wait_requests(worker, progress_mode, requests)
    if transfer_api == "am":
        # AM receives allocate their own buffer; echo that buffer back.
        msg = Array(requests[0].get_recv_buffer())
    requests = [_send(ep[0], transfer_api, msg)]
    wait_requests(worker, progress_mode, requests)

    # Stay alive until the parent signals completion through get_queue.
    while True:
        try:
            get_queue.get(block=True, timeout=0.1)
        except QueueIsEmpty:
            continue
        else:
            break
def _echo_client(transfer_api, msg_size, progress_mode, port):
    """Client for the echo test: connect to ``port``, perform wireup, then
    send a random message and assert the server echoes it back intact.

    Parameters
    ----------
    transfer_api:
        "am", "stream" or "tag" — selects both the feature flag and the
        send/recv helpers used.
    msg_size:
        Size in bytes of the echoed message.
    progress_mode:
        "blocking" (manual progress) or "thread" (background progress thread).
    port:
        TCP port where the echo server is listening.
    """
    feature_flags = [ucx_api.Feature.WAKEUP]
    # Fix: the "stream" branch previously used `if` instead of `elif`, so for
    # transfer_api == "am" the `else` branch also appended Feature.TAG,
    # requesting an unintended extra feature. Use a single if/elif chain to
    # select exactly one transfer feature, consistent with _echo_server.
    if transfer_api == "am":
        feature_flags.append(ucx_api.Feature.AM)
    elif transfer_api == "stream":
        feature_flags.append(ucx_api.Feature.STREAM)
    else:
        feature_flags.append(ucx_api.Feature.TAG)
    ctx = ucx_api.UCXContext(feature_flags=tuple(feature_flags))
    worker = ucx_api.UCXWorker(ctx)
    if progress_mode == "blocking":
        worker.init_blocking_progress_mode()
    else:
        worker.start_progress_thread()

    ep = ucx_api.UCXEndpoint.create(
        worker,
        "127.0.0.1",
        port,
        endpoint_error_handling=True,
    )
    if progress_mode == "blocking":
        worker.progress()

    # Wireup exchange ensures the remote endpoint exists before the echo.
    wireup_msg = Array(bytes(os.urandom(WireupMessageSize)))
    wireup_request = _send(ep, transfer_api, wireup_msg)
    wait_requests(worker, progress_mode, wireup_request)

    send_msg = bytes(os.urandom(msg_size))
    recv_msg = bytearray(msg_size)
    requests = [
        _send(ep, transfer_api, Array(send_msg)),
        _recv(ep, transfer_api, Array(recv_msg)),
    ]
    wait_requests(worker, progress_mode, requests)

    if transfer_api == "am":
        # AM receives allocate their own buffer rather than filling recv_msg.
        recv_msg = requests[1].get_recv_buffer()
        assert bytes(recv_msg) == send_msg
    else:
        assert recv_msg == send_msg
@pytest.mark.parametrize("transfer_api", ["am", "stream", "tag"])
@pytest.mark.parametrize("msg_size", [10, 2**24])
@pytest.mark.parametrize("progress_mode", ["blocking", "thread"])
def test_server_client(transfer_api, msg_size, progress_mode):
    """Run the echo server/client pair in separate processes and clean up."""
    put_queue, get_queue = mp.Queue(), mp.Queue()
    server_proc = mp.Process(
        target=_echo_server,
        args=(put_queue, get_queue, transfer_api, msg_size, progress_mode),
    )
    server_proc.start()
    server_port = get_queue.get()
    client_proc = mp.Process(
        target=_echo_client,
        args=(transfer_api, msg_size, progress_mode, server_port),
    )
    client_proc.start()
    client_proc.join(timeout=60)
    terminate_process(client_proc)
    # Unblock the server's final queue poll so it can exit.
    put_queue.put("Finished")
    server_proc.join(timeout=10)
    terminate_process(server_proc)
| 0 |
rapidsai_public_repos/ucxx/python/ucxx/_lib | rapidsai_public_repos/ucxx/python/ucxx/_lib/tests/test_utils.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import multiprocessing
import re
from multiprocessing.queues import Empty
import pytest
from ucxx.testing import terminate_process
def _test_process(queue):
while True:
try:
message = queue.get_nowait()
assert message == "terminate"
return
except Empty:
pass
@pytest.mark.parametrize("mp_context", ["default", "fork", "forkserver", "spawn"])
def test_terminate_process_clean(mp_context):
    """``terminate_process`` must succeed for a child that exited normally."""
    if mp_context == "default":
        ctx = multiprocessing
    else:
        ctx = multiprocessing.get_context(mp_context)
    queue = ctx.Queue()
    child = ctx.Process(
        target=_test_process,
        args=(queue,),
    )
    child.start()
    # The child observes the sentinel and returns cleanly.
    queue.put("terminate")
    child.join()
    terminate_process(child)
@pytest.mark.parametrize("mp_context", ["default", "fork", "forkserver", "spawn"])
def test_terminate_process_join_timeout(mp_context):
    """A child still alive after join times out is reported as exiting uncleanly."""
    if mp_context == "default":
        ctx = multiprocessing
    else:
        ctx = multiprocessing.get_context(mp_context)
    queue = ctx.Queue()
    child = ctx.Process(
        target=_test_process,
        args=(queue,),
    )
    child.start()
    # No sentinel is sent, so the join below times out with the child alive.
    child.join(timeout=1)
    with pytest.raises(
        RuntimeError, match=re.escape("Process did not exit cleanly (exit code: -9)")
    ):
        terminate_process(child)
@pytest.mark.parametrize("mp_context", ["default", "fork", "forkserver", "spawn"])
def test_terminate_process_kill_timeout(mp_context):
    """With no kill grace period, closing a still-running child must fail."""
    if mp_context == "default":
        ctx = multiprocessing
    else:
        ctx = multiprocessing.get_context(mp_context)
    queue = ctx.Queue()
    child = ctx.Process(
        target=_test_process,
        args=(queue,),
    )
    child.start()
    # No sentinel is sent, so the child is still running after the timeout.
    child.join(timeout=1)
    with pytest.raises(
        ValueError, match="Cannot close a process while it is still running.*"
    ):
        terminate_process(child, kill_wait=0.0)
| 0 |
rapidsai_public_repos/ucxx/python | rapidsai_public_repos/ucxx/python/examples/basic.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import asyncio
import numpy as np
import ucxx._lib.libucxx as ucx_api
from ucxx._lib.arr import Array
from ucxx._lib_async.utils import get_event_loop
def _create_cuda_context():
    """Ensure a CUDA context exists on the calling thread via Numba.

    Used as the progress-thread start callback so the thread can touch
    device memory.
    """
    import numba.cuda

    numba.cuda.current_context()
async def _progress_coroutine(worker):
while True:
try:
if worker is None:
return
worker.progress()
await asyncio.sleep(0)
except asyncio.CancelledError:
return
async def _wait_requests_async_future(loop, worker, requests):
    """Await every request's future while progressing the worker in the background."""
    progress = loop.create_task(_progress_coroutine(worker))
    futures = [request.get_future() for request in requests]
    await asyncio.gather(*futures)
    progress.cancel()
async def _wait_requests_async_yield(loop, worker, requests):
    """Await every request via wait_yield() while progressing the worker in the background."""
    progress = loop.create_task(_progress_coroutine(worker))
    waiters = [request.wait_yield() for request in requests]
    await asyncio.gather(*waiters)
    progress.cancel()
def _wait_requests(worker, progress_mode, requests):
while not all([r.is_completed() for r in requests]):
if progress_mode == "blocking":
worker.progress_worker_event()
def parse_args():
    """Parse and validate command-line arguments for the basic example.

    Returns
    -------
    argparse.Namespace with the parsed options.

    Raises
    ------
    ValueError
        If ``--progress-mode`` is not "blocking" or "thread".
    RuntimeError
        If ``--asyncio-wait-future`` is used without blocking progress mode.
    """
    parser = argparse.ArgumentParser(description="Basic UCXX-Py Example")
    parser.add_argument(
        "--asyncio-wait-future",
        default=False,
        action="store_true",
        help="Wait for transfer requests with Python's asyncio using futures "
        "(`UCXRequest.get_future()`), requires `--progress-mode blocking`. "
        "(default: disabled)",
    )
    parser.add_argument(
        "--asyncio-wait-yield",
        default=False,
        action="store_true",
        help="Wait for transfer requests with Python's asyncio by checking "
        "for request completion and yielding (`UCXRequest.wait_yield()`). "
        "(default: disabled)",
    )
    parser.add_argument(
        "-m",
        "--progress-mode",
        default="thread",
        help="Progress mode for the UCP worker. Valid options are: "
        "'thread' (default) and 'blocking'.",
        type=str,
    )
    parser.add_argument(
        "-o",
        "--object-type",
        default="numpy",
        choices=["numpy", "rmm"],
        help="In-memory array type.",
        type=str,
    )
    parser.add_argument(
        "--multi-buffer-transfer",
        default=False,
        action="store_true",
        help="If specified, use the multi-buffer TAG transfer API.",
    )
    parser.add_argument(
        "-p",
        "--port",
        default=12345,
        help="The port the listener will bind to.",
        type=int,
    )
    args = parser.parse_args()

    # Validate cross-option constraints that argparse cannot express.
    valid_progress_modes = ["blocking", "thread"]
    if not any(args.progress_mode == v for v in valid_progress_modes):
        raise ValueError(
            f"Unknown progress mode '{args.progress_mode}', "
            f"valid modes are {valid_progress_modes}",
        )

    # Futures are only resolved when the worker is progressed manually.
    if args.asyncio_wait_future and args.progress_mode != "blocking":
        raise RuntimeError(
            "`--asyncio-wait-future` requires `--progress-mode='blocking'`"
        )

    return args
def main():
    """Run a self-contained send/recv demo: a listener and a client endpoint
    on the same worker exchange three buffers and verify their contents.
    """
    args = parse_args()

    if args.object_type == "rmm":
        # GPU path: allocate buffers with CuPy backed by an RMM pool.
        import cupy as xp

        import rmm
        from rmm.allocators.cupy import rmm_cupy_allocator

        rmm.reinitialize(
            pool_allocator=True,
            managed_memory=False,
        )
        xp.cuda.runtime.setDevice(0)
        xp.cuda.set_allocator(rmm_cupy_allocator)
    else:
        import numpy as xp

    ctx = ucx_api.UCXContext()
    worker = ucx_api.UCXWorker(ctx)
    if args.progress_mode == "blocking":
        worker.init_blocking_progress_mode()
    else:
        if args.object_type == "rmm":
            # The progress thread needs its own CUDA context for GPU buffers.
            worker.set_progress_thread_start_callback(_create_cuda_context)
        worker.start_progress_thread()

    # Wireup buffers are always host (NumPy) arrays.
    wireup_send_buf = np.arange(3)
    wireup_recv_buf = np.empty_like(wireup_send_buf)
    send_bufs = [
        xp.arange(50, dtype="u1"),
        xp.arange(500, dtype="u1"),
        xp.arange(50000, dtype="u1"),
    ]

    if args.multi_buffer_transfer is False:
        # Multi-buffer receives allocate their own buffers later on.
        recv_bufs = [np.empty_like(b) for b in send_bufs]

    global listener_ep
    listener_ep = None

    def listener_callback(conn_request):
        global listener_ep
        listener_ep = listener.create_endpoint_from_conn_request(conn_request, True)

    listener = ucx_api.UCXListener.create(
        worker,
        args.port,
        listener_callback,
    )
    ep = ucx_api.UCXEndpoint.create(
        worker,
        "127.0.0.1",
        args.port,
        endpoint_error_handling=True,
    )
    # In thread mode the progress thread advances the worker for us.
    while listener_ep is None:
        if args.progress_mode == "blocking":
            worker.progress_worker_event()

    wireup_requests = [
        ep.tag_send(Array(wireup_send_buf), tag=0),
        listener_ep.tag_recv(Array(wireup_recv_buf), tag=0),
    ]
    _wait_requests(worker, args.progress_mode, wireup_requests)

    np.testing.assert_equal(wireup_recv_buf, wireup_send_buf)

    if args.multi_buffer_transfer:
        frames = tuple(
            [
                Array(send_bufs[0]),
                Array(send_bufs[1]),
                Array(send_bufs[2]),
            ]
        )
        # data_ptrs = tuple(f.ptr for f in frames)
        # sizes = tuple(f.nbytes for f in frames)
        # is_cuda = tuple(f.cuda for f in frames)

        # send_buffer_requests = listener_ep.tag_send_multi(
        #     data_ptrs, sizes, is_cuda, tag=0
        # )
        send_buffer_requests = listener_ep.tag_send_multi(frames, tag=0)
        recv_buffer_requests = ep.tag_recv_multi(0)

        requests = [send_buffer_requests, recv_buffer_requests]

        if args.asyncio_wait_future:
            loop = get_event_loop()
            loop.run_until_complete(_wait_requests_async_future(loop, worker, requests))
        elif args.asyncio_wait_yield:
            loop = get_event_loop()
            loop.run_until_complete(_wait_requests_async_yield(loop, worker, requests))
        else:
            _wait_requests(worker, args.progress_mode, requests)

        # Check results, raises an exception if any of them failed
        for r in (
            send_buffer_requests.get_requests()
            + recv_buffer_requests.get_requests()
        ):
            r.check_error()

        # Receives allocated their own buffers; fetch them for verification.
        recv_bufs = recv_buffer_requests.get_py_buffers()
    else:
        # Single-buffer path: three independent tagged transfers.
        requests = [
            listener_ep.tag_send(Array(send_bufs[0]), tag=0),
            listener_ep.tag_send(Array(send_bufs[1]), tag=1),
            listener_ep.tag_send(Array(send_bufs[2]), tag=2),
            ep.tag_recv(Array(recv_bufs[0]), tag=0),
            ep.tag_recv(Array(recv_bufs[1]), tag=1),
            ep.tag_recv(Array(recv_bufs[2]), tag=2),
        ]

        if args.asyncio_wait_future:
            loop = get_event_loop()
            loop.run_until_complete(_wait_requests_async_future(loop, worker, requests))
        elif args.asyncio_wait_yield:
            loop = get_event_loop()
            loop.run_until_complete(_wait_requests_async_yield(loop, worker, requests))
        else:
            _wait_requests(worker, args.progress_mode, requests)

        # Check results, raises an exception if any of them failed
        for r in requests:
            r.check_error()

    if args.progress_mode == "thread":
        worker.stop_progress_thread()

    for recv_buf, send_buf in zip(recv_bufs, send_bufs):
        if args.object_type == "numpy":
            xp.testing.assert_equal(recv_buf, send_buf)
        else:
            xp.testing.assert_array_equal(xp.asarray(recv_buf), send_buf)
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/ucxx/conda | rapidsai_public_repos/ucxx/conda/environments/all_cuda-120_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- autoconf
- automake
- c-compiler
- cloudpickle
- cmake>=3.26.4
- cuda-cudart-dev
- cuda-version=12.0
- cudf==24.2.*
- cupy
- cxx-compiler
- cython>=3.0.0
- dask
- dask-cuda==24.2.*
- dask-cudf==24.2.*
- distributed
- fmt>=9.1.0,<10
- gmock>=1.13.0
- gtest>=1.13.0
- librmm==24.2.*
- libtool
- ninja
- numba>=0.57.1
- numpy>=1.21
- pip
- pkg-config
- pre-commit
- pynvml>=11.4.1
- pytest
- pytest-asyncio
- pytest-rerunfailures
- python>=3.9,<3.11
- rmm==24.2.*
- scikit-build>=0.13.1
- setuptools
- spdlog>=1.11.0,<1.12
- tomli
- ucx
- wheel
name: all_cuda-120_arch-x86_64
| 0 |
rapidsai_public_repos/ucxx/conda | rapidsai_public_repos/ucxx/conda/environments/all_cuda-118_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- autoconf
- automake
- c-compiler
- cloudpickle
- cmake>=3.26.4
- cuda-version=11.8
- cudatoolkit
- cudf==24.2.*
- cupy
- cxx-compiler
- cython>=3.0.0
- dask
- dask-cuda==24.2.*
- dask-cudf==24.2.*
- distributed
- fmt>=9.1.0,<10
- gmock>=1.13.0
- gtest>=1.13.0
- librmm==24.2.*
- libtool
- ninja
- numba>=0.57.1
- numpy>=1.21
- pip
- pkg-config
- pre-commit
- pynvml>=11.4.1
- pytest
- pytest-asyncio
- pytest-rerunfailures
- python>=3.9,<3.11
- rmm==24.2.*
- scikit-build>=0.13.1
- setuptools
- spdlog>=1.11.0,<1.12
- tomli
- ucx
- wheel
name: all_cuda-118_arch-x86_64
| 0 |
rapidsai_public_repos/ucxx/conda/recipes | rapidsai_public_repos/ucxx/conda/recipes/ucxx/install_libucxx_examples.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
cmake --install cpp/build --component examples
| 0 |
rapidsai_public_repos/ucxx/conda/recipes | rapidsai_public_repos/ucxx/conda/recipes/ucxx/install_libucxx_tests.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
cmake --install cpp/build --component testing
| 0 |
rapidsai_public_repos/ucxx/conda/recipes | rapidsai_public_repos/ucxx/conda/recipes/ucxx/install_libucxx.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
cmake --install cpp/build
cmake --install cpp/build --component benchmarks
# For some reason RAPIDS headers are getting installed causing clobbering, which shouldn't happen.
# To workaround this issue for now, just remove all the RAPIDS headers that were installed to avoid clobbering.
# xref: https://github.com/rapidsai/ucxx/issues/20
rm -rf "${PREFIX}/include/rapids"
| 0 |
rapidsai_public_repos/ucxx/conda/recipes | rapidsai_public_repos/ucxx/conda/recipes/ucxx/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_linux_64:
- "2.17"
cmake:
- ">=3.26.4"
python:
- 3.9
- 3.10
ucx:
- "==1.14.*"
gmock:
- ">=1.13.0"
gtest:
- ">=1.13.0"
| 0 |
rapidsai_public_repos/ucxx/conda/recipes | rapidsai_public_repos/ucxx/conda/recipes/ucxx/build.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
export ucxx_ROOT="$(realpath ./cpp/build)"
./build.sh -n -v libucxx libucxx_python benchmarks examples tests --cmake-args=\"-DCMAKE_INSTALL_LIBDIR=lib\"
| 0 |
rapidsai_public_repos/ucxx/conda/recipes | rapidsai_public_repos/ucxx/conda/recipes/ucxx/meta.yaml | # SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
{% set version = environ.get('GIT_DESCRIBE_TAG', '0.0.0.dev').lstrip('v') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: libucxx-split
source:
path: ../../..
build:
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- RAPIDS_ARTIFACTS_DIR
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=libucxx-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=libucxx-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
requirements:
build:
- cmake
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- ninja
- sysroot_{{ target_platform }}
host:
{% if cuda_major != "11" %}
- cuda-cudart-dev
{% endif %}
- cuda-version ={{ cuda_version }}
- ucx
- python
- librmm =24.02
- gtest
outputs:
- name: libucxx
version: {{ version }}
script: install_libucxx.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
run_exports:
- {{ pin_subpackage("libucxx", max_pin="x.x") }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
- ucx
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- sysroot_{{ target_platform }}
- cmake
- ninja
host:
- ucx
- cuda-version ={{ cuda_version }}
run:
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
- ucx >=1.14.1,<1.16.0
test:
commands:
- test -f $PREFIX/lib/libucxx.so
- test -f $PREFIX/include/ucxx/utils/python.h
- test -f $PREFIX/include/ucxx/utils/file_descriptor.h
- test -f $PREFIX/include/ucxx/utils/sockaddr.h
- test -f $PREFIX/include/ucxx/utils/ucx.h
- test -f $PREFIX/include/ucxx/notifier.h
- test -f $PREFIX/include/ucxx/address.h
- test -f $PREFIX/include/ucxx/component.h
- test -f $PREFIX/include/ucxx/delayed_submission.h
- test -f $PREFIX/include/ucxx/inflight_requests.h
- test -f $PREFIX/include/ucxx/listener.h
- test -f $PREFIX/include/ucxx/api.h
- test -f $PREFIX/include/ucxx/request_helper.h
- test -f $PREFIX/include/ucxx/request_stream.h
- test -f $PREFIX/include/ucxx/request_tag.h
- test -f $PREFIX/include/ucxx/typedefs.h
- test -f $PREFIX/include/ucxx/context.h
- test -f $PREFIX/include/ucxx/endpoint.h
- test -f $PREFIX/include/ucxx/request.h
- test -f $PREFIX/include/ucxx/worker_progress_thread.h
- test -f $PREFIX/include/ucxx/constructors.h
- test -f $PREFIX/include/ucxx/request_tag_multi.h
- test -f $PREFIX/include/ucxx/buffer.h
- test -f $PREFIX/include/ucxx/config.h
- test -f $PREFIX/include/ucxx/future.h
- test -f $PREFIX/include/ucxx/header.h
- test -f $PREFIX/include/ucxx/worker.h
- test -f $PREFIX/include/ucxx/exception.h
- test -f $PREFIX/include/ucxx/log.h
- test ! -d ${PREFIX}/include/rapids
about:
home: https://rapids.ai/
license: BSD-3-Clause
license_family: BSD
license_file: ../../../LICENSE
summary: libucxx library
- name: libucxx-examples
version: {{ version }}
script: install_libucxx_examples.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: {{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
requirements:
build:
- cmake
run:
- {{ pin_subpackage('libucxx', exact=True) }}
about:
home: https://rapids.ai/
license: BSD-3-Clause
license_family: BSD
license_file: ../../../LICENSE
summary: libucxx examples executables
- name: libucxx-tests
version: {{ version }}
script: install_libucxx_tests.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- sysroot_{{ target_platform }}
- cmake
- ninja
host:
{% if cuda_major != "11" %}
- cuda-cudart-dev
{% endif %}
- cuda-version ={{ cuda_version }}
run:
- {{ pin_subpackage('libucxx', exact=True) }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
- gtest
- gmock
about:
home: https://rapids.ai/
license: BSD-3-Clause
license_family: BSD
license_file: ../../../LICENSE
summary: libucxx tests executables
- name: ucxx
version: {{ version }}
script: build_and_install_ucxx.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ python }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
- ucx
requirements:
build:
- cmake
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major != "11" %}
- {{ compiler('cuda') }}
{% endif %}
- ninja
- sysroot_{{ target_platform }}
host:
- python
- pip
- scikit-build>=0.13.1
- setuptools
- cython >=3.0.0
- numpy 1.21
- {{ pin_subpackage('libucxx', exact=True) }}
- ucx
- rmm =24.02
{% if cuda_major != "11" %}
- cuda-cudart-dev
{% endif %}
- cuda-version {{ cuda_version }}
run:
- python * *_cpython
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
- ucx >=1.14.1,<1.16.0
- {{ pin_subpackage('libucxx', exact=True) }}
- {{ pin_compatible('rmm', max_pin='x.x') }}
- {{ pin_compatible('numpy') }}
- pynvml >=11.4.1
run_constrained:
- cupy >=9.5.0
- numba >=0.57.0
test:
commands:
- test -f $PREFIX/lib/libucxx_python.so
- test -f $PREFIX/include/ucxx/python/exception.h
- test -f $PREFIX/include/ucxx/python/future.h
- test -f $PREFIX/include/ucxx/python/api.h
- test -f $PREFIX/include/ucxx/python/constructors.h
- test -f $PREFIX/include/ucxx/python/notifier.h
- test -f $PREFIX/include/ucxx/python/python_future.h
- test -f $PREFIX/include/ucxx/python/worker.h
imports:
- ucxx
about:
home: https://rapids.ai/
license: BSD-3-Clause
license_family: BSD
license_file: ../../../LICENSE
summary: UCX Python interface built on top of the libucxx C++ implementation
- name: distributed-ucxx
version: {{ version }}
script: build_and_install_distributed_ucxx.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: py{{ python }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
requirements:
host:
- python
- pip
- tomli
run:
- python * *_cpython
- dask >=2023.9.2
- distributed >=2023.9.2
- {{ pin_subpackage('ucxx', max_pin='x.x') }}
test:
imports:
- distributed_ucxx
about:
home: https://rapids.ai/
license: BSD-3-Clause
license_family: BSD
license_file: ../../../LICENSE
summary: UCX communication module for Dask Distributed
| 0 |
rapidsai_public_repos/ucxx/conda/recipes | rapidsai_public_repos/ucxx/conda/recipes/ucxx/build_and_install_ucxx.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
cmake --install cpp/build --component python
./build.sh ucxx
| 0 |
rapidsai_public_repos/ucxx/conda/recipes | rapidsai_public_repos/ucxx/conda/recipes/ucxx/build_and_install_distributed_ucxx.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
./build.sh distributed_ucxx
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/cpp/CPPLINT.cfg | filter=+build/include_what_you_use,-build/c++11,-build/header_guard,-build/include_order,-readability/todo,-whitespace
linelength=100
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/cpp/CMakeLists.txt | # =================================================================================
# SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD 3-Clause License
# =================================================================================
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
include(../fetch_rapids.cmake)
include(rapids-cmake)
include(rapids-cpm)
include(rapids-export)
include(rapids-find)
set(libucxx_version 0.36.00)
project(
UCXX
VERSION ${libucxx_version}
LANGUAGES C CXX
)
# Needed because GoogleBenchmark changes the state of FindThreads.cmake, causing subsequent runs to
# have different values for the `Threads::Threads` target. Setting this flag ensures
# `Threads::Threads` is the same value in first run and subsequent runs.
set(THREADS_PREFER_PTHREAD_FLAG ON)
# ##################################################################################################
# * build options ---------------------------------------------------------------------------------
option(BUILD_TESTS "Configure CMake to build tests" ON)
option(BUILD_BENCHMARKS "Configure CMake to build benchmarks" OFF)
option(BUILD_EXAMPLES "Configure CMake to build examples" OFF)
option(BUILD_SHARED_LIBS "Build UCXX shared libraries" ON)
option(UCXX_ENABLE_PYTHON "Enable support for Python notifier thread" OFF)
option(UCXX_ENABLE_RMM "Enable support for CUDA multi-buffer transfer with RMM" OFF)
option(DISABLE_DEPRECATION_WARNINGS "Disable warnings generated from deprecated declarations." OFF)
message(VERBOSE "UCXX: Configure CMake to build tests: ${BUILD_TESTS}")
message(VERBOSE "UCXX: Configure CMake to build benchmarks: ${BUILD_BENCHMARKS}")
message(VERBOSE "UCXX: Configure CMake to build examples: ${BUILD_EXAMPLES}")
message(VERBOSE "UCXX: Build UCXX shared libraries: ${BUILD_SHARED_LIBS}")
message(VERBOSE "UCXX: Enable support for Python notifier thread: ${UCXX_ENABLE_PYTHON}")
message(VERBOSE "UCXX: Enable support for CUDA multi-buffer transfer with RMM: ${UCXX_ENABLE_RMM}")
message(
VERBOSE
"UCXX: Disable warnings generated from deprecated declarations: ${DISABLE_DEPRECATION_WARNINGS}"
)
# Set a default build type if none was specified
rapids_cmake_build_type("Release")
set(UCXX_BUILD_TESTS ${BUILD_TESTS})
set(UCXX_BUILD_BENCHMARKS ${BUILD_BENCHMARKS})
set(UCXX_BUILD_EXAMPLES ${BUILD_EXAMPLES})
set(UCXX_CXX_FLAGS "")
set(UCXX_CXX_DEFINITIONS "")
# Set RMM logging level
set(RMM_LOGGING_LEVEL
"INFO"
CACHE STRING "Choose the logging level."
)
set_property(
CACHE RMM_LOGGING_LEVEL PROPERTY STRINGS "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "CRITICAL" "OFF"
)
message(VERBOSE "UCXX: RMM_LOGGING_LEVEL = '${RMM_LOGGING_LEVEL}'.")
if(NOT UCXX_GENERATED_INCLUDE_DIR)
set(UCXX_GENERATED_INCLUDE_DIR ${UCXX_BINARY_DIR})
endif()
# ##################################################################################################
# * conda environment -----------------------------------------------------------------------------
rapids_cmake_support_conda_env(conda_env MODIFY_PREFIX_PATH)
# ##################################################################################################
# * compiler options ------------------------------------------------------------------------------
rapids_find_package(
ucx REQUIRED
BUILD_EXPORT_SET ucxx-exports
INSTALL_EXPORT_SET ucxx-exports
)
# ##################################################################################################
# * dependencies ----------------------------------------------------------------------------------
# find Threads (needed by ucxxtestutil)
rapids_find_package(
Threads REQUIRED
BUILD_EXPORT_SET ucxx-exports
INSTALL_EXPORT_SET ucxx-exports
)
# add third party dependencies using CPM
rapids_cpm_init()
# find rmm
include(cmake/thirdparty/get_rmm.cmake)
# find or install GoogleTest
include(cmake/thirdparty/get_gtest.cmake)
# ##################################################################################################
# * library targets -------------------------------------------------------------------------------
# Build main library
add_library(
ucxx
src/address.cpp
src/buffer.cpp
src/component.cpp
src/config.cpp
src/context.cpp
src/delayed_submission.cpp
src/endpoint.cpp
src/header.cpp
src/inflight_requests.cpp
src/internal/request_am.cpp
src/listener.cpp
src/log.cpp
src/request.cpp
src/request_am.cpp
src/request_helper.cpp
src/request_stream.cpp
src/request_tag.cpp
src/request_tag_multi.cpp
src/worker.cpp
src/worker_progress_thread.cpp
src/utils/callback_notifier.cpp
src/utils/file_descriptor.cpp
src/utils/python.cpp
src/utils/sockaddr.cpp
src/utils/ucx.cpp
)
set_target_properties(
ucxx
PROPERTIES BUILD_RPATH "\$ORIGIN"
INSTALL_RPATH "\$ORIGIN"
# set target compile options
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON
INTERFACE_POSITION_INDEPENDENT_CODE ON
)
target_compile_options(
ucxx PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${UCXX_CXX_FLAGS}>"
)
# Specify include paths for the current target and dependents
target_include_directories(
ucxx
PUBLIC "$<BUILD_INTERFACE:${UCXX_SOURCE_DIR}/include>"
"$<BUILD_INTERFACE:${UCXX_GENERATED_INCLUDE_DIR}/include>"
PRIVATE "$<BUILD_INTERFACE:${UCXX_SOURCE_DIR}/src>"
INTERFACE "$<INSTALL_INTERFACE:include>"
)
target_compile_definitions(
ucxx PUBLIC "$<$<COMPILE_LANGUAGE:CXX>:${UCXX_CXX_DEFINITIONS}>"
)
# Enable RMM if necessary
if(UCXX_ENABLE_RMM)
target_compile_definitions(ucxx PUBLIC UCXX_ENABLE_RMM)
endif()
# Define spdlog level
target_compile_definitions(ucxx PUBLIC "SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_${RMM_LOGGING_LEVEL}")
# Specify the target module library dependencies
target_link_libraries(
ucxx
PUBLIC rmm::rmm ucx::ucp
)
# Add Conda library, and include paths if specified
if(TARGET conda_env)
target_link_libraries(ucxx PRIVATE conda_env)
endif()
add_library(ucxx::ucxx ALIAS ucxx)
# Build Python if requested
if(UCXX_ENABLE_PYTHON)
add_subdirectory(python)
else()
set(UCXX_PYTHON_TARGET_TARGET "")
endif()
# ##################################################################################################
# * tests and benchmarks --------------------------------------------------------------------------
# ##################################################################################################
# ##################################################################################################
# * add tests -------------------------------------------------------------------------------------
if(UCXX_BUILD_TESTS)
# include CTest module -- automatically calls enable_testing()
include(CTest)
# Always print verbose output when tests fail if run using `make test`.
list(APPEND CMAKE_CTEST_ARGUMENTS "--output-on-failure")
add_subdirectory(tests)
endif()
# ##################################################################################################
# * add benchmarks --------------------------------------------------------------------------------
if(UCXX_BUILD_BENCHMARKS)
add_subdirectory(benchmarks)
endif()
# ##################################################################################################
# * add examples ----------------------------------------------------------------------------------
if(UCXX_BUILD_EXAMPLES)
add_subdirectory(examples)
endif()
# ##################################################################################################
# * install targets -------------------------------------------------------------------------------
rapids_cmake_install_lib_dir(lib_dir)
include(CPack)
include(GNUInstallDirs)
set(CMAKE_INSTALL_DEFAULT_COMPONENT_NAME ucxx)
set(_components_export_string)
install(
TARGETS ucxx
DESTINATION ${lib_dir}
EXPORT ucxx-exports
)
if(TARGET ucxx_python)
install(
TARGETS ucxx_python
COMPONENT python
DESTINATION ${lib_dir}
EXCLUDE_FROM_ALL
EXPORT ucxx-python-exports
)
set(_components_export_string COMPONENTS python COMPONENTS_EXPORT_SET ucxx-python-exports)
endif()
install(DIRECTORY ${UCXX_SOURCE_DIR}/include/ucxx
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
)
set(doc_string
[=[
Provide targets for the ucxx library.
UCXX is a C++ interface for the UCX communication framework. It aims to provide
a high-level API for the UCP layer, encompassing both transparent lifetime
management of objects and thread-safety.
Imported Targets
^^^^^^^^^^^^^^^^
If ucxx is found, this module defines the following IMPORTED GLOBAL
targets:
ucxx::ucxx - The main ucxx library.
]=]
)
rapids_export(
INSTALL ucxx
EXPORT_SET ucxx-exports ${_components_export_string}
GLOBAL_TARGETS ucxx python
NAMESPACE ucxx::
DOCUMENTATION doc_string
)
# ##################################################################################################
# * build export -------------------------------------------------------------------------------
rapids_export(
BUILD ucxx
EXPORT_SET ucxx-exports ${_components_export_string}
GLOBAL_TARGETS ucxx python
NAMESPACE ucxx::
DOCUMENTATION doc_string
)
# ##################################################################################################
# * make documentation ----------------------------------------------------------------------------
# doc targets for UCXX
add_custom_command(
OUTPUT UCXX_DOXYGEN
WORKING_DIRECTORY ${UCXX_SOURCE_DIR}/doxygen
COMMAND doxygen Doxyfile
VERBATIM
COMMENT "Custom command for building ucxx doxygen docs."
)
add_custom_target(
docs_ucxx
DEPENDS UCXX_DOXYGEN
COMMENT "Custom command for building ucxx doxygen docs."
)
# ##################################################################################################
# * make gdb helper scripts ------------------------------------------------------------------------
# build pretty-printer load script
if(rmm_SOURCE_DIR)
configure_file(scripts/load-pretty-printers.in load-pretty-printers @ONLY)
endif()
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/cpp/.clang-format | ---
# Refer to the following link for the explanation of each params:
# http://releases.llvm.org/8.0.0/tools/clang/docs/ClangFormatStyleOptions.html
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: true
AlignConsecutiveBitFields: true
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: true
AllowAllConstructorInitializersOnNextLine: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortEnumsOnASingleLine: true
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: true
AllowShortLambdasOnASingleLine: true
AllowShortLoopsOnASingleLine: false
# This is deprecated
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
# disabling the below splits, else, they'll just add to the vertical length of source files!
SplitEmptyFunction: false
SplitEmptyRecord: false
SplitEmptyNamespace: false
BreakAfterJavaFieldAnnotations: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: WebKit
BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakInheritanceList: BeforeColon
BreakStringLiterals: true
ColumnLimit: 100
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
# Kept the below 2 to be the same as `IndentWidth` to keep everything uniform
ConstructorInitializerIndentWidth: 2
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Preserve
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Never
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
RawStringFormats:
- Language: Cpp
Delimiters:
- cc
- CC
- cpp
- Cpp
- CPP
- 'c++'
- 'C++'
CanonicalDelimiter: ''
- Language: TextProto
Delimiters:
- pb
- PB
- proto
- PROTO
EnclosingFunctions:
- EqualsProto
- EquivToProto
- PARSE_PARTIAL_TEXT_PROTO
- PARSE_TEST_PROTO
- PARSE_TEXT_PROTO
- ParseTextOrDie
- ParseTextProtoOrDie
CanonicalDelimiter: ''
BasedOnStyle: google
# NOTE: comment reflow can reformat doxygen comments; it is enabled below — verify doxygen output remains correct when reformatting.
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceBeforeSquareBrackets: false
SpaceInEmptyBlock: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: c++17
StatementMacros:
- Q_UNUSED
- QT_REQUIRE_VERSION
# Be consistent with indent-width, even for people who use tab for indentation!
TabWidth: 2
UseTab: Never
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/request_tag_multi.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#include <ucp/api/ucp.h>
#include <ucxx/buffer.h>
#include <ucxx/endpoint.h>
#include <ucxx/future.h>
#include <ucxx/request.h>
namespace ucxx {
class RequestTagMulti;
/**
 * @brief Container for the resources of one header or frame of a multi-buffer
 * tag transfer.
 *
 * Holds the `ucxx::Request` for a single header or frame, together with the
 * serialized header string (used when sending) or the internally-allocated
 * buffer (used when receiving a frame).
 */
struct BufferRequest {
  std::shared_ptr<Request> request{nullptr};  ///< The `ucxx::RequestTag` of a header or frame
  std::shared_ptr<std::string> stringBuffer{nullptr};  ///< Serialized `Header`
  std::shared_ptr<Buffer> buffer{nullptr};  ///< Internally allocated buffer to receive a frame

  BufferRequest();
  ~BufferRequest();

  // Non-copyable and non-movable: instances are always shared via
  // `std::shared_ptr` (see `BufferRequestPtr`).
  BufferRequest(const BufferRequest&)            = delete;
  BufferRequest& operator=(BufferRequest const&) = delete;
  BufferRequest(BufferRequest&& o)               = delete;
  BufferRequest& operator=(BufferRequest&& o)    = delete;
};
typedef std::shared_ptr<BufferRequest> BufferRequestPtr;
/**
 * @brief Send or receive multiple buffers as a single logical tag operation.
 *
 * Groups one `ucxx::RequestTag` per header and per frame under a single
 * request object, so multiple buffers can be transferred and awaited as one
 * unit. Construction happens exclusively through the friend factory functions
 * `createRequestTagMultiSend()` and `createRequestTagMultiRecv()`.
 */
class RequestTagMulti : public Request {
 private:
  bool _send{false};       ///< Whether this is a send (`true`) operation or recv (`false`)
  ucp_tag_t _tag{0};       ///< Tag to match
  size_t _totalFrames{0};  ///< The total number of frames handled by this request
  std::mutex
    _completedRequestsMutex{};   ///< Mutex to control access to completed requests container
  size_t _completedRequests{0};  ///< Count requests that already completed

 public:
  std::vector<BufferRequestPtr> _bufferRequests{};  ///< Container of all requests posted
  bool _isFilled{false};                            ///< Whether all requests have been posted

 private:
  // Non-default-constructible (use the friend factory functions) and
  // non-copyable/non-movable: lifetime is managed via `std::shared_ptr`.
  RequestTagMulti()                       = delete;
  RequestTagMulti(const RequestTagMulti&) = delete;
  RequestTagMulti& operator=(RequestTagMulti const&) = delete;
  RequestTagMulti(RequestTagMulti&& o)               = delete;
  RequestTagMulti& operator=(RequestTagMulti&& o) = delete;

  /**
   * @brief Protected constructor of a multi-buffer tag receive request.
   *
   * Construct multi-buffer tag receive request, registering the request to the
   * `std::shared_ptr<Endpoint>` parent so that it may be canceled if necessary. This
   * constructor is responsible for creating a Python future that can be later awaited
   * in Python asynchronous code, which is independent of the Python futures used by
   * the underlying `ucxx::RequestTag` object, which will be invisible to the user. Once
   * the initial setup is complete, `callback()` is called to initiate receiving by posting
   * the first request to receive a header.
   *
   * @param[in] endpoint            the `std::shared_ptr<Endpoint>` parent component
   * @param[in] send                whether this is a send (`true`) or receive (`false`)
   *                                tag request.
   * @param[in] tag                 the tag to match.
   * @param[in] enablePythonFuture  whether a python future should be created and
   *                                subsequently notified.
   */
  RequestTagMulti(std::shared_ptr<Endpoint> endpoint,
                  const bool send,
                  const ucp_tag_t tag,
                  const bool enablePythonFuture);

  /**
   * @brief Receive all frames.
   *
   * Once the header(s) has(have) been received, receiving frames containing the actual data
   * is the next step. This method parses the header(s) and creates as many
   * `ucxx::RequestTag` objects as necessary, each one that will handle a single sending or
   * receiving a single frame.
   *
   * Finally, the object is marked as filled, meaning that all requests were already
   * scheduled and are waiting for completion.
   *
   * @throws std::runtime_error if called by a send request.
   */
  void recvFrames();

  /**
   * @brief Receive a message with header.
   *
   * Create the request to receive a message with header, setting
   * `ucxx::RequestTagMulti::callback` as the user-defined callback of `ucxx::RequestTag` to
   * handle the next request.
   *
   * @throws std::runtime_error if called by a send request.
   */
  void recvHeader();

  /**
   * @brief Send all header(s) and frame(s).
   *
   * Build header request(s) and send them, followed by requests to send all frame(s).
   *
   * @throws std::length_error if the lengths of `buffer`, `size` and `isCUDA` do not
   *                           match.
   */
  void send(const std::vector<void*>& buffer,
            const std::vector<size_t>& size,
            const std::vector<int>& isCUDA);

 public:
  /**
   * @brief Enqueue a multi-buffer tag send operation.
   *
   * Initiate a multi-buffer tag send operation, returning a
   * `std::shared<ucxx::RequestTagMulti>` that can be later awaited and checked for errors.
   * This is a non-blocking operation, and the status of the transfer must be verified from
   * the resulting request object before the data can be released.
   *
   * The primary use of multi-buffer transfers is in Python where we want to reduce the
   * amount of futures needed to watch for, thus reducing Python overhead. However, this
   * may be used as a convenience implementation for transfers that require multiple
   * frames, internally this is implemented as one or more `ucxx::RequestTag` calls sending
   * headers (depending on the number of frames being transferred), followed by one
   * `ucxx::RequestTag` for each data frame.
   *
   * Using a Python future may be requested by specifying `enablePythonFuture`. If a
   * Python future is requested, the Python application must then await on this future to
   * ensure the transfer has completed. Requires UCXX to be compiled with
   * `UCXX_ENABLE_PYTHON=1`.
   *
   * @throws std::runtime_error if sizes of `buffer`, `size` and `isCUDA` do not match.
   *
   * @param[in] endpoint            the `std::shared_ptr<Endpoint>` parent component
   * @param[in] tag                 the tag to match.
   * @param[in] enablePythonFuture  whether a python future should be created and
   *                                subsequently notified.
   * @param[in] callbackFunction    user-defined callback function to call upon completion.
   * @param[in] callbackData        user-defined data to pass to the `callbackFunction`.
   *
   * @returns Request to be subsequently checked for the completion and its state.
   */
  friend std::shared_ptr<RequestTagMulti> createRequestTagMultiSend(
    std::shared_ptr<Endpoint> endpoint,
    const std::vector<void*>& buffer,
    const std::vector<size_t>& size,
    const std::vector<int>& isCUDA,
    const ucp_tag_t tag,
    const bool enablePythonFuture);

  /**
   * @brief Enqueue a multi-buffer tag receive operation.
   *
   * Enqueue a multi-buffer tag receive operation, returning a
   * `std::shared<ucxx::RequestTagMulti>` that can be later awaited and checked for errors.
   * This is a non-blocking operation, and because the receiver has no a priori knowledge
   * of the data being received, memory allocations are automatically handled internally.
   * The receiver must have the same capabilities of the sender, so that if the sender is
   * compiled with RMM support to allow for CUDA transfers, the receiver must have the
   * ability to understand and allocate CUDA memory.
   *
   * Using a Python future may be requested by specifying `enablePythonFuture`. If a
   * Python future is requested, the Python application must then await on this future to
   * ensure the transfer has completed. Requires UCXX to be compiled with
   * `UCXX_ENABLE_PYTHON=1`.
   *
   * @param[in] endpoint            the `std::shared_ptr<Endpoint>` parent component
   * @param[in] tag                 the tag to match.
   * @param[in] enablePythonFuture  whether a python future should be created and
   *                                subsequently notified.
   *
   * @returns Request to be subsequently checked for the completion and its state.
   */
  friend std::shared_ptr<RequestTagMulti> createRequestTagMultiRecv(
    std::shared_ptr<Endpoint> endpoint, const ucp_tag_t tag, const bool enablePythonFuture);

  /**
   * @brief `ucxx::RequestTagMulti` destructor.
   *
   * Free internal resources.
   */
  virtual ~RequestTagMulti();

  /**
   * @brief Mark request as completed.
   *
   * Mark a single `ucxx::RequestTag` as completed. This method is passed as the
   * user-defined callback to the `ucxx::RequestTag` constructor, which will then be
   * executed when that completes.
   *
   * When this method is called, the request that completed will be pushed into a container
   * which will be later used to evaluate if all frames completed and set the final status
   * of the multi-transfer request and the Python future, if enabled. The final status is
   * either `UCS_OK` if all underlying requests completed successfully, otherwise it will
   * contain the status of the first failing request, for granular information the user
   * may still verify each of the underlying requests individually.
   *
   * @param[in] status  the status of the request being completed.
   * @param[in] request the `ucxx::BufferRequest` object containing a single tag.
   */
  void markCompleted(ucs_status_t status, RequestCallbackUserData request);

  /**
   * @brief Callback to submit request to receive new header or frames.
   *
   * When a receive multi-transfer tag request is created or has received a new header, this
   * callback must be executed to ensure the next request to receive is submitted.
   *
   * If no requests for the present `ucxx::RequestTagMulti` transfer have been posted yet,
   * create one receiving a message with header. If the previous received request is header
   * containing the `next` flag set, then the next request is another header. Otherwise, the
   * next incoming message(s) is(are) frame(s).
   *
   * When called, the callback receives a single argument, the status of the current request.
   *
   * @param[in] status the status of the request being completed.
   * @throws std::runtime_error if called by a send request.
   */
  void recvCallback(ucs_status_t status);

  /**
   * @brief Populate the delayed submission of the underlying requests.
   *
   * Override of `ucxx::Request::populateDelayedSubmission()`.
   * NOTE(review): declaration only — exact behavior is defined in the
   * corresponding source file; confirm details there.
   */
  void populateDelayedSubmission() override;

  /**
   * @brief Cancel the multi-buffer request.
   *
   * Override of `ucxx::Request::cancel()`.
   * NOTE(review): declaration only — exact cancellation semantics are defined
   * in the corresponding source file; confirm details there.
   */
  void cancel() override;
};
typedef std::shared_ptr<RequestTagMulti> RequestTagMultiPtr;
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/config.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <ucp/api/ucp.h>
#include <ucxx/typedefs.h>
namespace ucxx {
/**
 * @brief Component holding the UCP configuration in effect for the process.
 *
 * Owns a `ucp_config_t*` handle created from UCX defaults, environment
 * variable modifiers and user-provided options, and exposes the visible
 * configuration entries as a `ConfigMap`.
 */
class Config {
 private:
  ucp_config_t* _handle{nullptr};  ///< Handle to the UCP config
  ConfigMap _configMap;            ///< Map containing all visible UCP configurations

  /**
   * @brief Read UCX configuration and apply user options.
   *
   * Read UCX configuration defaults and environment variable modifiers and apply user
   * configurations overriding previously set configurations.
   *
   * @param[in] userOptions user-defined options overriding defaults and environment
   *                        variable modifiers.
   *
   * @returns The handle to the UCP configurations defined for the process.
   */
  ucp_config_t* readUCXConfig(ConfigMap userOptions);

  /**
   * @brief Parse UCP configurations and convert them to a map.
   *
   * Parse UCP configurations obtained from `ucp_config_print()` and convert them to a map
   * for easy access.
   *
   * @returns The map to the UCP configurations defined for the process.
   */
  ConfigMap ucxConfigToMap();

 public:
  // Non-default-constructible and non-copyable/non-movable: the object
  // exclusively owns the underlying `ucp_config_t*` handle.
  Config() = delete;
  Config(const Config&) = delete;
  Config& operator=(Config const&) = delete;
  Config(Config&& o)               = delete;
  Config& operator=(Config&& o) = delete;

  /**
   * @brief Constructor that reads the UCX configuration and apply user options.
   *
   * Read UCX configuration defaults and environment variable modifiers and apply user
   * configurations overriding previously set configurations.
   *
   * @param[in] userOptions user-defined options overriding defaults and environment
   *                        variable modifiers.
   */
  explicit Config(ConfigMap userOptions);

  // Releases the underlying `ucp_config_t*` handle.
  ~Config();

  /**
   * @brief Get the configuration map.
   *
   * Get the configuration map with all visible UCP configurations that are in effect for
   * the current process.
   *
   * @returns The map to the UCP configurations defined for the process.
   */
  ConfigMap get();

  /**
   * @brief Get the underlying `ucp_config_t*` handle but does not release ownership.
   *
   * Lifetime of the `ucp_config_t*` handle is managed by the `ucxx::Config` object and
   * its ownership is non-transferable. Once the `ucxx::Config` is destroyed the handle
   * is not valid anymore, it is the user's responsibility to ensure the owner's lifetime
   * while using the handle.
   *
   * @code{.cpp}
   * // config is `ucxx::Config`
   * ucp_config_t* configHandle = config.getHandle();
   * @endcode
   *
   * @return The underlying `ucp_config_t*` handle.
   */
  ucp_config_t* getHandle();
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/inflight_requests.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <map>
#include <memory>
#include <mutex>
namespace ucxx {
class Request;
typedef std::map<const Request* const, std::shared_ptr<Request>> InflightRequestsMap;
typedef std::unique_ptr<InflightRequestsMap> InflightRequestsMapPtr;
/**
 * @brief Container tracking requests currently in flight.
 *
 * Keeps shared references to posted requests so the owner can cancel them
 * collectively, remove them upon completion, or transfer the whole set to
 * another `InflightRequests` object via `release()`/`merge()`.
 */
class InflightRequests {
 private:
  InflightRequestsMapPtr _inflightRequests{
    std::make_unique<InflightRequestsMap>()};  ///< Container storing pointers to all inflight
                                               ///< requests known to the owner of this object
  std::mutex _mutex{};  ///< Mutex to control access to inflight requests container
  std::mutex
    _cancelMutex{};  ///< Mutex to allow cancelation and prevent removing requests simultaneously

 public:
  /**
   * @brief Default constructor.
   */
  InflightRequests() = default;

  // Non-copyable and non-movable: the tracked set has exactly one owner;
  // transfer is done explicitly through `release()`/`merge()`.
  InflightRequests(const InflightRequests&) = delete;
  InflightRequests& operator=(InflightRequests const&) = delete;
  InflightRequests(InflightRequests&& o)               = delete;
  InflightRequests& operator=(InflightRequests&& o) = delete;

  /**
   * @brief Destructor.
   *
   * Cancels all inflight requests before destruction.
   */
  ~InflightRequests();

  /**
   * @brief Query the number of pending inflight requests.
   *
   * @returns The number of pending inflight requests.
   */
  size_t size();

  /**
   * @brief Insert an inflight request into the container.
   *
   * @param[in] request a `std::shared_ptr<Request>` with the inflight request.
   */
  void insert(std::shared_ptr<Request> request);

  /**
   * @brief Merge a container of inflight requests with the internal container.
   *
   * Merge a container of inflight requests obtained from `InflightRequests::release()` of
   * another object with the internal container.
   *
   * @param[in] inflightRequestsMap container of inflight requests to merge with the
   *                                internal container.
   */
  void merge(InflightRequestsMapPtr inflightRequestsMap);

  /**
   * @brief Remove an inflight request from the internal container.
   *
   * Remove the reference to a specific request from the internal container. This should
   * be called when a request has completed and the `InflightRequests` owner does not need
   * to keep track of it anymore. The raw pointer to a `ucxx::Request` is passed here as
   * opposed to the usual `std::shared_ptr<ucxx::Request>` used elsewhere, this is because
   * the raw pointer address is used as key to the requests reference, and this is
   * called from the object's destructor.
   *
   * @param[in] request raw pointer to the request
   */
  void remove(const Request* const request);

  /**
   * @brief Issue cancelation of all inflight requests and clear the internal container.
   *
   * Issue cancelation of all inflight requests known to this object and clear the
   * internal container. The total number of canceled requests is returned.
   *
   * @returns The total number of canceled requests.
   */
  size_t cancelAll();

  /**
   * @brief Releases the internal container.
   *
   * Releases the internal container that can be merged into another `InflightRequests`
   * object with `InflightRequests::merge()`. Effectively leaves the internal state as a
   * clean, new object.
   *
   * @returns The internal container.
   */
  InflightRequestsMapPtr release();
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/notifier.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <chrono>
#include <memory>
#include <ucp/api/ucp.h>
namespace ucxx {
enum class RequestNotifierThreadState { NotRunning = 0, Running, Stopping };
enum class RequestNotifierWaitState { Ready = 0, Timeout, Shutdown };
class Future;
/**
 * @brief Abstract base class for notifying futures of request completion.
 *
 * Concrete implementations (e.g., a Python-aware notifier) schedule completed
 * futures and later deliver their results from a notifier thread.
 */
class Notifier {
 protected:
  // Constructible only by derived classes; this is an abstract interface.
  Notifier() = default;

 public:
  // Non-copyable and non-movable: implementations own notification state that
  // must not be duplicated.
  Notifier(const Notifier&) = delete;
  Notifier& operator=(Notifier const&) = delete;
  Notifier(Notifier&& o)               = delete;
  Notifier& operator=(Notifier&& o) = delete;

  /**
   * @brief Virtual destructor.
   *
   * Virtual destructor with empty implementation.
   */
  virtual ~Notifier() {}

  /**
   * @brief Schedule notification of completed future.
   *
   * Schedule the notification of a completed Python future, but does not set the future's
   * result yet, which is later done by `runRequestNotifier()`. Because this call does
   * not notify the future, it does not require any resources associated with it.
   *
   * This is meant to be called from `ucxx::Future::notify()`.
   *
   * @param[in] future future to notify.
   * @param[in] status the request completion status.
   */
  virtual void scheduleFutureNotify(std::shared_ptr<Future> future, ucs_status_t status) = 0;

  /**
   * @brief Wait for a new event with a timeout in nanoseconds.
   *
   * Block while waiting for an event (new future to be notified or stop signal) with added
   * timeout in nanoseconds to unblock after a that period if no event has occurred. A
   * period of zero means this call will never unblock until an event occurs.
   *
   * WARNING: Be cautious using a period of zero, if no event ever occurs it will be
   * impossible to continue the thread.
   *
   * @param[in] periodNs the time in nanoseconds to wait for an event before unblocking.
   */
  virtual RequestNotifierWaitState waitRequestNotifier(uint64_t periodNs) = 0;

  /**
   * @brief Notify event loop of all pending completed futures.
   *
   * This method will notify the internal resource of all pending completed futures.
   * Notifying the resource may require some exclusion mechanism, thus it should not run
   * indefinitely, but instead run periodically. Futures that completed must first be
   * scheduled with `scheduleFutureNotify()`.
   */
  virtual void runRequestNotifier() = 0;

  /**
   * @brief Make known to the notifier thread that it should stop.
   *
   * Often called when the application is shutting down, make known to the notifier that
   * it should stop and exit.
   */
  virtual void stopRequestNotifierThread() = 0;
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/api.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#ifndef UCXX_ENABLE_RMM
#define UCXX_ENABLE_RMM 0
#endif
#include <ucxx/address.h>
#include <ucxx/buffer.h>
#include <ucxx/constructors.h>
#include <ucxx/context.h>
#include <ucxx/endpoint.h>
#include <ucxx/header.h>
#include <ucxx/inflight_requests.h>
#include <ucxx/listener.h>
#include <ucxx/request.h>
#include <ucxx/request_tag_multi.h>
#include <ucxx/typedefs.h>
#include <ucxx/worker.h>
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/worker_progress_thread.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <functional>
#include <memory>
#include <mutex>
#include <thread>
#include <ucxx/delayed_submission.h>
namespace ucxx {
typedef std::function<void(void)> SignalWorkerFunction;
typedef std::function<void(void*)> ProgressThreadStartCallback;
typedef void* ProgressThreadStartCallbackArg;
/**
 * @brief Thread that continuously progresses a worker.
 *
 * Wraps a `std::thread` that runs a user-provided progress function in a loop,
 * processing delayed submissions between iterations, until destruction.
 */
class WorkerProgressThread {
 private:
  std::thread _thread{};     ///< Thread object
  bool _stop{false};         ///< Signal to stop on next iteration
  bool _pollingMode{false};  ///< Whether thread will use polling mode to progress
  SignalWorkerFunction _signalWorkerFunction{
    nullptr};  ///< Function signaling worker to wake the progress event (when _pollingMode is
               ///< `false`)
  ProgressThreadStartCallback _startCallback{
    nullptr};  ///< Callback to execute at start of the progress thread
  ProgressThreadStartCallbackArg _startCallbackArg{
    nullptr};  ///< Argument to pass to start callback
  std::shared_ptr<DelayedSubmissionCollection> _delayedSubmissionCollection{
    nullptr};  ///< Collection of enqueued delayed submissions

  /**
   * @brief The function executed in the new thread.
   *
   * This function ensures the `startCallback` is executed once at the start of the thread,
   * subsequently starting a continuous loop that processes any delayed submission requests
   * that are pending in the `delayedSubmissionCollection` followed by the execution of the
   * `progressFunction`, the loop repeats until `stop` is set.
   *
   * @param[in] progressFunction            user-defined progress function implementation.
   * @param[in] stop                        reference to the stop signal causing the
   *                                        progress loop to terminate.
   * @param[in] startCallback               user-defined callback function to be executed
   *                                        at the start of the progress thread.
   * @param[in] startCallbackArg            an argument to be passed to the start callback.
   * @param[in] delayedSubmissionCollection collection of delayed submissions to be
   *                                        processed during progress.
   */
  static void progressUntilSync(
    std::function<bool(void)> progressFunction,
    const bool& stop,
    ProgressThreadStartCallback startCallback,
    ProgressThreadStartCallbackArg startCallbackArg,
    std::shared_ptr<DelayedSubmissionCollection> delayedSubmissionCollection);

 public:
  WorkerProgressThread() = delete;

  /**
   * @brief Constructor of `ucxx::WorkerProgressThread`.
   *
   * Spawns the progress thread, which executes `startCallback` once and then
   * repeatedly runs `progressFunction` (processing any pending delayed
   * submissions) until the object is destroyed.
   * NOTE(review): the original doc block here was copy-pasted from worker
   * creation (`ucxx::createWorker`) — presumably this class is instantiated
   * internally by `ucxx::Worker`; confirm against the implementation.
   *
   * This thread runs asynchronously with the main application thread. If you require
   * cross-thread synchronization (for example when tearing down the thread or canceling
   * requests), use the generic pre and post callbacks with a `CallbackNotifier` that
   * synchronizes with the application thread. Since the worker progress itself may change
   * state, it is usually the case that synchronization is needed in both pre and post
   * callbacks.
   *
   * @param[in] pollingMode                 whether the thread should use polling mode to
   *                                        progress.
   * @param[in] progressFunction            user-defined progress function implementation.
   * @param[in] signalWorkerFunction        user-defined function to wake the worker
   *                                        progress event (when `pollingMode` is `false`).
   * @param[in] startCallback               user-defined callback function to be executed
   *                                        at the start of the progress thread.
   * @param[in] startCallbackArg            an argument to be passed to the start callback.
   * @param[in] delayedSubmissionCollection collection of delayed submissions to be
   *                                        processed during progress.
   */
  WorkerProgressThread(const bool pollingMode,
                       std::function<bool(void)> progressFunction,
                       std::function<void(void)> signalWorkerFunction,
                       ProgressThreadStartCallback startCallback,
                       ProgressThreadStartCallbackArg startCallbackArg,
                       std::shared_ptr<DelayedSubmissionCollection> delayedSubmissionCollection);

  /**
   * @brief `ucxx::WorkerProgressThread` destructor.
   *
   * Raises the stop signal and joins the thread.
   */
  ~WorkerProgressThread();

  /**
   * @brief Returns whether the thread was created for polling progress mode.
   *
   * @returns Whether polling mode is enabled.
   */
  bool pollingMode() const;

  /**
   * @brief Get the ID of the underlying progress thread.
   *
   * @returns The `std::thread::id` of the progress thread.
   */
  std::thread::id getId() const;
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/future.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
#include <ucp/api/ucp.h>
#include <ucxx/notifier.h>
namespace ucxx {
/**
* @brief Represent a future that may be notified by a specialized notifier.
*
* Abstract base class for a future object whose completion may be notified by a
* `ucxx::Notifier`, usually running on a separate thread. Concrete implementations
* (e.g., a Python asyncio future) provide the actual handle via `getHandle()`/`release()`.
*/
class Future : public std::enable_shared_from_this<Future> {
protected:
std::shared_ptr<Notifier> _notifier{nullptr};  ///< The notifier object
/**
* @brief Construct a future that may be notified from a notifier object.
*
* Construct a future that may be notified from a notifier object, usually running
* on a separate thread to decrease overhead from the application thread.
*
* This class may also be used to set the result or exception from any thread.
*
* @param[in] notifier notifier object, possibly running on a separate thread.
*/
explicit Future(std::shared_ptr<Notifier> notifier) : _notifier(notifier) {}
public:
Future()                    = delete;
Future(const Future&)       = delete;
Future& operator=(Future const&) = delete;
Future(Future&& o)               = delete;
Future& operator=(Future&& o) = delete;
/**
* @brief Virtual destructor.
*
* Virtual destructor with empty implementation.
*/
virtual ~Future() {}
/**
* @brief Inform the notifier that the future has completed.
*
* Inform the notifier that the future has completed so it can notify associated
* resources of that occurrence.
*
* @throws std::runtime_error if the object is invalid or has been already released.
*
* @param[in] status request completion status.
*/
virtual void notify(ucs_status_t status) = 0;
/**
* @brief Set the future completion status.
*
* Set the future status as completed, either with a successful completion or error.
*
* @throws std::runtime_error if the object is invalid or has been already released.
*
* @param[in] status request completion status.
*/
virtual void set(ucs_status_t status) = 0;
/**
* @brief Get the underlying handle but does not release ownership.
*
* Get the underlying handle without releasing ownership. This can be useful for example
* for logging, where we want to see the address of the pointer but do not want to
* transfer ownership.
*
* @warning The destructor will also destroy the future, a pointer taken via this method
* will cause the object to become invalid.
*
* @throws std::runtime_error if the object is invalid or has been already released.
*
* @returns The underlying handle.
*/
virtual void* getHandle() = 0;
/**
* @brief Get the underlying handle and release ownership.
*
* Get the underlying handle releasing ownership. This should be used when the future
* needs to be permanently transferred to consumer code. After calling this method the
* object becomes invalid for any other uses.
*
* @throws std::runtime_error if the object is invalid or has been already released.
*
* @returns The underlying handle.
*/
virtual void* release() = 0;
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/request.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <atomic>
#include <chrono>
#include <memory>
#include <string>
#include <ucp/api/ucp.h>
#include <ucxx/component.h>
#include <ucxx/endpoint.h>
#include <ucxx/future.h>
#include <ucxx/typedefs.h>
/**
* @brief Convenience macro to log a request trace message prefixed with its owner,
* request pointer and operation name.
*/
#define ucxx_trace_req_f(_owner, _req, _name, _message, ...) \
ucxx_trace_req("%s, req %p, op %s: " _message, (_owner), (_req), (_name), ##__VA_ARGS__)
namespace ucxx {
/**
* @brief Base type for a UCXX transfer request.
*
* Abstract base class containing attributes and methods common to all UCXX transfer
* requests, such as status tracking, cancelation, user-defined completion callbacks
* and optional Python future notification.
*/
class Request : public Component {
protected:
ucs_status_t _status{UCS_INPROGRESS};  ///< Requests status
std::string _status_msg{};  ///< Human-readable status message
void* _request{nullptr};  ///< Pointer to UCP request
std::shared_ptr<Future> _future{nullptr};  ///< Future to notify upon completion
RequestCallbackUserFunction _callback{nullptr};  ///< Completion callback
RequestCallbackUserData _callbackData{nullptr};  ///< Completion callback data
std::shared_ptr<Worker> _worker{
nullptr};  ///< Worker that generated request (if not from endpoint)
std::shared_ptr<Endpoint> _endpoint{
nullptr};  ///< Endpoint that generated request (if not from worker)
std::string _ownerString{
"undetermined owner"};  ///< String to print owner (endpoint or worker) when logging
std::shared_ptr<DelayedSubmission> _delayedSubmission{
nullptr};  ///< The submission object that will dispatch the request
std::string _operationName{
"request_undefined"};  ///< Human-readable operation name, mostly used for log messages
std::recursive_mutex _mutex{};  ///< Mutex to prevent checking status while it's being set
bool _enablePythonFuture{true};  ///< Whether Python future is enabled for this request
/**
* @brief Protected constructor of an abstract `ucxx::Request`.
*
* Construct an abstract request, registering the request to the appropriate parent
* (either an endpoint or a worker) so that it may be canceled if necessary. This
* constructor is also responsible for populating a `ucxx::DelayedSubmission` object that
* will effectively dispatch the request when appropriate (either immediately or at the
* next worker progress iteration), as well as create Python futures that can be later
* awaited in Python asynchronous code.
*
* @param[in] endpointOrWorker    the parent component, which may either be a
*                                `std::shared_ptr<Endpoint>` or
*                                `std::shared_ptr<Worker>`.
* @param[in] delayedSubmission   the object to manage request submission.
* @param[in] operationName       a human-readable operation name to help identifying
*                                requests by their types when UCXX logging is enabled.
* @param[in] enablePythonFuture  whether a python future should be created and
*                                subsequently notified.
*/
Request(std::shared_ptr<Component> endpointOrWorker,
std::shared_ptr<DelayedSubmission> delayedSubmission,
const std::string operationName,
const bool enablePythonFuture = false);
/**
* @brief Perform initial processing of the request to determine if immediate completion.
*
* Perform initial processing of the request, determining whether it completed
* immediately, in which case it will set its status and call a user-defined callback
* (when registered by the derived class), otherwise do nothing until the UCX operation
* completes and the internal callback is executed to finally set its state.
*/
void process();
/**
* @brief Set the request status and notify Python future.
*
* Set the request status and notify the Python future of the status change if the
* object was created with Python support. Intended to be called only once when the
* request status changes to its final state.
*
* @param[in] status the status of the request to be set.
*/
void setStatus(ucs_status_t status);
public:
Request()               = delete;
Request(const Request&) = delete;
Request& operator=(Request const&) = delete;
Request(Request&& o)               = delete;
Request& operator=(Request&& o) = delete;
/**
* @brief `ucxx::Request` destructor.
*
* Removes its own reference from its parent's inflight messages collection and
* free internal resources.
*/
virtual ~Request();
/**
* @brief Cancel the request.
*
* Cancel the request. Often called by an error handler or parent's object
* destructor but may be called by the user to cancel the request as well.
*/
virtual void cancel();
/**
* @brief Return the status of the request.
*
* Return a `ucs_status_t` that may be used for more fine-grained error handling than
* relying on `checkError()` alone, which does not currently implement all error
* statuses supported by UCX.
*
* @return the current status of the request.
*/
ucs_status_t getStatus();
/**
* @brief Return the future used to check on state.
*
* If the object has enabled Python future support, return the future that can be
* awaited from Python, returns `nullptr` otherwise.
*
* @returns the Python future object or `nullptr`.
*/
void* getFuture();
/**
* @brief Check whether the request completed with an error.
*
* Check whether the request has completed with an error, if an error occurred an
* exception is raised, but if the request has completed or is in progress this call will
* act as a no-op. To verify whether the request is in progress either `isCompleted()` or
* `getStatus()` should be checked.
*
* @throw `ucxx::CanceledError`         if the request was canceled.
* @throw `ucxx::MessageTruncatedError` if the message was truncated.
* @throw `ucxx::Error`                 if another error occurred.
*/
void checkError();
/**
* @brief Check whether the request has already completed.
*
* Check whether the request has already completed. The status of the request must be
* verified with `getStatus()` before consumption.
*
* @return whether the request has completed.
*/
bool isCompleted();
/**
* @brief Callback executed by UCX when request is completed.
*
* Generic callback executed by UCX when a request is completed, used to set the status
* of the request and free any resources associated with it.
*
* WARNING: This is not intended to be called by the user, but it currently needs to be
* a public method so that UCX may access it. In future changes this will be moved to
* an internal object and remove this method from the public API.
*
* @param[in] request the UCX request pointer.
* @param[in] status  the completion status of the request.
*/
void callback(void* request, ucs_status_t status);
/**
* @brief Populate the internal submission dispatcher.
*
* The `ucxx::Request` utilizes `ucxx::DelayedSubmission` to manage when the request will
* be dispatched. This method is registered as a callback in the worker, that may choose
* to either execute (submit) it immediately or delay for the next iteration of its
* progress loop, depending on the progress mode in use by the worker.
*
* See `ucxx::DelayedSubmission::DelayedSubmission()` for more details.
*/
virtual void populateDelayedSubmission() = 0;
/**
* @brief Get formatted string with owner type and handle address.
*
* Get a formatted string with owner type (worker or endpoint) and its respective handle
* address. This is meant to get logging information for a request's callback, which is
* not a member attribute of `ucxx::Request` or derived class, but a static method
* or external function instead.
*
* @returns the formatted string containing the owner type and its handle.
*/
const std::string& getOwnerString() const;
/**
* @brief Get the received buffer.
*
* This method is used to get the received buffer for applicable derived classes (e.g.,
* `RequestAm` receive operations), in all other cases this will return `nullptr`. Before
* getting the received buffer it's necessary to check that the request completed
* successfully either by validating `getStatus() == UCS_OK` or by checking the request
* completed with `isCompleted() == true` and that it did not error with `checkError()`,
* if any of those is unsuccessful this call returns `nullptr`.
*
* @return The received buffer (if applicable) or `nullptr`.
*/
virtual std::shared_ptr<Buffer> getRecvBuffer();
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/request_am.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
#include <utility>
#include <ucp/api/ucp.h>
#include <ucxx/delayed_submission.h>
#include <ucxx/request.h>
#include <ucxx/typedefs.h>
namespace ucxx {
class Buffer;
namespace internal {
class RecvAmMessage;
} // namespace internal
/**
* @brief Send or receive an active message with `ucxx::Request` interface.
*
* Transfer request for UCP active messages, supporting both send and receive
* operations, with received data made available via `getRecvBuffer()`.
*/
class RequestAm : public Request {
private:
friend class internal::RecvAmMessage;
ucs_memory_type_t _sendHeader{};  ///< The header to send
std::shared_ptr<Buffer> _buffer{nullptr};  ///< The AM received message buffer
/**
* @brief Private constructor of `ucxx::RequestAm` send.
*
* This is the internal implementation of `ucxx::RequestAm` send constructor, made private
* not to be called directly. This constructor is made private to ensure all UCXX objects
* are shared pointers and the correct lifetime management of each one.
*
* Instead the user should use one of the following:
*
* - `ucxx::Endpoint::amSend()`
* - `ucxx::createRequestAmSend()`
*
* @throws ucxx::Error if `endpoint` is not a valid `std::shared_ptr<ucxx::Endpoint>`.
*
* @param[in] endpoint            the parent endpoint.
* @param[in] buffer              a raw pointer to the data to be sent.
* @param[in] length              the size in bytes of the active message to be sent.
* @param[in] memoryType          the memory type of the buffer.
* @param[in] enablePythonFuture  whether a python future should be created and
*                                subsequently notified.
* @param[in] callbackFunction    user-defined callback function to call upon completion.
* @param[in] callbackData        user-defined data to pass to the `callbackFunction`.
*/
RequestAm(std::shared_ptr<Endpoint> endpoint,
void* buffer,
size_t length,
ucs_memory_type_t memoryType,
const bool enablePythonFuture = false,
RequestCallbackUserFunction callbackFunction = nullptr,
RequestCallbackUserData callbackData = nullptr);
/**
* @brief Private constructor of `ucxx::RequestAm` receive.
*
* This is the internal implementation of `ucxx::RequestAm` receive constructor, made
* private not to be called directly. This constructor is made private to ensure all UCXX
* objects are shared pointers and the correct lifetime management of each one.
*
* Instead the user should use one of the following:
*
* - `ucxx::Endpoint::amRecv()`
* - `ucxx::createRequestAmRecv()`
*
* @throws ucxx::Error if `endpointOrWorker` is not a valid
*                     `std::shared_ptr<ucxx::Endpoint>` or
*                     `std::shared_ptr<ucxx::Worker>`.
*
* @param[in] endpointOrWorker    the parent component, which may either be a
*                                `std::shared_ptr<Endpoint>` or
*                                `std::shared_ptr<Worker>`.
* @param[in] enablePythonFuture  whether a python future should be created and
*                                subsequently notified.
* @param[in] callbackFunction    user-defined callback function to call upon completion.
* @param[in] callbackData        user-defined data to pass to the `callbackFunction`.
*/
RequestAm(std::shared_ptr<Component> endpointOrWorker,
const bool enablePythonFuture = false,
RequestCallbackUserFunction callbackFunction = nullptr,
RequestCallbackUserData callbackData = nullptr);
public:
/**
* @brief Constructor for `std::shared_ptr<ucxx::RequestAm>` send.
*
* The constructor for a `std::shared_ptr<ucxx::RequestAm>` object, creating a send active
* message request, returning a pointer to a request object that can be later awaited and
* checked for errors. This is a non-blocking operation, and the status of the transfer
* must be verified from the resulting request object before the data can be
* released.
*
* @throws ucxx::Error if `endpoint` is not a valid
*                     `std::shared_ptr<ucxx::Endpoint>`.
*
* @param[in] endpoint            the parent endpoint.
* @param[in] buffer              a raw pointer to the data to be transferred.
* @param[in] length              the size in bytes of the tag message to be transferred.
* @param[in] memoryType          the memory type of the buffer.
* @param[in] enablePythonFuture  whether a python future should be created and
*                                subsequently notified.
* @param[in] callbackFunction    user-defined callback function to call upon completion.
* @param[in] callbackData        user-defined data to pass to the `callbackFunction`.
*
* @returns The `shared_ptr<ucxx::RequestAm>` object
*/
friend std::shared_ptr<RequestAm> createRequestAmSend(
std::shared_ptr<Endpoint> endpoint,
void* buffer,
size_t length,
ucs_memory_type_t memoryType,
const bool enablePythonFuture,
RequestCallbackUserFunction callbackFunction,
RequestCallbackUserData callbackData);
/**
* @brief Constructor for `std::shared_ptr<ucxx::RequestAm>` receive.
*
* The constructor for a `std::shared_ptr<ucxx::RequestAm>` object, creating a receive
* active message request, returning a pointer to a request object that can be later
* awaited and checked for errors. This is a non-blocking operation, and the status of
* the transfer must be verified from the resulting request object before the data can be
* consumed, the data is available via the `getRecvBuffer()` method if the transfer
* completed successfully.
*
* @throws ucxx::Error if `endpoint` is not a valid
*                     `std::shared_ptr<ucxx::Endpoint>`.
*
* @param[in] endpoint            the parent endpoint.
* @param[in] enablePythonFuture  whether a python future should be created and
*                                subsequently notified.
* @param[in] callbackFunction    user-defined callback function to call upon completion.
* @param[in] callbackData        user-defined data to pass to the `callbackFunction`.
*
* @returns The `shared_ptr<ucxx::RequestAm>` object
*/
friend std::shared_ptr<RequestAm> createRequestAmRecv(
std::shared_ptr<Endpoint> endpoint,
const bool enablePythonFuture,
RequestCallbackUserFunction callbackFunction,
RequestCallbackUserData callbackData);
virtual void populateDelayedSubmission();
/**
* @brief Create and submit an active message send request.
*
* This is the method that should be called to actually submit an active message send
* request. It is meant to be called from `populateDelayedSubmission()`, which is decided
* at the discretion of `std::shared_ptr<ucxx::Worker>`. See `populateDelayedSubmission()`
* for more details.
*/
void request();
/**
* @brief Receive callback registered by `ucxx::Worker`.
*
* This is the receive callback registered by the `ucxx::Worker` to handle incoming active
* messages. For each incoming active message, a proper buffer will be allocated based on
* the header sent by the remote endpoint using the default allocator or one registered by
* the user via `ucxx::Worker::registerAmAllocator()`. Following that, the message is
* immediately received onto the buffer and a `UCS_OK` or the proper error status is set
* onto the `RequestAm` that is returned to the user, or will be later handled by another
* callback when the message is ready. If the callback is executed when a user has already
* requested receipt of the active message, the buffer and status will be set on the
* earliest request, otherwise a new request is created and saved in a pool that will be
* already populated and ready for consumption or waiting for the internal callback when
* requested.
*
* This is always called by `ucp_worker_progress()`, and thus will happen in the same
* thread that is called from, when using the worker progress thread, this is called from
* the progress thread.
*
* @param[in,out] arg  pointer to the `AmData` object held by the `ucxx::Worker` who
*                     registered this callback.
* @param[in] header         pointer to the header containing the sender buffer's memory type.
* @param[in] header_length  length in bytes of the receive header.
* @param[in] data           pointer to the buffer containing the remote endpoint's send data.
* @param[in] length         the length in bytes of the message to be received.
* @param[in] param          UCP parameters of the active message being received.
*/
static ucs_status_t recvCallback(void* arg,
const void* header,
size_t header_length,
void* data,
size_t length,
const ucp_am_recv_param_t* param);
std::shared_ptr<Buffer> getRecvBuffer() override;
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/listener.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
#include <string>
#include <ucp/api/ucp.h>
#include <ucxx/component.h>
#include <ucxx/worker.h>
namespace ucxx {
/**
* @brief Listen for remote connections on a specified port.
*
* Wraps a `ucp_listener_h` bound to an IP and port on a `ucxx::Worker`, dispatching a
* user-defined callback for each incoming client connection request.
*/
class Listener : public Component {
private:
ucp_listener_h _handle{nullptr};  ///< The UCP listener handle
std::string _ip{};  ///< The IP address to which the listener is bound
uint16_t _port{0};  ///< The port to which the listener is bound
/**
* @brief Private constructor of `ucxx::Listener`.
*
* This is the internal implementation of `ucxx::Listener` constructor, made private not
* to be called directly. Instead the user should call `worker::createListener()` or
* `ucxx::createListener()`.
*
*
* @param[in] worker        the worker from which to create the listener.
* @param[in] port          the port which the listener should be bound to.
* @param[in] callback      user-defined callback to be executed on incoming client
*                          connections.
* @param[in] callbackArgs  argument to be passed to the callback.
*/
Listener(std::shared_ptr<Worker> worker,
uint16_t port,
ucp_listener_conn_callback_t callback,
void* callbackArgs);
public:
Listener()                = delete;
Listener(const Listener&) = delete;
Listener& operator=(Listener const&) = delete;
Listener(Listener&& o)               = delete;
Listener& operator=(Listener&& o) = delete;
~Listener();
/**
* @brief Constructor of `shared_ptr<ucxx::Listener>`.
*
* The constructor for a `shared_ptr<ucxx::Listener>` object. The default constructor is
* made private to ensure all UCXX objects are shared pointers for correct lifetime
* management.
*
* @code{.cpp}
*
* typedef struct ClientContext {
*   std::shared_ptr<ucxx::Endpoint> endpoint{nullptr};
*   std::shared_ptr<ucxx::Listener> listener{nullptr};
* } ClientContextType;
*
* void myCallback(ucp_conn_request_h connRequest, void* arg) {
*   ClientContextType* clientContext = (ClientContextType*)arg;
*   clientContext->endpoint =
*     clientContext->listener->createEndpointFromConnRequest(connRequest);
* }
*
* ClientContext clientContext;
*
* // worker is `std::shared_ptr<ucxx::Worker>`
* auto listener = worker->createListener(12345, myCallback, &clientContext);
* clientContext.listener = listener;
*
* // Equivalent to line above
* // auto listener = ucxx::createListener(worker, 12345, myCallback, &clientContext);
* @endcode
*
* @param[in] worker        the worker from which to create the listener.
* @param[in] port          the port which the listener should be bound to.
* @param[in] callback      user-defined callback to be executed on incoming client
*                          connections.
* @param[in] callbackArgs  argument to be passed to the callback.
*
* @returns The `shared_ptr<ucxx::Listener>` object.
*/
friend std::shared_ptr<Listener> createListener(std::shared_ptr<Worker> worker,
uint16_t port,
ucp_listener_conn_callback_t callback,
void* callbackArgs);
/**
* @brief Constructor for `shared_ptr<ucxx::Endpoint>`.
*
* The constructor for a `shared_ptr<ucxx::Endpoint>` object from a `ucp_conn_request_h`,
* as delivered by a `ucxx::Listener` connection callback.
*
* @code{.cpp}
* // listener is `std::shared_ptr<ucxx::Listener>`, with a `ucp_conn_request_h` delivered
* // by a `ucxx::Listener` connection callback.
* auto endpoint = listener->createEndpointFromConnRequest(connRequest, true);
*
* // Equivalent to line above
* // auto endpoint = ucxx::createEndpointFromConnRequest(listener, connRequest, true);
* @endcode
*
* @param[in] connRequest           handle to connection request delivered by a
*                                  listener callback.
* @param[in] endpointErrorHandling whether to enable endpoint error handling.
*
* @returns The `shared_ptr<ucxx::Endpoint>` object.
*/
std::shared_ptr<Endpoint> createEndpointFromConnRequest(ucp_conn_request_h connRequest,
bool endpointErrorHandling = true);
/**
* @brief Get the underlying `ucp_listener_h` handle.
*
* Lifetime of the `ucp_listener_h` handle is managed by the `ucxx::Listener` object and
* its ownership is non-transferrable. Once the `ucxx::Listener` is destroyed the handle
* is not valid anymore, it is the user's responsibility to ensure the owner's lifetime
* while using the handle.
*
* @code{.cpp}
* // listener is `std::shared_ptr<ucxx::Listener>`
* ucp_listener_h listenerHandle = listener->getHandle();
* @endcode
*
* @returns The underlying `ucp_listener_h` handle.
*/
ucp_listener_h getHandle();
/**
* @brief Get the port to which the listener is bound.
*
* Get the port to which the listener is bound.
*
* @returns the port to which the listener is bound.
*/
uint16_t getPort();
/**
* @brief Get the IP address to which the listener is bound.
*
* Get the IP address to which the listener is bound.
*
* @returns the IP address to which the listener is bound.
*/
std::string getIp();
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/request_stream.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
#include <ucp/api/ucp.h>
#include <ucxx/delayed_submission.h>
#include <ucxx/request.h>
#include <ucxx/typedefs.h>
namespace ucxx {
/**
* @brief Send or receive a message with the UCX Stream API via `ucxx::Request` interface.
*
* Transfer request for UCP stream send and receive operations, reporting truncation
* errors for receives shorter than the requested length.
*/
class RequestStream : public Request {
private:
size_t _length{0};  ///< The stream request length in bytes
/**
* @brief Private constructor of `ucxx::RequestStream`.
*
* This is the internal implementation of `ucxx::RequestStream` constructor, made private
* not to be called directly. This constructor is made private to ensure all UCXX objects
* are shared pointers and the correct lifetime management of each one.
*
* Instead the user should use one of the following:
*
* - `ucxx::Endpoint::streamRecv()`
* - `ucxx::Endpoint::streamSend()`
* - `ucxx::createRequestStream()`
*
* @param[in] endpoint            the `std::shared_ptr<Endpoint>` parent component
* @param[in] send                whether this is a send (`true`) or receive (`false`)
*                                stream request.
* @param[in] buffer              a raw pointer to the data to be transferred.
* @param[in] length              the size in bytes of the stream message to be
*                                transferred.
* @param[in] enablePythonFuture  whether a python future should be created and
*                                subsequently notified.
*/
RequestStream(std::shared_ptr<Endpoint> endpoint,
bool send,
void* buffer,
size_t length,
const bool enablePythonFuture = false);
public:
/**
* @brief Constructor for `std::shared_ptr<ucxx::RequestStream>`.
*
* The constructor for a `std::shared_ptr<ucxx::RequestStream>` object, creating a send
* or receive stream request, returning a pointer to a request object that can be later
* awaited and checked for errors. This is a non-blocking operation, and the status of
* the transfer must be verified from the resulting request object before the data can be
* released (for a send operation) or consumed (for a receive operation).
*
* @param[in] endpoint            the `std::shared_ptr<Endpoint>` parent component
* @param[in] send                whether this is a send (`true`) or receive (`false`)
*                                stream request.
* @param[in] buffer              a raw pointer to the data to be transferred.
* @param[in] length              the size in bytes of the stream message to be
*                                transferred.
* @param[in] enablePythonFuture  whether a python future should be created and
*                                subsequently notified.
*
* @returns The `shared_ptr<ucxx::RequestStream>` object
*/
friend std::shared_ptr<RequestStream> createRequestStream(std::shared_ptr<Endpoint> endpoint,
bool send,
void* buffer,
size_t length,
const bool enablePythonFuture);
virtual void populateDelayedSubmission();
/**
* @brief Create and submit a stream request.
*
* This is the method that should be called to actually submit a stream request. It is
* meant to be called from `populateDelayedSubmission()`, which is decided at the
* discretion of `std::shared_ptr<ucxx::Worker>`. See `populateDelayedSubmission()` for
* more details.
*/
void request();
/**
* @brief Implementation of the stream receive request callback.
*
* Implementation of the stream receive request callback. Verify whether the message was
* truncated and set that state if necessary, and finally dispatch
* `ucxx::Request::callback()`.
*
* WARNING: This is not intended to be called by the user, but it currently needs to be
* a public method so that UCX may access it. In future changes this will be moved to
* an internal object and remove this method from the public API.
*
* @param[in] request the UCX request pointer.
* @param[in] status  the completion status of the request.
* @param[in] length  length of message received used to verify for truncation.
*/
void callback(void* request, ucs_status_t status, size_t length);
/**
* @brief Callback executed by UCX when a stream send request is completed.
*
* Callback executed by UCX when a stream send request is completed, that will dispatch
* `ucxx::Request::callback()`.
*
* WARNING: This is not intended to be called by the user, but it currently needs to be
* a public method so that UCX may access it. In future changes this will be moved to
* an internal object and remove this method from the public API.
*
* @param[in] request the UCX request pointer.
* @param[in] status  the completion status of the request.
* @param[in] arg     the pointer to the `ucxx::Request` object that created the
*                    transfer, effectively `this` pointer as seen by `request()`.
*/
static void streamSendCallback(void* request, ucs_status_t status, void* arg);
/**
* @brief Callback executed by UCX when a stream receive request is completed.
*
* Callback executed by UCX when a stream receive request is completed, that will
* dispatch `ucxx::RequestStream::callback()`.
*
* WARNING: This is not intended to be called by the user, but it currently needs to be
* a public method so that UCX may access it. In future changes this will be moved to
* an internal object and remove this method from the public API.
*
* @param[in] request the UCX request pointer.
* @param[in] status  the completion status of the request.
* @param[in] length  length of message received used to verify for truncation.
* @param[in] arg     the pointer to the `ucxx::Request` object that created the
*                    transfer, effectively `this` pointer as seen by `request()`.
*/
static void streamRecvCallback(void* request, ucs_status_t status, size_t length, void* arg);
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/address.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
#include <string>
#include <ucp/api/ucp.h>
#include <ucxx/component.h>
#include <ucxx/worker.h>
namespace ucxx {
/**
* @brief The address of a UCX worker.
*
* Wraps a `ucp_address_t*` handle and its length, used to connect to a remote worker
* without a listener, either obtained from a local worker or reconstructed from a
* byte-string received from a remote worker.
*/
class Address : public Component {
private:
ucp_address_t* _handle{nullptr};  ///< The UCP address handle
size_t _length{0};  ///< The length of the address handle in bytes
/**
* @brief Private constructor of `ucxx::Address`.
*
* This is the internal implementation of `ucxx::Address` constructor, made private not
* to be called directly. This constructor is made private to ensure all UCXX objects
* are shared pointers and the correct lifetime management of each one.
*
* Instead the user should use one of the following:
*
* - `ucxx::createAddressFromWorker()`
* - `ucxx::createAddressFromString()`
* - `ucxx::Worker::getAddress()`
*
* @param[in] worker  the parent `std::shared_ptr<Worker>` component.
* @param[in] address UCP address handle.
* @param[in] length  length of the address byte-string in bytes.
*/
Address(std::shared_ptr<Worker> worker, ucp_address_t* address, size_t length);
public:
Address()               = delete;
Address(const Address&) = delete;
Address& operator=(Address const&) = delete;
Address(Address&& o)               = delete;
Address& operator=(Address&& o) = delete;
~Address();
/**
* @brief Constructor for `shared_ptr<ucxx::Address>` from worker.
*
* The constructor for a `shared_ptr<ucxx::Address>` object from a
* `std::shared_ptr<ucxx::Worker>` to obtain its address.
*
* @param[in] worker  parent worker from which to get the address.
*
* @returns The `shared_ptr<ucxx::Address>` object.
*/
friend std::shared_ptr<Address> createAddressFromWorker(std::shared_ptr<Worker> worker);
/**
* @brief Constructor for `shared_ptr<ucxx::Address>` from string.
*
* The constructor for a `shared_ptr<ucxx::Address>` object from the address extracted
* as string from a remote `std::shared_ptr<ucxx::Worker>`.
*
* @param[in] addressString  the string from which to create the address.
*
* @returns The `shared_ptr<ucxx::Address>` object.
*/
friend std::shared_ptr<Address> createAddressFromString(std::string addressString);
/**
* @brief Get the underlying `ucp_address_t*` handle.
*
* Lifetime of the `ucp_address_t*` handle is managed by the `ucxx::Address` object and
* its ownership is non-transferrable. Once the `ucxx::Address` is destroyed the handle
* is not valid anymore, it is the user's responsibility to ensure the owner's lifetime
* while using the handle.
*
* @code{.cpp}
* // address is `std::shared_ptr<ucxx::Address>`
* ucp_address_t* addressHandle = address->getHandle();
* @endcode
*
* @returns The underlying `ucp_address_t` handle.
*/
ucp_address_t* getHandle() const;
/**
* @brief Get the length of the `ucp_address_t*` handle.
*
* Get the length of the `ucp_address_t*` handle, required to access the complete address
* and prevent reading out-of-bound.
*
* @returns The length of the `ucp_address_t*` handle in bytes.
*/
size_t getLength() const;
/**
* @brief Get the address as a string.
*
* Convenience method to copy the underlying address to a `std::string` and return it as
* a single object.
*
* @returns The address as a `std::string` byte-string.
*/
std::string getString() const;
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/worker.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <ucp/api/ucp.h>
#include <ucxx/component.h>
#include <ucxx/constructors.h>
#include <ucxx/context.h>
#include <ucxx/delayed_submission.h>
#include <ucxx/future.h>
#include <ucxx/inflight_requests.h>
#include <ucxx/notifier.h>
#include <ucxx/worker_progress_thread.h>
namespace ucxx {
class Address;
class Buffer;
class Endpoint;
class Listener;
class RequestAm;
namespace internal {
class AmData;
} // namespace internal
class Worker : public Component {
 private:
ucp_worker_h _handle{nullptr}; ///< The UCP worker handle
int _epollFileDescriptor{-1}; ///< The epoll file descriptor
int _workerFileDescriptor{-1}; ///< The worker file descriptor
std::mutex _inflightRequestsMutex{}; ///< Mutex to access the inflight requests pool
std::shared_ptr<InflightRequests> _inflightRequests{
std::make_shared<InflightRequests>()}; ///< The inflight requests
std::mutex
_inflightRequestsToCancelMutex{}; ///< Mutex to access the inflight requests to cancel pool
std::shared_ptr<InflightRequests> _inflightRequestsToCancel{
std::make_shared<InflightRequests>()}; ///< The inflight requests scheduled to be canceled
std::shared_ptr<WorkerProgressThread> _progressThread{nullptr}; ///< The progress thread object
std::thread::id _progressThreadId{}; ///< The progress thread ID
std::function<void(void*)> _progressThreadStartCallback{
nullptr}; ///< The callback function to execute at progress thread start
void* _progressThreadStartCallbackArg{
nullptr}; ///< The argument to be passed to the progress thread start callback
std::shared_ptr<DelayedSubmissionCollection> _delayedSubmissionCollection{
nullptr}; ///< Collection of enqueued delayed submissions
friend std::shared_ptr<RequestAm> createRequestAmRecv(
std::shared_ptr<Endpoint> endpoint,
const bool enablePythonFuture,
RequestCallbackUserFunction callbackFunction,
RequestCallbackUserData callbackData);
 protected:
bool _enableFuture{
false}; ///< Boolean identifying whether the worker was created with future capability
std::mutex _futuresPoolMutex{}; ///< Mutex to access the futures pool
std::queue<std::shared_ptr<Future>>
_futuresPool{}; ///< Futures pool to prevent running out of fresh futures
std::shared_ptr<Notifier> _notifier{nullptr}; ///< Notifier object
std::shared_ptr<internal::AmData>
_amData; ///< Worker data made available to Active Messages callback
 private:
/**
* @brief Drain the worker for uncaught tag messages received.
*
* Called by the destructor, any uncaught tag messages received will be drained so as
* not to generate UCX warnings.
*/
void drainWorkerTagRecv();
/**
* @brief Get active message receive request.
*
* Returns an active message request from the pool if the worker has already begun
* handling a request with the active messages callback, otherwise creates a new request
* that is later populated with status and buffer by the active messages callback.
*
* @param[in] ep the endpoint handle where receiving the message, the same handle that
* will later be used to reply to the message.
* @param[in] createAmRecvRequestFunction function to create a new request if one is not
* already available in the pool.
*
* @returns Request to be subsequently checked for the completion state and data.
*/
std::shared_ptr<RequestAm> getAmRecv(
ucp_ep_h ep, std::function<std::shared_ptr<RequestAm>()> createAmRecvRequestFunction);
/**
* @brief Stop the progress thread if running without raising warnings.
*
* Called by the destructor, will stop the progress thread if running without
* raising warnings.
*/
void stopProgressThreadNoWarn();
/**
* @brief Register an inflight request.
*
* Called each time a new transfer request is made by the `Worker`, such that it may
* be canceled when necessary.
*
* @param[in] request the request to register.
*
* @return the request that was registered (i.e., the `request` argument itself).
*/
std::shared_ptr<Request> registerInflightRequest(std::shared_ptr<Request> request);
/**
* @brief Progress the worker until all communication events are completed.
*
* Iteratively calls `progressOnce()` until all communication events are completed.
*
* @returns whether any communication events have been progressed.
*/
bool progressPending();
 protected:
/**
* @brief Protected constructor of `ucxx::Worker`.
*
* This is the internal implementation of `ucxx::Worker` constructor, made protected not
* to be called directly. Instead the user should call `context::createWorker()` or
* `ucxx::createWorker()` (or `ucxx::createPythonWorker` for the Python-enabled
* implementation).
*
*
* @param[in] context the context from which to create the worker.
* @param[in] enableDelayedSubmission if `true`, each `ucxx::Request` will not be
* submitted immediately, but instead delayed to
* the progress thread. Requires use of the
* progress thread.
* @param[in] enableFuture if `true`, notifies the future associated with each
* `ucxx::Request`, currently used only by `ucxx::python::Worker`.
*/
explicit Worker(std::shared_ptr<Context> context,
const bool enableDelayedSubmission = false,
const bool enableFuture = false);
 public:
Worker() = delete;
Worker(const Worker&) = delete;
Worker& operator=(Worker const&) = delete;
Worker(Worker&& o) = delete;
Worker& operator=(Worker&& o) = delete;
/**
* @brief Constructor of `shared_ptr<ucxx::Worker>`.
*
* The constructor for a `shared_ptr<ucxx::Worker>` object. The default constructor is
* made private to ensure all UCXX objects are shared pointers for correct
* lifetime management.
*
* @code{.cpp}
* // context is `std::shared_ptr<ucxx::Context>`
* auto worker = context->createWorker(false);
*
* // Equivalent to line above
* // auto worker = ucxx::createWorker(context, false);
* @endcode
*
* @param[in] context the context from which to create the worker.
* @param[in] enableDelayedSubmission if `true`, each `ucxx::Request` will not be
* submitted immediately, but instead delayed to
* the progress thread. Requires use of the
* progress thread.
* @param[in] enableFuture if `true`, notifies the future associated with each
* `ucxx::Request`, currently used only by `ucxx::python::Worker`.
* @returns The `shared_ptr<ucxx::Worker>` object
*/
friend std::shared_ptr<Worker> createWorker(std::shared_ptr<Context> context,
const bool enableDelayedSubmission,
const bool enableFuture);
/**
* @brief `ucxx::Worker` destructor.
*/
virtual ~Worker();
/**
* @brief Get the underlying `ucp_worker_h` handle.
*
* Lifetime of the `ucp_worker_h` handle is managed by the `ucxx::Worker` object and
* its ownership is non-transferrable. Once the `ucxx::Worker` is destroyed the handle
* is not valid anymore, it is the user's responsibility to ensure the owner's lifetime
* while using the handle.
*
* @code{.cpp}
* // worker is `std::shared_ptr<ucxx::Worker>`
* ucp_worker_h workerHandle = worker->getHandle();
* @endcode
*
* @returns The underlying `ucp_worker_h` handle.
*/
ucp_worker_h getHandle();
/**
* @brief Get information about the underlying `ucp_worker_h` object.
*
* Convenience wrapper for `ucp_worker_print_info()` to get information about the
* underlying UCP worker handle and return it as a string.
*
* @returns String containing information about the UCP worker.
*/
std::string getInfo();
/**
* @brief Initialize blocking progress mode.
*
* Initialize blocking progress mode, creates internal file descriptors to handle blocking
* progress by waiting for the UCP worker to notify the file descriptors. This method is
* supposed to be called when usage of `progressWorkerEvent()` is intended, before the
* first call to `progressWorkerEvent()`. If using polling mode only via
* `progress()`/`progressOnce()` calls or wake-up with `waitProgress()`, this method should
* not be called.
*
* In blocking mode, the user should call `progressWorkerEvent()` to block and then progress
* the worker as new events arrive. `wakeProgressEvent()` may be called to forcefully wake
* this method, for example to shutdown the application.
*
* @code{.cpp}
* // worker is `std::shared_ptr<ucxx::Worker>`
*
* // Block until UCX's wakes up for an incoming event, then fully progresses the
* // worker
* worker->initBlockingProgressMode();
* worker->progressWorkerEvent();
*
* // All events have been progressed.
* @endcode
*
* @throws std::ios_base::failure if creating any of the file descriptors or setting their
* statuses.
*/
void initBlockingProgressMode();
/**
* @brief Arm the UCP worker.
*
* Wrapper for `ucp_worker_arm`, checking its return status for errors and raising an
* exception if an error occurred.
*
* @throws ucxx::Error if an error occurred while attempting to arm the worker.
*
* @returns `true` if worker was armed successfully, `false` if its status was `UCS_ERR_BUSY`.
*/
bool arm();
/**
* @brief Progress worker event while in blocking progress mode.
*
* Blocks until a new worker event has happened and the worker notifies the file descriptor
* associated with it, or `epollTimeout` has elapsed. Requires blocking progress mode to
* be initialized with `initBlockingProgressMode()` before the first call to this method.
* Additionally ensure inflight messages pending for cancellation are canceled.
*
* @code{.cpp}
* // worker is `std::shared_ptr<ucxx::Worker>`
*
* // Block until UCX's wakes up for an incoming event, then fully progresses the
* // worker
* worker->initBlockingProgressMode();
* worker->progressWorkerEvent();
*
* // All events have been progressed.
* @endcode
*
* @param[in] epollTimeout timeout in ms when waiting for worker event, or -1 to block
* indefinitely.
*
* @throws std::ios_base::failure if creating any of the file descriptors or setting their
* statuses.
*
* @returns `true` if any communication was progressed, `false` otherwise.
*/
bool progressWorkerEvent(const int epollTimeout = -1);
/**
* @brief Signal the worker that an event happened.
*
* Signals that an event has happened, causing either `progressWorkerEvent()`
* or `waitProgress()` to immediately wake up.
*
* @code{.cpp}
* // worker is `std::shared_ptr<ucxx::Worker>`
*
* void progressThread() {
* // Block until UCX's wakes up for an incoming event, then fully progresses the
* // worker.
* worker->initBlockingProgressMode();
* worker->progressWorkerEvent();
*
* // Will reach this point and exit after 3 seconds
* }
*
* void otherThread() {
* // Signals the worker after 3 seconds
* std::this_thread::sleep_for(std::chrono::seconds(3));
* worker->signal();
* }
*
* void mainThread() {
* t1 = std::thread(progressThread);
* t2 = std::thread(otherThread);
* t1.join();
* t2.join();
* }
* @endcode
*
* @throws ucxx::Error if an error occurred while attempting to signal the worker.
*/
void signal();
/**
* @brief Block until an event has happened, then progresses.
*
* Blocks until an event has happened as part of UCX's wake-up mechanism and progress
* the worker. Additionally ensure inflight messages pending for cancellation are canceled.
*
* @code{.cpp}
* // worker is `std::shared_ptr<ucxx::Worker>`
*
* // Block until UCX's wakes up for an incoming event, then fully progresses the
* // worker
* worker->waitProgress();
* worker->progress();
*
* // All events have been progressed.
* @endcode
*
* @throws ucxx::Error if an error occurred while attempting to arm the worker.
*
* @returns `true` if any communication was progressed, `false` otherwise.
*/
bool waitProgress();
/**
* @brief Progress the worker only once.
*
* Wrapper for `ucp_worker_progress`.
*
* @code{.cpp}
* // worker is `std::shared_ptr<ucxx::Worker>`
* while (!worker->progressOnce()) ;
*
* // All events have been progressed.
* @endcode
*
* @returns `true` if any communication was progressed, `false` otherwise.
*/
bool progressOnce();
/**
* @brief Progress the worker until all communication events are completed.
*
* Iteratively calls `progressOnce()` until all communication events are completed.
* Additionally ensure inflight messages pending for cancellation are canceled.
*
* @code{.cpp}
* // worker is `std::shared_ptr<ucxx::Worker>`
* worker->progress();
*
* // All events have been progressed and inflight pending for cancellation were canceled.
* @endcode
*
* @returns whether any communication events have been progressed.
*/
bool progress();
/**
* @brief Register delayed request submission.
*
* Register `ucxx::Request` for delayed submission. When the `ucxx::Worker` is created
* with `enableDelayedSubmission=true`, calling actual UCX transfer routines will not
* happen immediately and instead will be submitted later by the worker thread.
*
* The purpose of this method is to offload as much as possible any work to the worker
* thread, thus decreasing computation on the caller thread, but potentially increasing
* transfer latency.
*
* @param[in] request the request to which the callback belongs, ensuring it remains
* alive until the callback is invoked.
* @param[in] callback the callback set to execute the UCP transfer routine during the
* worker thread loop.
*/
void registerDelayedSubmission(std::shared_ptr<Request> request,
DelayedSubmissionCallbackType callback);
/**
* @brief Register callback to be executed in progress thread before progressing.
*
* Register callback to be executed in the current or next iteration of the progress
* thread before the worker is progressed. There is no guarantee that the callback will
* be executed in the current or next iteration, this depends on where the progress thread
* is in the current iteration when this callback is registered. The lifetime of the
* callback must be ensured by the caller.
*
* The purpose of this method is to schedule operations to be executed in the progress
* thread, such as endpoint creation and closing, so that progressing doesn't ever need
* to occur in the application thread when using a progress thread.
*
* @param[in] callback the callback to execute before progressing the worker.
*/
void registerGenericPre(DelayedSubmissionCallbackType callback);
/**
* @brief Register callback to be executed in progress thread after progressing.
*
* Register callback to be executed in the current or next iteration of the progress
* thread after the worker is progressed. There is no guarantee that the callback will
* be executed in the current or next iteration, this depends on where the progress thread
* is in the current iteration when this callback is registered. The lifetime of the
* callback must be ensured by the caller.
*
* The purpose of this method is to schedule operations to be executed in the progress
* thread, immediately after progressing the worker completes.
*
* @param[in] callback the callback to execute after progressing the worker.
*/
void registerGenericPost(DelayedSubmissionCallbackType callback);
/**
* @brief Inquire if worker has been created with delayed submission enabled.
*
* Check whether the worker has been created with delayed submission enabled.
*
* @returns `true` if delayed submission is enabled, `false` otherwise.
*/
bool isDelayedRequestSubmissionEnabled() const;
/**
* @brief Inquire if worker has been created with future support.
*
* Check whether the worker has been created with future support.
*
* @returns `true` if future support is enabled, `false` otherwise.
*/
bool isFutureEnabled() const;
/**
* @brief Populate the future pool.
*
* To avoid taking blocking resources (such as the Python GIL) for every new future
* required by each `ucxx::Request`, the `ucxx::Worker` maintains a pool of futures
* that can be acquired when a new `ucxx::Request` is created. Currently the pool has
* a maximum size of 100 objects, and will refill once it goes under 50, otherwise
* calling this function results in a no-op.
*
* @throws std::runtime_error if future support is not implemented.
*/
virtual void populateFuturesPool();
/**
* @brief Get a future from the pool.
*
* Get a future from the pool. If the pool is empty,
* `ucxx::Worker::populateFuturesPool()` is called and a warning is raised, since
* that likely means the user failed to call the aforementioned method regularly.
*
* @throws std::runtime_error if future support is not implemented.
*
* @returns The `shared_ptr<ucxx::python::Future>` object
*/
virtual std::shared_ptr<Future> getFuture();
/**
* @brief Block until a request event.
*
* Blocks until some communication is completed and future is ready to be notified,
* shutdown was initiated or a timeout occurred (only if `periodNs > 0`). This method is
* intended for use from the notifier (such as the Python thread running it), where that
* thread will block until one of the aforementioned events occur.
*
* @throws std::runtime_error if future support is not implemented.
*
* @returns `RequestNotifierWaitState::Ready` if some communication completed,
* `RequestNotifierWaitState::Timeout` if a timeout occurred, or
* `RequestNotifierWaitState::Shutdown` if shutdown has initiated.
*/
virtual RequestNotifierWaitState waitRequestNotifier(uint64_t periodNs);
/**
* @brief Notify futures of each completed communication request.
*
* Notifies futures of each completed communication request of their new status. This
* method is intended to be used from the Notifier (such as the Python thread running it),
* where the thread will call `waitRequestNotifier()` and block until some communication
* is completed, and then call this method to notify all futures. If this is notifying
* a Python future, the thread where this method is called from must be using the same
* Python event loop as the thread that submitted the transfer request.
*
* @throws std::runtime_error if future support is not implemented.
*/
virtual void runRequestNotifier();
/**
* @brief Signal the notifier to terminate.
*
* Signals the notifier to terminate, awakening the `waitRequestNotifier()` blocking call.
*
* @throws std::runtime_error if future support is not implemented.
*/
virtual void stopRequestNotifierThread();
/**
* @brief Set callback to be executed at the progress thread start.
*
* Sets a callback that will be executed at the beginning of the progress thread. This
* can be used to initialize any resources that are required to be available on the thread
* the worker will be progressed from, such as a CUDA context.
*
* @param[in] callback function to execute during progress thread start
* @param[in] callbackArg argument to be passed to the callback function
*/
void setProgressThreadStartCallback(std::function<void(void*)> callback, void* callbackArg);
/**
* @brief Start the progress thread.
*
* Spawns a new thread that will take care of continuously progressing the worker. The
* thread can progress the worker in blocking mode, using `progressWorkerEvent()` only
* when worker events happen, or in polling mode by continuously calling `progress()`
* (incurs in high CPU utilization).
*
* @param[in] pollingMode use polling mode if `true`, or blocking mode if `false`.
* @param[in] epollTimeout timeout in ms when waiting for worker event, or -1 to block
* indefinitely, only applicable if `pollingMode==false`.
*/
void startProgressThread(const bool pollingMode = false, const int epollTimeout = 1);
/**
* @brief Stop the progress thread.
*
* Stop the progress thread.
*
* May be called by the user at any time, and also called during destructor if the
* worker thread was ever started.
*/
void stopProgressThread();
/**
* @brief Inquire if worker has a progress thread running.
*
* Check whether the worker currently has a progress thread running.
*
* @returns `true` if a progress thread is running, `false` otherwise.
*/
bool isProgressThreadRunning();
/**
* @brief Get the progress thread ID.
*
* Get the progress thread ID, only valid if `startProgressThread()` was called.
*
* @returns the progress thread ID.
*/
std::thread::id getProgressThreadId();
/**
* @brief Cancel inflight requests.
*
* Cancel inflight requests, returning the total number of requests that were canceled.
* This is usually executed during the progress loop.
*
* If the parent worker is running a progress thread, a maximum timeout may be specified
* for which the close operation will wait. This can be particularly important for cases
* where the progress thread might be attempting to acquire a resource (e.g., the Python
* GIL) while the current thread owns that resource. In particular for Python, the
* `~Worker()` will call this method for which we can't release the GIL when the garbage
* collector runs and destroys the object.
*
* @param[in] period maximum period a generic pre/post progress thread operation
* will wait for.
* @param[in] maxAttempts maximum number of attempts to cancel the requests, only
* applicable if worker is running a progress thread and `period > 0`.
*
* @returns Number of requests that were canceled.
*/
size_t cancelInflightRequests(uint64_t period = 0, uint64_t maxAttempts = 1);
/**
* @brief Schedule cancellation of inflight requests.
*
* Schedule inflight request to be canceled when `cancelInflightRequests()` is executed
* the next time, usually during the progress loop. This is usually called from a
* `ucxx::Endpoint`, for example when the error callback was called, signaling that
* inflight requests for that endpoint will not be completed successfully and should be
* canceled.
*
* @param[in] inflightRequests requests object that implements the `cancelAll()` method.
*/
void scheduleRequestCancel(std::shared_ptr<InflightRequests> inflightRequests);
/**
* @brief Remove reference to request from internal container.
*
* Remove the reference to a specific request from the internal container. This should
* be called when a request has completed and the `ucxx::Worker` does not need to keep
* track of it anymore. The raw pointer to a `ucxx::Request` is passed here as opposed
* to the usual `std::shared_ptr<ucxx::Request>` used elsewhere, this is because the
* raw pointer address is used as key to the requests reference, and this is called
* from the object's destructor.
*
* @param[in] request raw pointer to the request
*/
void removeInflightRequest(const Request* const request);
/**
* @brief Check for uncaught tag messages.
*
* Checks the worker for any uncaught tag messages. An uncaught tag message is any
* tag message that has been fully or partially received by the worker, but not matched
* by a corresponding `ucp_tag_recv_*` call.
*
* @code{.cpp}
* // `worker` is `std::shared_ptr<ucxx::Worker>`
* assert(!worker->tagProbe(0));
*
* // `ep` is a remote `std::shared_ptr<ucxx::Endpoint>` to the local `worker`
* ep->tagSend(buffer, length, 0);
*
* assert(worker->tagProbe(0));
* @endcode
*
* @returns `true` if any uncaught messages were received, `false` otherwise.
*/
bool tagProbe(const ucp_tag_t tag);
/**
* @brief Enqueue a tag receive operation.
*
* Enqueue a tag receive operation, returning a `std::shared<ucxx::Request>` that can
* be later awaited and checked for errors. This is a non-blocking operation, and the
* status of the transfer must be verified from the resulting request object before the
* data can be consumed.
*
* Using a future may be requested by specifying `enableFuture` if the worker
* implementation has support for it. If a future is requested, the application must then
* await on this future to ensure the transfer has completed.
*
* @param[in] buffer a raw pointer to pre-allocated memory where resulting
* data will be stored.
* @param[in] length the size in bytes of the tag message to be received.
* @param[in] tag the tag to match.
* @param[in] enableFuture whether a future should be created and subsequently
* notified.
* @param[in] callbackFunction user-defined callback function to call upon completion.
* @param[in] callbackData user-defined data to pass to the `callbackFunction`.
*
* @returns Request to be subsequently checked for the completion and its state.
*/
std::shared_ptr<Request> tagRecv(void* buffer,
size_t length,
ucp_tag_t tag,
const bool enableFuture = false,
RequestCallbackUserFunction callbackFunction = nullptr,
RequestCallbackUserData callbackData = nullptr);
/**
* @brief Get the address of the UCX worker object.
*
* Gets the address of the underlying UCX worker object, which can then be passed
* to a remote worker, allowing creating a new endpoint to the local worker via
* `ucxx::Worker::createEndpointFromWorkerAddress()`.
*
* @throws ucxx::Error if an error occurred while attempting to get the worker address.
*
* @returns The address of the local worker.
*/
std::shared_ptr<Address> getAddress();
/**
* @brief Create endpoint to worker listening on specific IP and port.
*
* Creates an endpoint to a remote worker listening on a specific IP address and port.
* The remote worker must have an active listener created with
* `ucxx::Worker::createListener()`.
*
* @code{.cpp}
* // `worker` is `std::shared_ptr<ucxx::Worker>`
* // Create endpoint to worker listening on `10.10.10.10:12345`.
* auto ep = worker->createEndpointFromHostname("10.10.10.10", 12345);
* @endcode
*
* @throws std::invalid_argument if the IP address or hostname is invalid.
* @throws std::bad_alloc if there was an error allocating space to handle the address.
* @throws ucxx::Error if an error occurred while attempting to create the endpoint.
*
* @param[in] ipAddress string containing the IP address of the remote worker.
* @param[in] port port number where the remote worker is listening at.
* @param[in] endpointErrorHandling enable endpoint error handling if `true`,
* disable otherwise.
*
* @returns The `shared_ptr<ucxx::Endpoint>` object
*/
std::shared_ptr<Endpoint> createEndpointFromHostname(std::string ipAddress,
uint16_t port,
bool endpointErrorHandling = true);
/**
* @brief Create endpoint to worker located at UCX address.
*
* Creates an endpoint to a listener-independent remote worker. The worker location is
* identified by its UCX address, wrapped by a `std::shared_ptr<ucxx::Address>` object.
*
* @code{.cpp}
* // `worker` is `std::shared_ptr<ucxx::Worker>`
* auto localAddress = worker->getAddress();
*
* // pass address to remote process
* // ...
*
* // receive address received from remote process
* // ...
*
* // `remoteAddress` is `std::shared_ptr<ucxx::Address>`
* auto ep = worker->createEndpointFromAddress(remoteAddress);
* @endcode
*
* @throws ucxx::Error if an error occurred while attempting to create the endpoint.
*
* @param[in] address address of the remote UCX worker.
* @param[in] endpointErrorHandling enable endpoint error handling if `true`,
* disable otherwise.
*
* @returns The `shared_ptr<ucxx::Endpoint>` object
*/
std::shared_ptr<Endpoint> createEndpointFromWorkerAddress(std::shared_ptr<Address> address,
bool endpointErrorHandling = true);
/**
* @brief Listen for remote connections on given port.
*
* Starts a listener on given port. The listener allows remote processes to connect to
* the local worker via an IP and port pair. The connection is then handle via a
* callback specified by the user.
*
* @throws std::bad_alloc if there was an error allocating space to handle the address.
* @throws ucxx::Error if an error occurred while attempting to create the listener or
* to acquire its address.
*
* @param[in] port port number where to listen at.
* @param[in] callback callback to handle each incoming connection.
* @param[in] callbackArgs pointer to argument to pass to the callback.
*
* @returns The `shared_ptr<ucxx::Listener>` object
*/
std::shared_ptr<Listener> createListener(uint16_t port,
ucp_listener_conn_callback_t callback,
void* callbackArgs);
/**
* @brief Register allocator for active messages.
*
* Register a new allocator for active messages. By default, only one allocator is defined
* for host memory (`UCS_MEMORY_TYPE_HOST`), and is used as a fallback when an allocator
* for the source's memory type is unavailable. In many circumstances relying exclusively
* on the host allocator is undesirable, for example when transferring CUDA buffers the
* destination is always going to be a host buffer and prevent the use of transports such
* as NVLink or InfiniBand+GPUDirectRDMA. For that reason it's important that the user
* defines those allocators that are important for the application.
*
* If the `memoryType` has already been registered, the previous allocator will be
* replaced by the new one. Be careful when doing this after transfers have started, there
* are no guarantees that inflight messages have not already been allocated with the old
* allocator for that type.
*
* @code{.cpp}
* // context is `std::shared_ptr<ucxx::Context>`
* auto worker = context->createWorker(false);
*
* worker->registerAmAllocator(`UCS_MEMORY_TYPE_CUDA`, ucxx::RMMBuffer);
* @endcode
*
* @param[in] memoryType the memory type the allocator will be used for.
* @param[in] allocator the allocator callable that will be used to allocate new
* active message buffers.
*/
void registerAmAllocator(ucs_memory_type_t memoryType, AmAllocatorType allocator);
/**
* @brief Check for uncaught active messages.
*
* Checks the worker for any uncaught active messages. An uncaught active message is any
* active message that has been fully or partially received by the worker, but not matched
* by a corresponding `createRequestAmRecv()` call.
*
* @code{.cpp}
* // `worker` is `std::shared_ptr<ucxx::Worker>`
* // `ep` is a remote `std::shared_ptr<ucxx::Endpoint>` to the local `worker`
* assert(!worker->amProbe(ep->getHandle()));
*
* ep->amSend(buffer, length);
*
* assert(worker->amProbe(0));
* @endcode
*
* @returns `true` if any uncaught messages were received, `false` otherwise.
*/
bool amProbe(const ucp_ep_h endpointHandle) const;
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/endpoint.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <netdb.h>
#include <memory>
#include <string>
#include <vector>
#include <ucp/api/ucp.h>
#include <ucxx/address.h>
#include <ucxx/component.h>
#include <ucxx/exception.h>
#include <ucxx/inflight_requests.h>
#include <ucxx/listener.h>
#include <ucxx/request.h>
#include <ucxx/typedefs.h>
#include <ucxx/utils/sockaddr.h>
#include <ucxx/worker.h>
namespace ucxx {
/**
 * @brief Custom deleter for a `ucp_ep_params_t` object.
 *
 * Deleter intended for use with smart pointers owning a `ucp_ep_params_t*`; the call
 * operator is invoked when the managed parameters object is destroyed, releasing any
 * resources associated with it.
 */
struct EpParamsDeleter {
/**
 * @brief Release the given `ucp_ep_params_t` object.
 *
 * @param[in] ptr the endpoint parameters object to release.
 */
void operator()(ucp_ep_params_t* ptr);
};
/**
 * @brief Data passed to an endpoint's error callback.
 *
 * Aggregates the state an endpoint error handler needs: the resulting endpoint status,
 * the inflight requests to be canceled upon error, an optional user-provided close
 * callback with its argument, and the worker the endpoint was created from.
 */
struct ErrorCallbackData {
ucs_status_t status; ///< Endpoint status
std::shared_ptr<InflightRequests> inflightRequests; ///< Endpoint inflight requests
std::function<void(void*)> closeCallback; ///< Close callback to call
void* closeCallbackArg; ///< Argument to be passed to close callback
std::shared_ptr<Worker> worker; ///< Worker the endpoint has been created from
};
/**
 * @brief Component encapsulating a UCP endpoint.
 *
 * The UCP layer provides a handle to access endpoints in the form of a `ucp_ep_h` object,
 * this class encapsulates that object and provides methods to simplify its handling,
 * including starting transfer operations and tracking/canceling inflight requests.
 */
class Endpoint : public Component {
 private:
  ucp_ep_h _handle{nullptr};          ///< Handle to the UCP endpoint
  ucp_ep_h _originalHandle{nullptr};  ///< Handle to the UCP endpoint, after it was previously
                                      ///< closed, used for logging purposes only
  bool _endpointErrorHandling{true};  ///< Whether the endpoint enables error handling
  std::unique_ptr<ErrorCallbackData> _callbackData{
    nullptr};  ///< Data struct to pass to endpoint error handling callback
  std::shared_ptr<InflightRequests> _inflightRequests{
    std::make_shared<InflightRequests>()};  ///< The inflight requests

  /**
   * @brief Private constructor of `ucxx::Endpoint`.
   *
   * This is the internal implementation of `ucxx::Endpoint` constructor, made private not
   * to be called directly. This constructor is made private to ensure all UCXX objects
   * are shared pointers and the correct lifetime management of each one.
   *
   * Instead the user should use one of the following:
   *
   * - `ucxx::Listener::createEndpointFromConnRequest`
   * - `ucxx::Worker::createEndpointFromHostname()`
   * - `ucxx::Worker::createEndpointFromWorkerAddress()`
   * - `ucxx::createEndpointFromConnRequest()`
   * - `ucxx::createEndpointFromHostname()`
   * - `ucxx::createEndpointFromWorkerAddress()`
   *
   * @param[in] workerOrListener the parent component, which may either be a
   *                             `std::shared_ptr<Listener>` or
   *                             `std::shared_ptr<Worker>`.
   * @param[in] params parameters specifying UCP endpoint capabilities.
   * @param[in] endpointErrorHandling whether to enable endpoint error handling.
   */
  Endpoint(std::shared_ptr<Component> workerOrListener,
           ucp_ep_params_t* params,
           bool endpointErrorHandling);

  /**
   * @brief Register an inflight request.
   *
   * Called each time a new transfer request is made by the `Endpoint`, such that it may
   * be canceled when necessary. Also schedule requests to be canceled immediately after
   * registration if the endpoint error handler has been called with an error.
   *
   * @param[in] request the request to register.
   *
   * @return the request that was registered (i.e., the `request` argument itself).
   */
  std::shared_ptr<Request> registerInflightRequest(std::shared_ptr<Request> request);

 public:
  Endpoint()                = delete;
  Endpoint(const Endpoint&) = delete;
  Endpoint& operator=(Endpoint const&) = delete;
  Endpoint(Endpoint&& o)               = delete;
  Endpoint& operator=(Endpoint&& o) = delete;

  ~Endpoint();

  /**
   * @brief Constructor for `shared_ptr<ucxx::Endpoint>`.
   *
   * The constructor for a `shared_ptr<ucxx::Endpoint>` object, connecting to a listener
   * from the given hostname or IP address and port pair.
   *
   * @code{.cpp}
   * // worker is `std::shared_ptr<ucxx::Worker>`, with a presumed listener on
   * // "localhost:12345"
   * auto endpoint = worker->createEndpointFromHostname("localhost", 12345, true);
   *
   * // Equivalent to line above
   * // auto endpoint = ucxx::createEndpointFromHostname(worker, "localhost", 12345, true);
   * @endcode
   *
   * @param[in] worker parent worker from which to create the endpoint.
   * @param[in] ipAddress hostname or IP address the listener is bound to.
   * @param[in] port port the listener is bound to.
   * @param[in] endpointErrorHandling whether to enable endpoint error handling.
   *
   * @returns The `shared_ptr<ucxx::Endpoint>` object
   */
  friend std::shared_ptr<Endpoint> createEndpointFromHostname(std::shared_ptr<Worker> worker,
                                                              std::string ipAddress,
                                                              uint16_t port,
                                                              bool endpointErrorHandling);

  /**
   * @brief Constructor for `shared_ptr<ucxx::Endpoint>`.
   *
   * The constructor for a `shared_ptr<ucxx::Endpoint>` object from a `ucp_conn_request_h`,
   * as delivered by a `ucxx::Listener` connection callback.
   *
   * @code{.cpp}
   * // listener is `std::shared_ptr<ucxx::Listener>`, with a `ucp_conn_request_h` delivered
   * // by a `ucxx::Listener` connection callback.
   * auto endpoint = listener->createEndpointFromConnRequest(connRequest, true);
   *
   * // Equivalent to line above
   * // auto endpoint = ucxx::createEndpointFromConnRequest(listener, connRequest, true);
   * @endcode
   *
   * @param[in] listener listener from which to create the endpoint.
   * @param[in] connRequest handle to connection request delivered by a
   *                        listener callback.
   * @param[in] endpointErrorHandling whether to enable endpoint error handling.
   *
   * @returns The `shared_ptr<ucxx::Endpoint>` object
   */
  friend std::shared_ptr<Endpoint> createEndpointFromConnRequest(std::shared_ptr<Listener> listener,
                                                                 ucp_conn_request_h connRequest,
                                                                 bool endpointErrorHandling);

  /**
   * @brief Constructor for `shared_ptr<ucxx::Endpoint>`.
   *
   * The constructor for a `shared_ptr<ucxx::Endpoint>` object from a `shared_ptr<ucxx::Address>`.
   *
   * @code{.cpp}
   * // worker is `std::shared_ptr<ucxx::Worker>`, address is `std::shared_ptr<ucxx::Address>`
   * auto endpoint = worker->createEndpointFromWorkerAddress(address, true);
   *
   * // Equivalent to line above
   * // auto endpoint = ucxx::createEndpointFromWorkerAddress(worker, address, true);
   * @endcode
   *
   * @param[in] worker parent worker from which to create the endpoint.
   * @param[in] address address of the remote UCX worker
   * @param[in] endpointErrorHandling whether to enable endpoint error handling.
   *
   * @returns The `shared_ptr<ucxx::Endpoint>` object
   */
  friend std::shared_ptr<Endpoint> createEndpointFromWorkerAddress(std::shared_ptr<Worker> worker,
                                                                   std::shared_ptr<Address> address,
                                                                   bool endpointErrorHandling);

  /**
   * @brief Get the underlying `ucp_ep_h` handle.
   *
   * Lifetime of the `ucp_ep_h` handle is managed by the `ucxx::Endpoint` object and its
   * ownership is non-transferrable. Once the `ucxx::Endpoint` is destroyed the handle
   * is not valid anymore, it is the user's responsibility to ensure the owner's lifetime
   * while using the handle.
   *
   * @code{.cpp}
   * // endpoint is `std::shared_ptr<ucxx::Endpoint>`
   * ucp_ep_h endpointHandle = endpoint->getHandle();
   * @endcode
   *
   * @returns The underlying `ucp_ep_h` handle.
   */
  ucp_ep_h getHandle();

  /**
   * @brief Check whether the endpoint is still alive.
   *
   * Check whether the endpoint is still alive, generally `true` until `close()` is called
   * or the endpoint errors and the error handling procedure is executed. Always `true` if
   * endpoint error handling is disabled.
   *
   * @returns whether the endpoint is still alive if endpoint enables error handling, always
   *          returns `true` if error handling is disabled.
   */
  bool isAlive() const;

  /**
   * @brief Raises an exception if an error occurred.
   *
   * Raises an exception if an error occurred and error handling is enabled for the
   * endpoint, no-op otherwise.
   *
   * @throws ucxx::ConnectionResetError if `UCP_ERR_CONNECTION_RESET` occurred.
   * @throws ucxx::Error               if any other UCP error occurred.
   */
  void raiseOnError();

  /**
   * @brief Remove reference to request from internal container.
   *
   * Remove the reference to a specific request from the internal container. This should
   * be called when a request has completed and the `ucxx::Endpoint` does not need to keep
   * track of it anymore. The raw pointer to a `ucxx::Request` is passed here as opposed
   * to the usual `std::shared_ptr<ucxx::Request>` used elsewhere, this is because the
   * raw pointer address is used as key to the requests reference, and this is called
   * from the object's destructor.
   *
   * @param[in] request raw pointer to the request
   */
  void removeInflightRequest(const Request* const request);

  /**
   * @brief Cancel inflight requests.
   *
   * Cancel inflight requests, returning the total number of requests that were canceled.
   * This is usually executed by `close()`, when pending requests will no longer be able
   * to complete.
   *
   * If the parent worker is running a progress thread, a maximum timeout may be specified
   * for which the close operation will wait. This can be particularly important for cases
   * where the progress thread might be attempting to acquire a resource (e.g., the Python
   * GIL) while the current thread owns that resource. In particular for Python, the
   * `~Endpoint()` will call this method for which we can't release the GIL when the garbage
   * collector runs and destroys the object.
   *
   * @param[in] period maximum period to wait for a generic pre/post progress thread
   *                   operation will wait for.
   * @param[in] maxAttempts maximum number of attempts to close endpoint, only applicable
   *                        if worker is running a progress thread and `period > 0`.
   *
   * @returns Number of requests that were canceled.
   */
  size_t cancelInflightRequests(uint64_t period = 0, uint64_t maxAttempts = 1);

  /**
   * @brief Register a user-defined callback to call when endpoint closes.
   *
   * Register a user-defined callback and argument that is later called immediately after
   * the endpoint closes. The callback is executed either if the endpoint closed
   * successfully after completing and disconnecting from the remote endpoint, but more
   * importantly when any error occurs, allowing the application to be notified immediately
   * after such an event occurred.
   *
   * @param[in] closeCallback `std::function` to a function returning `void` and
   *                          receiving a single opaque pointer.
   * @param[in] closeCallbackArg pointer to optional user-allocated callback argument.
   */
  void setCloseCallback(std::function<void(void*)> closeCallback, void* closeCallbackArg);

  /**
   * @brief Enqueue an active message send operation.
   *
   * Enqueue an active message send operation, returning a `std::shared_ptr<ucxx::Request>`
   * that can be later awaited and checked for errors. This is a non-blocking operation, and
   * the status of the transfer must be verified from the resulting request object before
   * the data can be released.
   *
   * Using a Python future may be requested by specifying `enablePythonFuture`. If a
   * Python future is requested, the Python application must then await on this future to
   * ensure the transfer has completed. Requires UCXX Python support.
   *
   * @param[in] buffer a raw pointer to the data to be sent.
   * @param[in] length the size in bytes of the active message to be sent.
   * @param[in] memoryType the memory type of the buffer.
   * @param[in] enablePythonFuture whether a python future should be created and
   *                               subsequently notified.
   * @param[in] callbackFunction user-defined callback function to call upon completion.
   * @param[in] callbackData user-defined data to pass to the `callbackFunction`.
   *
   * @returns Request to be subsequently checked for the completion and its state.
   */
  std::shared_ptr<Request> amSend(void* buffer,
                                  size_t length,
                                  ucs_memory_type_t memoryType,
                                  const bool enablePythonFuture                = false,
                                  RequestCallbackUserFunction callbackFunction = nullptr,
                                  RequestCallbackUserData callbackData         = nullptr);

  /**
   * @brief Enqueue an active message receive operation.
   *
   * Enqueue an active message receive operation, returning a
   * `std::shared_ptr<ucxx::Request>` that can be later awaited and checked for errors,
   * making data available via the return value's `getRecvBuffer()` method once the
   * operation completes successfully. This is a non-blocking operation, and the status of
   * the transfer must be verified from the resulting request object before the data can be
   * consumed.
   *
   * Using a Python future may be requested by specifying `enablePythonFuture`. If a
   * Python future is requested, the Python application must then await on this future to
   * ensure the transfer has completed. Requires UCXX Python support.
   *
   * @param[in] enablePythonFuture whether a python future should be created and
   *                               subsequently notified.
   * @param[in] callbackFunction user-defined callback function to call upon completion.
   * @param[in] callbackData user-defined data to pass to the `callbackFunction`.
   *
   * @returns Request to be subsequently checked for the completion state and data.
   */
  std::shared_ptr<Request> amRecv(const bool enablePythonFuture                = false,
                                  RequestCallbackUserFunction callbackFunction = nullptr,
                                  RequestCallbackUserData callbackData         = nullptr);

  /**
   * @brief Enqueue a stream send operation.
   *
   * Enqueue a stream send operation, returning a `std::shared<ucxx::Request>` that can
   * be later awaited and checked for errors. This is a non-blocking operation, and the
   * status of the transfer must be verified from the resulting request object before the
   * data can be released.
   *
   * Using a Python future may be requested by specifying `enablePythonFuture`. If a
   * Python future is requested, the Python application must then await on this future to
   * ensure the transfer has completed. Requires UCXX Python support.
   *
   * @param[in] buffer a raw pointer to the data to be sent.
   * @param[in] length the size in bytes of the stream message to be sent.
   * @param[in] enablePythonFuture whether a python future should be created and
   *                               subsequently notified.
   *
   * @returns Request to be subsequently checked for the completion and its state.
   */
  std::shared_ptr<Request> streamSend(void* buffer, size_t length, const bool enablePythonFuture);

  /**
   * @brief Enqueue a stream receive operation.
   *
   * Enqueue a stream receive operation, returning a `std::shared<ucxx::Request>` that can
   * be later awaited and checked for errors. This is a non-blocking operation, and the
   * status of the transfer must be verified from the resulting request object before the
   * data can be consumed.
   *
   * Using a Python future may be requested by specifying `enablePythonFuture`. If a
   * Python future is requested, the Python application must then await on this future to
   * ensure the transfer has completed. Requires UCXX Python support.
   *
   * @param[in] buffer a raw pointer to pre-allocated memory where resulting
   *                   data will be stored.
   * @param[in] length the size in bytes of the stream message to be received.
   * @param[in] enablePythonFuture whether a python future should be created and
   *                               subsequently notified.
   *
   * @returns Request to be subsequently checked for the completion and its state.
   */
  std::shared_ptr<Request> streamRecv(void* buffer, size_t length, const bool enablePythonFuture);

  /**
   * @brief Enqueue a tag send operation.
   *
   * Enqueue a tag send operation, returning a `std::shared<ucxx::Request>` that can
   * be later awaited and checked for errors. This is a non-blocking operation, and the
   * status of the transfer must be verified from the resulting request object before the
   * data can be released.
   *
   * Using a Python future may be requested by specifying `enablePythonFuture`. If a
   * Python future is requested, the Python application must then await on this future to
   * ensure the transfer has completed. Requires UCXX Python support.
   *
   * @param[in] buffer a raw pointer to the data to be sent.
   * @param[in] length the size in bytes of the tag message to be sent.
   * @param[in] tag the tag to match.
   * @param[in] enablePythonFuture whether a python future should be created and
   *                               subsequently notified.
   * @param[in] callbackFunction user-defined callback function to call upon completion.
   * @param[in] callbackData user-defined data to pass to the `callbackFunction`.
   *
   * @returns Request to be subsequently checked for the completion and its state.
   */
  std::shared_ptr<Request> tagSend(void* buffer,
                                   size_t length,
                                   ucp_tag_t tag,
                                   const bool enablePythonFuture                = false,
                                   RequestCallbackUserFunction callbackFunction = nullptr,
                                   RequestCallbackUserData callbackData         = nullptr);

  /**
   * @brief Enqueue a tag receive operation.
   *
   * Enqueue a tag receive operation, returning a `std::shared<ucxx::Request>` that can
   * be later awaited and checked for errors. This is a non-blocking operation, and the
   * status of the transfer must be verified from the resulting request object before the
   * data can be consumed.
   *
   * Using a Python future may be requested by specifying `enablePythonFuture`. If a
   * Python future is requested, the Python application must then await on this future to
   * ensure the transfer has completed. Requires UCXX Python support.
   *
   * @param[in] buffer a raw pointer to pre-allocated memory where resulting
   *                   data will be stored.
   * @param[in] length the size in bytes of the tag message to be received.
   * @param[in] tag the tag to match.
   * @param[in] enablePythonFuture whether a python future should be created and
   *                               subsequently notified.
   * @param[in] callbackFunction user-defined callback function to call upon completion.
   * @param[in] callbackData user-defined data to pass to the `callbackFunction`.
   *
   * @returns Request to be subsequently checked for the completion and its state.
   */
  std::shared_ptr<Request> tagRecv(void* buffer,
                                   size_t length,
                                   ucp_tag_t tag,
                                   const bool enablePythonFuture                = false,
                                   RequestCallbackUserFunction callbackFunction = nullptr,
                                   RequestCallbackUserData callbackData         = nullptr);

  /**
   * @brief Enqueue a multi-buffer tag send operation.
   *
   * Enqueue a multi-buffer tag send operation, returning a
   * `std::shared<ucxx::RequestTagMulti>` that can be later awaited and checked for errors.
   * This is a non-blocking operation, and the status of the transfer must be verified from
   * the resulting request object before the data can be released.
   *
   * The primary use of multi-buffer transfers is in Python where we want to reduce the
   * amount of futures needed to watch for, thus reducing Python overhead. However, this
   * may be used as a convenience implementation for transfers that require multiple
   * frames, internally this is implemented as one or more `tagSend` calls sending headers
   * (depending on the number of frames being transferred), followed by one `tagSend` for
   * each data frame.
   *
   * Using a Python future may be requested by specifying `enablePythonFuture`. If a
   * Python future is requested, the Python application must then await on this future to
   * ensure the transfer has completed. Requires UCXX Python support.
   *
   * @throws std::runtime_error if sizes of `buffer`, `size` and `isCUDA` do not match.
   *
   * @param[in] buffer a vector of raw pointers to the data frames to be sent.
   * @param[in] size a vector of size in bytes of each frame to be sent.
   * @param[in] isCUDA a vector of booleans (integers to prevent incoherence
   *                   with other vector types) indicating whether frame is
   *                   CUDA, to ensure proper memory allocation by the
   *                   receiver.
   * @param[in] tag the tag to match.
   * @param[in] enablePythonFuture whether a python future should be created and
   *                               subsequently notified.
   *
   * @returns Request to be subsequently checked for the completion and its state.
   */
  std::shared_ptr<Request> tagMultiSend(const std::vector<void*>& buffer,
                                        const std::vector<size_t>& size,
                                        const std::vector<int>& isCUDA,
                                        const ucp_tag_t tag,
                                        const bool enablePythonFuture);

  /**
   * @brief Enqueue a multi-buffer tag receive operation.
   *
   * Enqueue a multi-buffer tag receive operation, returning a
   * `std::shared<ucxx::RequestTagMulti>` that can be later awaited and checked for errors.
   * This is a non-blocking operation, and because the receiver has no a priori knowledge
   * of the data being received, memory allocations are automatically handled internally.
   * The receiver must have the same capabilities of the sender, so that if the sender is
   * compiled with RMM support to allow for CUDA transfers, the receiver must have the
   * ability to understand and allocate CUDA memory.
   *
   * Using a Python future may be requested by specifying `enablePythonFuture`. If a
   * Python future is requested, the Python application must then await on this future to
   * ensure the transfer has completed. Requires UCXX Python support.
   *
   * @param[in] tag the tag to match.
   * @param[in] enablePythonFuture whether a python future should be created and
   *                               subsequently notified.
   *
   * @returns Request to be subsequently checked for the completion and its state.
   */
  std::shared_ptr<Request> tagMultiRecv(const ucp_tag_t tag, const bool enablePythonFuture);

  /**
   * @brief Get `ucxx::Worker` component from a worker or listener object.
   *
   * A `std::shared_ptr<ucxx::Endpoint>` needs to be created and registered by
   * `std::shared_ptr<ucxx::Worker>`, but the endpoint may be a child of a
   * `std::shared_ptr<ucxx::Listener>` object. For convenience, this method can be used to
   * get the `std::shared_ptr<ucxx::Worker>` which the endpoint is associated with.
   *
   * @returns The `std::shared_ptr<ucxx::Worker>` which the endpoint is associated with.
   */
  std::shared_ptr<Worker> getWorker();

  /**
   * @brief The error callback registered at endpoint creation time.
   *
   * When the endpoint is created with error handling support this method is registered as
   * the callback to be called when the endpoint is closing, it is responsible for checking
   * the closing status and update internal state accordingly. If error handling support is
   * not active, this method is not registered nor called.
   *
   * The signature for this method must match `ucp_err_handler_cb_t`.
   */
  static void errorCallback(void* arg, ucp_ep_h ep, ucs_status_t status);

  /**
   * @brief Close the endpoint while keeping the object alive.
   *
   * Close the endpoint without requiring to destroy the object. This may be useful when
   * `std::shared_ptr<ucxx::Request>` objects are still alive.
   *
   * If the endpoint was created with error handling support, the error callback will be
   * executed, implying the user-defined callback will also be executed if one was
   * registered with `setCloseCallback()`.
   *
   * If the parent worker is running a progress thread, a maximum timeout may be specified
   * for which the close operation will wait. This can be particularly important for cases
   * where the progress thread might be attempting to acquire a resource (e.g., the Python
   * GIL) while the current thread owns that resource. In particular for Python, the
   * `~Endpoint()` will call this method for which we can't release the GIL when the garbage
   * collector runs and destroys the object.
   *
   * @param[in] period maximum period to wait for a generic pre/post progress thread
   *                   operation will wait for.
   * @param[in] maxAttempts maximum number of attempts to close endpoint, only applicable
   *                        if worker is running a progress thread and `period > 0`.
   *
   */
  void close(uint64_t period = 0, uint64_t maxAttempts = 1);
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/header.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <array>
#include <string>
#include <vector>
namespace ucxx {
// Fixed capacity of the per-header frame metadata arrays; a transfer with more frames
// requires chaining additional headers (see `Header::next` and `Header::buildHeaders`).
const size_t HeaderFramesSize = 100;
/**
 * @brief A serializable fixed-size header describing the frames of a multi-frame transfer.
 *
 * Carries, for up to `HeaderFramesSize` frames, whether each frame is CUDA or host memory
 * and its size in bytes, plus a flag indicating whether another header follows. Headers
 * can be serialized for transmission and reconstructed on the receiving end.
 */
class Header {
 private:
  /**
   * @brief Deserialize header.
   *
   * Deserialize a fixed-size header from serialized data.
   *
   * @param[in] serializedHeader the header in serialized format.
   */
  void deserialize(const std::string& serializedHeader);

 public:
  bool next;                                  ///< Whether there is a next header
  size_t nframes;                             ///< Number of frames
  std::array<int, HeaderFramesSize> isCUDA;   ///< Flag for whether each frame is CUDA or host
  std::array<size_t, HeaderFramesSize> size;  ///< Size in bytes of each frame

  Header() = delete;

  /**
   * @brief Constructor of a fixed-size header.
   *
   * Constructor of a fixed-size header used to transmit pre-defined information about
   * frames that the receiver does not need to know anything about.
   *
   * This constructor receives a flag `next` indicating whether the next message the
   * receiver should expect is another header (in case the number of frames is larger than
   * the pre-defined size), the number of frames `nframes` it contains information for,
   * and pointers to `nframes` arrays of whether each frame is CUDA (`isCUDA == true`) or
   * host (`isCUDA == false`) and the size `size` of each frame in bytes.
   *
   * @param[in] next whether the receiver should expect a next header.
   * @param[in] nframes the number of frames the header contains information for (must be
   *                    lower or equal than `HeaderFramesSize`).
   * @param[in] isCUDA array with length `nframes` containing flag of whether each of the
   *                   frames being transferred are CUDA (`true`) or host (`false`).
   * @param[in] size array with length `nframes` containing the size in bytes of each
   *                 frame.
   */
  Header(bool next, size_t nframes, int* isCUDA, size_t* size);

  /**
   * @brief Constructor of a fixed-size header from serialized data.
   *
   * Reconstruct (i.e., deserialize) a fixed-size header from serialized data.
   *
   * @param[in] serializedHeader the header in serialized format.
   */
  explicit Header(std::string serializedHeader);

  /**
   * @brief Get the size of the underlying data.
   *
   * Get the size of the underlying data, in other words, the size of a serialized
   * `ucxx::Header` ready for transfer.
   *
   * @returns the size of the underlying data.
   */
  static size_t dataSize();

  /**
   * @brief Get the serialized data.
   *
   * Get the serialized data ready for transfer.
   *
   * @returns the serialized data.
   */
  const std::string serialize() const;

  /**
   * @brief Convenience method to build headers given arbitrary-sized input.
   *
   * Convenience method to build one or more headers given arbitrary-sized input `size` and
   * `isCUDA` vectors.
   *
   * @param[in] size vector containing the size in bytes of each frame.
   * @param[in] isCUDA vector containing flag of whether each frame being transferred are
   *                   CUDA (`1`) or host (`0`).
   *
   * @returns A vector of one or more `ucxx::Header` objects.
   */
  static std::vector<Header> buildHeaders(const std::vector<size_t>& size,
                                          const std::vector<int>& isCUDA);
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/exception.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <exception>
#include <string>
namespace ucxx {

/**
 * @brief Base class for all UCXX exceptions.
 *
 * Stores an explanatory message provided at construction time and exposes it through
 * the standard `std::exception::what()` interface. Each `UCS_ERR_*` status code has a
 * corresponding subclass below so callers may catch specific error conditions.
 */
class Error : public std::exception {
 private:
  std::string _msg{};  ///< The explanatory message returned by `what()`

 public:
  /**
   * @brief Construct the exception with an explanatory message.
   *
   * @param[in] msg the explanatory string.
   */
  explicit Error(const std::string& msg) : _msg{msg} {}

  /// @returns The explanatory string passed at construction time.
  const char* what() const noexcept override { return this->_msg.c_str(); }
};

/**
 * @brief Exception raised when `UCS_ERR_NO_MESSAGE` occurs.
 */
class NoMessageError : public Error {
 public:
  explicit NoMessageError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_NO_RESOURCE` occurs.
 */
class NoResourceError : public Error {
 public:
  explicit NoResourceError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_IO_ERROR` occurs.
 */
class IOError : public Error {
 public:
  explicit IOError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_NO_MEMORY` occurs.
 */
class NoMemoryError : public Error {
 public:
  explicit NoMemoryError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_INVALID_PARAM` occurs.
 */
class InvalidParamError : public Error {
 public:
  explicit InvalidParamError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_UNREACHABLE` occurs.
 */
class UnreachableError : public Error {
 public:
  explicit UnreachableError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_INVALID_ADDR` occurs.
 */
class InvalidAddrError : public Error {
 public:
  explicit InvalidAddrError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_NOT_IMPLEMENTED` occurs.
 */
class NotImplementedError : public Error {
 public:
  explicit NotImplementedError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_MESSAGE_TRUNCATED` occurs.
 */
class MessageTruncatedError : public Error {
 public:
  explicit MessageTruncatedError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_NO_PROGRESS` occurs.
 */
class NoProgressError : public Error {
 public:
  explicit NoProgressError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_BUFFER_TOO_SMALL` occurs.
 */
class BufferTooSmallError : public Error {
 public:
  explicit BufferTooSmallError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_NO_ELEM` occurs.
 */
class NoElemError : public Error {
 public:
  explicit NoElemError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_SOME_CONNECTS_FAILED` occurs.
 */
class SomeConnectsFailedError : public Error {
 public:
  explicit SomeConnectsFailedError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_NO_DEVICE` occurs.
 */
class NoDeviceError : public Error {
 public:
  explicit NoDeviceError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_BUSY` occurs.
 */
class BusyError : public Error {
 public:
  explicit BusyError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_CANCELED` occurs.
 */
class CanceledError : public Error {
 public:
  explicit CanceledError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_SHMEM_SEGMENT` occurs.
 */
class ShmemSegmentError : public Error {
 public:
  explicit ShmemSegmentError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_ALREADY_EXISTS` occurs.
 */
class AlreadyExistsError : public Error {
 public:
  explicit AlreadyExistsError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_OUT_OF_RANGE` occurs.
 */
class OutOfRangeError : public Error {
 public:
  explicit OutOfRangeError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_TIMED_OUT` occurs.
 */
class TimedOutError : public Error {
 public:
  explicit TimedOutError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_EXCEEDS_LIMIT` occurs.
 */
class ExceedsLimitError : public Error {
 public:
  explicit ExceedsLimitError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_UNSUPPORTED` occurs.
 */
class UnsupportedError : public Error {
 public:
  explicit UnsupportedError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_REJECTED` occurs.
 */
class RejectedError : public Error {
 public:
  explicit RejectedError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_NOT_CONNECTED` occurs.
 */
class NotConnectedError : public Error {
 public:
  explicit NotConnectedError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_CONNECTION_RESET` occurs.
 */
class ConnectionResetError : public Error {
 public:
  explicit ConnectionResetError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_FIRST_LINK_FAILURE` occurs.
 */
class FirstLinkFailureError : public Error {
 public:
  explicit FirstLinkFailureError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_LAST_LINK_FAILURE` occurs.
 */
class LastLinkFailureError : public Error {
 public:
  explicit LastLinkFailureError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_FIRST_ENDPOINT_FAILURE` occurs.
 */
class FirstEndpointFailureError : public Error {
 public:
  explicit FirstEndpointFailureError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_ENDPOINT_TIMEOUT` occurs.
 */
class EndpointTimeoutError : public Error {
 public:
  explicit EndpointTimeoutError(const std::string& msg) : Error(msg) {}
};

/**
 * @brief Exception raised when `UCS_ERR_LAST_ENDPOINT_FAILURE` occurs.
 */
class LastEndpointFailureError : public Error {
 public:
  explicit LastEndpointFailureError(const std::string& msg) : Error(msg) {}
};

}  // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/context.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <ucp/api/ucp.h>
#include <ucxx/component.h>
#include <ucxx/config.h>
#include <ucxx/constructors.h>
namespace ucxx {
class Worker;
/**
 * @brief Component encapsulating a UCP context.
 *
 * The UCP layer provides a handle to access contexts in the form of a `ucp_context_h`
 * object, this class encapsulates that object and provides methods to simplify its
 * handling, including the creation of child `ucxx::Worker` objects.
 */
class Context : public Component {
 private:
  ucp_context_h _handle{nullptr};  ///< The UCP context handle
  Config _config{{}};              ///< UCP context configuration variables
  uint64_t _featureFlags{0};       ///< Feature flags used to construct UCP context
  bool _cudaSupport{false};        ///< Whether CUDA support is enabled

  /**
   * @brief Private constructor of `shared_ptr<ucxx::Context>`.
   *
   * This is the internal implementation of `ucxx::Context` constructor, made private not
   * to be called directly. Instead the user should call `ucxx::createContext()`.
   *
   * @param[in] ucxConfig configurations overriding `UCX_*` defaults and environment
   *                      variables.
   * @param[in] featureFlags feature flags to be used at UCP context construction time.
   */
  Context(const ConfigMap ucxConfig, const uint64_t featureFlags);

 public:
  // Default features enabled when callers do not specify their own flag set.
  static constexpr uint64_t defaultFeatureFlags =
    UCP_FEATURE_TAG | UCP_FEATURE_WAKEUP | UCP_FEATURE_STREAM | UCP_FEATURE_AM | UCP_FEATURE_RMA;

  Context()               = delete;
  Context(const Context&) = delete;
  Context& operator=(Context const&) = delete;
  Context(Context&& o)               = delete;
  Context& operator=(Context&& o) = delete;

  /**
   * @brief Constructor of `shared_ptr<ucxx::Context>`.
   *
   * The constructor for a `shared_ptr<ucxx::Context>` object. The default constructor is
   * made private to ensure all UCXX objects are shared pointers for correct
   * lifetime management.
   *
   * @code{.cpp}
   * auto context = ucxx::createContext({}, UCP_FEATURE_WAKEUP | UCP_FEATURE_TAG);
   * @endcode
   *
   * @param[in] ucxConfig configurations overriding `UCX_*` defaults and environment
   *                      variables.
   * @param[in] featureFlags feature flags to be used at UCP context construction time.
   * @return The `shared_ptr<ucxx::Context>` object
   */
  friend std::shared_ptr<Context> createContext(ConfigMap ucxConfig, const uint64_t featureFlags);

  /**
   * @brief `ucxx::Context` destructor
   */
  ~Context();

  /**
   * @brief Get the context configuration.
   *
   * The context configuration is a `ConfigMap` containing entries of the UCX variables that were
   * set upon creation of the UCP context. Only those variables known to UCP can be acquired.
   *
   * @code{.cpp}
   * // context is `std::shared_ptr<ucxx::Context>`
   * auto contextConfig = context->getConfig();
   * @endcode
   *
   * @return A `ConfigMap` corresponding to the context's configuration.
   */
  ConfigMap getConfig();

  /**
   * @brief Get the underlying `ucp_context_h` handle
   *
   * Lifetime of the `ucp_context_h` handle is managed by the `ucxx::Context`
   * object and its ownership is non-transferrable. Once the `ucxx::Context`
   * is destroyed the handle is not valid anymore, it is the user's
   * responsibility to ensure the owner's lifetime while using the handle.
   *
   * @code{.cpp}
   * // context is `std::shared_ptr<ucxx::Context>`
   * ucp_context_h contextHandle = context->getHandle();
   * @endcode
   *
   * @return The underlying `ucp_context_h` handle
   */
  ucp_context_h getHandle();

  /**
   * @brief Get information from UCP context.
   *
   * Get information from UCP context, including memory domains, transport
   * resources, and other useful information. This method is a wrapper to
   * `ucp_context_print_info`.
   *
   * @code{.cpp}
   * // context is `std::shared_ptr<ucxx::Context>`
   * auto contextInfo = context->getInfo();
   * @endcode
   *
   * @return String containing context information
   */
  std::string getInfo();

  /**
   * @brief Get feature flags that were used to construct the UCP context.
   *
   * Get feature flags that were used to construct the UCP context, this has
   * the same value that was specified by the user when creating the
   * `ucxx::Context` object.
   *
   * @code{.cpp}
   * // context is `std::shared_ptr<ucxx::Context>`
   * uint64_t contextFeatureFlags = context->getFeatureFlags();
   * @endcode
   *
   * @return Feature flags for this context
   */
  uint64_t getFeatureFlags() const;

  /**
   * @brief Query whether CUDA support is available.
   *
   * Query whether the UCP context has CUDA support available. This is done through a
   * combination of verifying whether CUDA memory support is available and `UCX_TLS` allows
   * CUDA to be enabled, essentially `UCX_TLS` must explicitly be one of the following:
   *
   * 1. Exactly `all`;
   * 2. Contain a field starting with `cuda`;
   * 3. Start with `^` (disable all listed transports) and _NOT_ contain a field named
   *    either `cuda` or `cuda_copy`.
   *
   * @return Whether CUDA support is available.
   */
  bool hasCudaSupport() const;

  /**
   * @brief Create a new `ucxx::Worker`.
   *
   * Create a new `ucxx::Worker` as a child of the current `ucxx::Context`.
   * The `ucxx::Context` will retain ownership of the `ucxx::Worker` and will
   * not be destroyed until all `ucxx::Worker` objects are destroyed first.
   *
   * @code{.cpp}
   * // context is `std::shared_ptr<ucxx::Context>`
   * auto worker = context->createWorker(true);
   * @endcode
   *
   * @param[in] enableDelayedSubmission whether the worker should delay
   *                                    transfer requests to the worker thread.
   * @param[in] enableFuture if `true`, notifies the future associated with each
   *                         `ucxx::Request`, currently used only by `ucxx::python::Worker`.
   * @return Shared pointer to the `ucxx::Worker` object.
   */
  std::shared_ptr<Worker> createWorker(const bool enableDelayedSubmission = false,
                                       const bool enableFuture            = false);
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/component.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
namespace ucxx {
/**
 * @brief Base class providing parent-child lifetime management for UCXX objects.
 *
 * Stores an owning `std::shared_ptr` to a parent component so that a child
 * keeps its parent alive for as long as the child itself exists. Derived
 * classes establish the link by calling `setParent()` from their constructor.
 */
class Component : public std::enable_shared_from_this<Component> {
protected:
std::shared_ptr<Component> _parent{nullptr};  // Owning reference that keeps the parent alive; nullptr until setParent() is called.
public:
virtual ~Component();
/**
 * @brief Set the parent of this component.
 *
 * Called from the child's constructor to establish the ownership link,
 * keeping the parent alive for the lifetime of this component.
 *
 * @param[in] parent the component that owns this one.
 */
void setParent(std::shared_ptr<Component> parent);
/**
 * @brief Get the parent of this component.
 *
 * @return Shared pointer to the parent component, or `nullptr` if no
 *         parent was set.
 */
std::shared_ptr<Component> getParent() const;
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/request_tag.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
#include <utility>
#include <ucp/api/ucp.h>
#include <ucxx/delayed_submission.h>
#include <ucxx/request.h>
#include <ucxx/typedefs.h>
namespace ucxx {
class RequestTag : public Request {
private:
size_t _length{0}; ///< The tag message length in bytes
/**
* @brief Private constructor of `ucxx::RequestTag`.
*
* This is the internal implementation of `ucxx::RequestTag` constructor, made private not
* to be called directly. This constructor is made private to ensure all UCXX objects
* are shared pointers and the correct lifetime management of each one.
*
* Instead the user should use one of the following:
*
* - `ucxx::Endpoint::tagRecv()`
* - `ucxx::Endpoint::tagSend()`
* - `ucxx::Worker::tagRecv()`
* - `ucxx::createRequestTag()`
*
* @throws ucxx::Error if send is `true` and `endpointOrWorker` is not a
* `std::shared_ptr<ucxx::Endpoint>`.
*
* @param[in] endpointOrWorker the parent component, which may either be a
* `std::shared_ptr<Endpoint>` or
* `std::shared_ptr<Worker>`.
* @param[in] send whether this is a send (`true`) or receive (`false`)
* tag request.
* @param[in] buffer a raw pointer to the data to be transferred.
* @param[in] length the size in bytes of the tag message to be transferred.
* @param[in] tag the tag to match.
* @param[in] enablePythonFuture whether a python future should be created and
* subsequently notified.
* @param[in] callbackFunction user-defined callback function to call upon completion.
* @param[in] callbackData user-defined data to pass to the `callbackFunction`.
*/
RequestTag(std::shared_ptr<Component> endpointOrWorker,
bool send,
void* buffer,
size_t length,
ucp_tag_t tag,
const bool enablePythonFuture = false,
RequestCallbackUserFunction callbackFunction = nullptr,
RequestCallbackUserData callbackData = nullptr);
public:
/**
* @brief Constructor for `std::shared_ptr<ucxx::RequestTag>`.
*
* The constructor for a `std::shared_ptr<ucxx::RequestTag>` object, creating a send or
* receive tag request, returning a pointer to a request object that can be later awaited
* and checked for errors. This is a non-blocking operation, and the status of the
* transfer must be verified from the resulting request object before the data can be
* released (for a send operation) or consumed (for a receive operation).
*
* @throws ucxx::Error if send is `true` and `endpointOrWorker` is not a
* `std::shared_ptr<ucxx::Endpoint>`.
*
* @param[in] endpointOrWorker the parent component, which may either be a
* `std::shared_ptr<Endpoint>` or
* `std::shared_ptr<Worker>`.
* @param[in] send whether this is a send (`true`) or receive (`false`)
* tag request.
* @param[in] buffer a raw pointer to the data to be transferred.
* @param[in] length the size in bytes of the tag message to be transferred.
* @param[in] tag the tag to match.
* @param[in] enablePythonFuture whether a python future should be created and
* subsequently notified.
* @param[in] callbackFunction user-defined callback function to call upon completion.
* @param[in] callbackData user-defined data to pass to the `callbackFunction`.
*
* @returns The `shared_ptr<ucxx::RequestTag>` object
*/
friend std::shared_ptr<RequestTag> createRequestTag(std::shared_ptr<Component> endpointOrWorker,
bool send,
void* buffer,
size_t length,
ucp_tag_t tag,
const bool enablePythonFuture,
RequestCallbackUserFunction callbackFunction,
RequestCallbackUserData callbackData);
/**
 * @brief Populate the delayed submission with the actual tag transfer.
 *
 * Invoked at the discretion of the owning `std::shared_ptr<ucxx::Worker>` to
 * dispatch `request()`, which creates and submits the underlying UCX tag
 * operation. See `request()` for details of the submission itself.
 */
virtual void populateDelayedSubmission();
/**
* @brief Create and submit a tag request.
*
* This is the method that should be called to actually submit a tag request. It is meant
* to be called from `populateDelayedSubmission()`, which is decided at the discretion of
* `std::shared_ptr<ucxx::Worker>`. See `populateDelayedSubmission()` for more details.
*/
void request();
/**
* @brief Callback executed by UCX when a tag send request is completed.
*
* Callback executed by UCX when a tag send request is completed, that will dispatch
* `ucxx::Request::callback()`.
*
* WARNING: This is not intended to be called by the user, but it currently needs to be
* a public method so that UCX may access it. In future changes this will be moved to
* an internal object and remove this method from the public API.
*
* @param[in] request the UCX request pointer.
* @param[in] status the completion status of the request.
* @param[in] arg the pointer to the `ucxx::Request` object that created the
* transfer, effectively `this` pointer as seen by `request()`.
*/
static void tagSendCallback(void* request, ucs_status_t status, void* arg);
/**
* @brief Callback executed by UCX when a tag receive request is completed.
*
* Callback executed by UCX when a tag receive request is completed, that will dispatch
* `ucxx::RequestTag::callback()`.
*
* WARNING: This is not intended to be called by the user, but it currently needs to be
* a public method so that UCX may access it. In future changes this will be moved to
* an internal object and remove this method from the public API.
*
* @param[in] request the UCX request pointer.
* @param[in] status the completion status of the request.
* @param[in] info information of the completed transfer provided by UCX, includes
* length of message received used to verify for truncation.
* @param[in] arg the pointer to the `ucxx::Request` object that created the
* transfer, effectively `this` pointer as seen by `request()`.
*/
static void tagRecvCallback(void* request,
ucs_status_t status,
const ucp_tag_recv_info_t* info,
void* arg);
/**
* @brief Implementation of the tag receive request callback.
*
* Implementation of the tag receive request callback. Verify whether the message was
* truncated and set that state if necessary, and finally dispatch
* `ucxx::Request::callback()`.
*
* WARNING: This is not intended to be called by the user, but it currently needs to be
* a public method so that UCX may access it. In future changes this will be moved to
* an internal object and remove this method from the public API.
*
* @param[in] request the UCX request pointer.
* @param[in] status the completion status of the request.
* @param[in] info information of the completed transfer provided by UCX, includes
* length of message received used to verify for truncation.
*/
void callback(void* request, ucs_status_t status, const ucp_tag_recv_info_t* info);
};
} // namespace ucxx
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.