repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/constructors.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <ucp/api/ucp.h>
#include <ucxx/typedefs.h>
namespace ucxx {

class Address;
class Component;
class Context;
class Endpoint;
class Future;
class Listener;
class Notifier;
class Request;
class RequestAm;
class RequestStream;
class RequestTag;
class RequestTagMulti;
class Worker;

// Components

/// @brief Create an `Address` object from `worker`'s local UCP worker address.
std::shared_ptr<Address> createAddressFromWorker(std::shared_ptr<ucxx::Worker> worker);

/// @brief Create an `Address` object from a previously-serialized address string.
std::shared_ptr<Address> createAddressFromString(std::string addressString);

/// @brief Create a `Context` with the given UCP configuration map and feature flags.
std::shared_ptr<Context> createContext(const ConfigMap ucxConfig, const uint64_t featureFlags);

/// @brief Create an `Endpoint` connected to a remote listener at `ipAddress:port`.
std::shared_ptr<Endpoint> createEndpointFromHostname(std::shared_ptr<Worker> worker,
                                                     std::string ipAddress,
                                                     uint16_t port,
                                                     bool endpointErrorHandling);

/// @brief Create an `Endpoint` from a connection request received by a `Listener`.
std::shared_ptr<Endpoint> createEndpointFromConnRequest(std::shared_ptr<Listener> listener,
                                                        ucp_conn_request_h connRequest,
                                                        bool endpointErrorHandling);

/// @brief Create an `Endpoint` connected to a remote worker's `Address`.
std::shared_ptr<Endpoint> createEndpointFromWorkerAddress(std::shared_ptr<Worker> worker,
                                                          std::shared_ptr<Address> address,
                                                          bool endpointErrorHandling);

/// @brief Create a `Listener` bound to `port`, invoking `callback` on incoming connections.
std::shared_ptr<Listener> createListener(std::shared_ptr<Worker> worker,
                                         uint16_t port,
                                         ucp_listener_conn_callback_t callback,
                                         void* callback_args);

/// @brief Create a `Worker` on `context`, optionally enabling delayed submission and futures.
std::shared_ptr<Worker> createWorker(std::shared_ptr<Context> context,
                                     const bool enableDelayedSubmission,
                                     const bool enableFuture);

// Transfers

/// @brief Create and submit an active message send request.
std::shared_ptr<RequestAm> createRequestAmSend(std::shared_ptr<Endpoint> endpoint,
                                               void* buffer,
                                               size_t length,
                                               ucs_memory_type_t memoryType,
                                               const bool enablePythonFuture,
                                               RequestCallbackUserFunction callbackFunction,
                                               RequestCallbackUserData callbackData);

/// @brief Create and submit an active message receive request.
std::shared_ptr<RequestAm> createRequestAmRecv(std::shared_ptr<Endpoint> endpoint,
                                               const bool enablePythonFuture,
                                               RequestCallbackUserFunction callbackFunction,
                                               RequestCallbackUserData callbackData);

/// @brief Create and submit a stream send (`send == true`) or receive request.
std::shared_ptr<RequestStream> createRequestStream(std::shared_ptr<Endpoint> endpoint,
                                                   bool send,
                                                   void* buffer,
                                                   size_t length,
                                                   const bool enablePythonFuture);

/// @brief Create and submit a tagged send (`send == true`) or receive request.
/// `endpointOrWorker` is a `Component` -- presumably accepting either an `Endpoint`
/// or a `Worker`; confirm against the implementation.
std::shared_ptr<RequestTag> createRequestTag(std::shared_ptr<Component> endpointOrWorker,
                                             bool send,
                                             void* buffer,
                                             size_t length,
                                             ucp_tag_t tag,
                                             const bool enablePythonFuture,
                                             RequestCallbackUserFunction callbackFunction,
                                             RequestCallbackUserData callbackData);

/// @brief Create and submit a multi-buffer tagged send request.
std::shared_ptr<RequestTagMulti> createRequestTagMultiSend(std::shared_ptr<Endpoint> endpoint,
                                                           const std::vector<void*>& buffer,
                                                           const std::vector<size_t>& size,
                                                           const std::vector<int>& isCUDA,
                                                           const ucp_tag_t tag,
                                                           const bool enablePythonFuture);

/// @brief Create and submit a multi-buffer tagged receive request.
std::shared_ptr<RequestTagMulti> createRequestTagMultiRecv(std::shared_ptr<Endpoint> endpoint,
                                                           const ucp_tag_t tag,
                                                           const bool enablePythonFuture);

}  // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/delayed_submission.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <utility>
#include <vector>
#include <ucp/api/ucp.h>
#include <ucxx/log.h>
namespace ucxx {
typedef std::function<void()> DelayedSubmissionCallbackType;
class DelayedSubmission {
 public:
  bool _send{false};        ///< Whether this is a send (`true`) operation or recv (`false`)
  void* _buffer{nullptr};   ///< Raw pointer to data buffer
  size_t _length{0};        ///< Length of the message in bytes
  ucp_tag_t _tag{0};        ///< Tag to match
  ucs_memory_type_t _memoryType{UCS_MEMORY_TYPE_UNKNOWN};  ///< Buffer memory type

  DelayedSubmission() = delete;

  /**
   * @brief Constructor for a delayed submission operation.
   *
   * Construct a delayed submission operation. Delayed submission means that a transfer
   * operation will not be submitted immediately, but will rather be delayed for the next
   * progress iteration.
   *
   * This may be useful to avoid any transfer operations to be executed directly in the
   * application thread, delaying all of them for the worker progress thread when enabled.
   * With this approach any perceived overhead will be removed from the application thread,
   * and thus provide some speedup in certain situations. It may be also useful to prevent
   * a multi-threaded application for blocking while waiting for the UCX spinlock, since
   * all transfer operations may be pushed to the worker progress thread.
   *
   * @param[in] send       whether this is a send (`true`) or receive (`false`) operation.
   * @param[in] buffer     a raw pointer to the data being transferred.
   * @param[in] length     the size in bytes of the message being transferred.
   * @param[in] tag        tag to match for this operation (only applies for tag
   *                       operations).
   * @param[in] memoryType the memory type of the buffer.
   */
  DelayedSubmission(const bool send,
                    void* buffer,
                    const size_t length,
                    const ucp_tag_t tag          = 0,
                    const ucs_memory_type_t memoryType = UCS_MEMORY_TYPE_UNKNOWN);
};
template <typename T>
class BaseDelayedSubmissionCollection {
 protected:
  std::string _name{"undefined"};  ///< The human-readable name of the collection, used for logging
  bool _enabled{true};    ///< Whether the resource required to process the collection is enabled.
  std::vector<T> _collection{};  ///< The collection.
  std::mutex _mutex{};           ///< Mutex to provide access to `_collection`.

  /**
   * @brief Log message during `schedule()`.
   *
   * Log a specialized message while `schedule()` is being executed.
   *
   * @param[in] item the callback that was passed as argument to `schedule()`.
   */
  virtual void scheduleLog(T item) = 0;

  /**
   * @brief Process a single item during `process()`.
   *
   * Method called by `process()` to process a single item of the collection.
   *
   * @param[in] item the callback that was passed as argument to `schedule()` when
   *                 it was first registered.
   */
  virtual void processItem(T item) = 0;

 public:
  /**
   * @brief Constructor for a thread-safe delayed submission collection.
   *
   * Construct a thread-safe delayed submission collection. A delayed submission collection
   * provides two operations: schedule and process. The `schedule()` method will push an
   * operation into the collection, whereas the `process()` will invoke all callbacks that
   * were previously pushed into the collection and clear the collection.
   *
   * @param[in] name    human-readable name of the collection, used for logging.
   * @param[in] enabled whether the resource required to process the collection is
   *                    enabled; if `false`, calls to `schedule()` will throw.
   */
  explicit BaseDelayedSubmissionCollection(const std::string name, const bool enabled)
    : _name{name}, _enabled{enabled}
  {
  }

  BaseDelayedSubmissionCollection()                                                  = delete;
  BaseDelayedSubmissionCollection(const BaseDelayedSubmissionCollection&)            = delete;
  BaseDelayedSubmissionCollection& operator=(BaseDelayedSubmissionCollection const&) = delete;
  BaseDelayedSubmissionCollection(BaseDelayedSubmissionCollection&& o)               = delete;
  BaseDelayedSubmissionCollection& operator=(BaseDelayedSubmissionCollection&& o)    = delete;

  /**
   * @brief Register a callable or complex-type for delayed submission.
   *
   * Register a simple callback, or complex-type with a callback (requires specialization),
   * for delayed submission that will be executed when the request is in fact submitted when
   * `process()` is called.
   *
   * @throws std::runtime_error if `false` was specified as the `enabled` argument to the
   *                            constructor.
   *
   * @param[in] item the callback that will be executed by `process()` when the
   *                 operation is submitted.
   */
  virtual void schedule(T item)
  {
    // NOTE(review): `std::runtime_error` requires `<stdexcept>`, which this header only
    // obtains transitively -- confirm and include it explicitly.
    if (!_enabled) throw std::runtime_error("Resource is disabled.");
    {
      std::lock_guard<std::mutex> lock(_mutex);
      _collection.push_back(item);
    }
    scheduleLog(item);
  }

  /**
   * @brief Process all pending callbacks.
   *
   * Process all pending callbacks. Callbacks are deemed completed when their
   * execution completes.
   */
  void process()
  {
    decltype(_collection) itemsToProcess;
    {
      std::lock_guard<std::mutex> lock(_mutex);
      // Move _collection to a local copy in order to hold the lock for as
      // short a time as possible
      itemsToProcess = std::move(_collection);
    }
    if (itemsToProcess.size() > 0) {
      ucxx_trace_req("Submitting %lu %s callbacks", itemsToProcess.size(), _name.c_str());
      for (auto& item : itemsToProcess)
        processItem(item);
    }
  }
};
/**
 * @brief A collection of delayed request submissions.
 *
 * Specialization of `BaseDelayedSubmissionCollection` whose items pair a `Request`
 * (kept alive by the `shared_ptr` until the callback runs) with its submission callback.
 */
class RequestDelayedSubmissionCollection
  : public BaseDelayedSubmissionCollection<
      std::pair<std::shared_ptr<Request>, DelayedSubmissionCallbackType>> {
 protected:
  // Log a message for a scheduled request submission.
  void scheduleLog(
    std::pair<std::shared_ptr<Request>, DelayedSubmissionCallbackType> item) override;
  // Execute a single request's submission callback.
  void processItem(
    std::pair<std::shared_ptr<Request>, DelayedSubmissionCallbackType> item) override;

 public:
  explicit RequestDelayedSubmissionCollection(const std::string name, const bool enabled);
};
/**
 * @brief A collection of generic delayed callbacks.
 *
 * Specialization of `BaseDelayedSubmissionCollection` whose items are plain callbacks.
 * This collection is always enabled (see constructor).
 */
class GenericDelayedSubmissionCollection
  : public BaseDelayedSubmissionCollection<DelayedSubmissionCallbackType> {
 protected:
  // Log a message for a scheduled generic callback.
  void scheduleLog(DelayedSubmissionCallbackType item) override;
  // Execute a single generic callback.
  void processItem(DelayedSubmissionCallbackType callback) override;

 public:
  explicit GenericDelayedSubmissionCollection(const std::string name);
};
class DelayedSubmissionCollection {
 private:
  GenericDelayedSubmissionCollection _genericPre{
    "generic pre"};  ///< The collection of all known generic pre-progress operations.
  GenericDelayedSubmissionCollection _genericPost{
    "generic post"};  ///< The collection of all known generic post-progress operations.
  RequestDelayedSubmissionCollection _requests{
    "request", false};  ///< The collection of all known delayed request submission operations.
  bool _enableDelayedRequestSubmission{false};  ///< Whether `registerRequest()` is allowed.

 public:
  /**
   * @brief Default delayed submission collection constructor.
   *
   * Construct an empty collection of delayed submissions. Despite its name, a delayed
   * submission registration may be processed right after registration, thus effectively
   * making it an immediate submission.
   *
   * @param[in] enableDelayedRequestSubmission whether request submission should be
   *                                           enabled, if `false`, only generic
   *                                           callbacks are enabled.
   */
  explicit DelayedSubmissionCollection(bool enableDelayedRequestSubmission = false);

  // NOTE(review): this deleted default constructor makes `DelayedSubmissionCollection{}`
  // ambiguous with the defaulted-argument constructor above -- confirm the deletion is
  // intentional.
  DelayedSubmissionCollection()                                   = delete;
  DelayedSubmissionCollection(const DelayedSubmissionCollection&) = delete;
  DelayedSubmissionCollection& operator=(DelayedSubmissionCollection const&) = delete;
  DelayedSubmissionCollection(DelayedSubmissionCollection&& o)               = delete;
  DelayedSubmissionCollection& operator=(DelayedSubmissionCollection&& o) = delete;

  /**
   * @brief Process pending delayed request submission and generic-pre callback operations.
   *
   * Process all pending delayed request submissions and generic callbacks. Generic
   * callbacks are deemed completed when their execution completes. On the other hand, the
   * execution of the delayed request submission callbacks does not imply completion of the
   * operation, only that it has been submitted. The completion of each delayed request
   * submission is handled externally by the implementation of the object being processed,
   * for example by checking the result of `ucxx::Request::isCompleted()`.
   *
   * Generic callbacks may be used to pass information between threads on the subject
   * that requests have been in fact processed, therefore, requests are processed first,
   * then generic callbacks are.
   */
  void processPre();

  /**
   * @brief Process all pending generic-post callback operations.
   *
   * Process all pending generic-post callbacks. Generic callbacks are deemed completed when
   * their execution completes.
   */
  void processPost();

  /**
   * @brief Register a request for delayed submission.
   *
   * Register a request for delayed submission with a callback that will be executed when
   * the request is in fact submitted when `processPre()` is called.
   *
   * @throws std::runtime_error if delayed request submission was disabled at construction.
   *
   * @param[in] request  the request to which the callback belongs, ensuring it remains
   *                     alive until the callback is invoked.
   * @param[in] callback the callback that will be executed by `processPre()` when the
   *                     operation is submitted.
   */
  void registerRequest(std::shared_ptr<Request> request, DelayedSubmissionCallbackType callback);

  /**
   * @brief Register a generic callback to execute during `processPre()`.
   *
   * Register a generic callback that will be executed when `processPre()` is called.
   * Lifetime of the callback must be ensured by the caller.
   *
   * @param[in] callback the callback that will be executed by `processPre()`.
   */
  void registerGenericPre(DelayedSubmissionCallbackType callback);

  /**
   * @brief Register a generic callback to execute during `processPost()`.
   *
   * Register a generic callback that will be executed when `processPost()` is called.
   * Lifetime of the callback must be ensured by the caller.
   *
   * @param[in] callback the callback that will be executed by `processPost()`.
   */
  void registerGenericPost(DelayedSubmissionCallbackType callback);

  /**
   * @brief Inquire if delayed request submission is enabled.
   *
   * Check whether delayed submission request is enabled, in which case `registerRequest()`
   * may be used to register requests that will be executed during `processPre()`.
   *
   * @returns `true` if a delayed request submission is enabled, `false` otherwise.
   */
  bool isDelayedRequestSubmissionEnabled() const;
};
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/typedefs.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
namespace ucxx {

class Buffer;
class Request;

// Logging levels
typedef enum {
  UCXX_LOG_LEVEL_FATAL,       /* Immediate termination */
  UCXX_LOG_LEVEL_ERROR,       /* Error is returned to the user */
  UCXX_LOG_LEVEL_WARN,        /* Something's wrong, but we continue */
  UCXX_LOG_LEVEL_DIAG,        /* Diagnostics, silent adjustments or internal error handling */
  UCXX_LOG_LEVEL_INFO,        /* Information */
  UCXX_LOG_LEVEL_DEBUG,       /* Low-volume debugging */
  UCXX_LOG_LEVEL_TRACE,       /* High-volume debugging */
  UCXX_LOG_LEVEL_TRACE_REQ,   /* Every send/receive request */
  UCXX_LOG_LEVEL_TRACE_DATA,  /* Data sent/received on the transport */
  UCXX_LOG_LEVEL_TRACE_ASYNC, /* Asynchronous progress engine */
  UCXX_LOG_LEVEL_TRACE_FUNC,  /* Function calls */
  UCXX_LOG_LEVEL_TRACE_POLL,  /* Polling functions */
  UCXX_LOG_LEVEL_LAST,        /* Sentinel: number of regular levels */
  UCXX_LOG_LEVEL_PRINT        /* Temporary output */
} ucxx_log_level_t;

// Map of UCX configuration options (option name -> value).
typedef std::unordered_map<std::string, std::string> ConfigMap;

// User-defined callback invoked on request completion, receiving the request's status
// and the user-supplied data.
// NOTE(review): `ucs_status_t` is used here without including a UCS header in this file;
// presumably obtained transitively -- confirm.
typedef std::function<void(ucs_status_t, std::shared_ptr<void>)> RequestCallbackUserFunction;

// Opaque user data passed to a `RequestCallbackUserFunction`.
typedef std::shared_ptr<void> RequestCallbackUserData;

// Allocator callback for active-message receives: returns a `Buffer` of the given size.
typedef std::function<std::shared_ptr<Buffer>(size_t)> AmAllocatorType;

}  // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/log.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <string>
#include <unordered_map>
#include <ucs/debug/log_def.h>
#include <ucxx/typedefs.h>
namespace ucxx {

// Log configuration of the UCXX component, registered with UCS (defined in log.cpp).
extern ucs_log_component_config_t ucxx_log_component_config;

// Macros

// Compile-time cap on log verbosity; messages above this level are compiled out.
#ifndef UCXX_MAX_LOG_LEVEL
#define UCXX_MAX_LOG_LEVEL ucxx::UCXX_LOG_LEVEL_LAST
#endif

// Whether `_level` is enabled both at compile time (UCXX_MAX_LOG_LEVEL) and at runtime
// (the component's configured log level).
#define ucxx_log_component_is_enabled(_level, _comp_log_config)             \
  ucs_unlikely(                                                             \
    ((_level) <= UCXX_MAX_LOG_LEVEL) &&                                     \
    ((_level) <= (ucxx::ucxx_log_level_t)(                                  \
                   reinterpret_cast<ucs_log_component_config_t*>(_comp_log_config)->log_level)))

// Whether `_level` is enabled for the default UCXX log component.
#define ucxx_log_is_enabled(_level) \
  ucxx_log_component_is_enabled(_level, &ucxx::ucxx_log_component_config)

// Dispatch a log message through UCS for an arbitrary component, if enabled.
#define ucxx_log_component(_level, _comp_log_config, _fmt, ...) \
  do {                                                          \
    if (ucxx_log_component_is_enabled(_level, _comp_log_config)) { \
      ucs_log_dispatch(__FILE__,                                \
                       __LINE__,                                \
                       __func__,                                \
                       (ucs_log_level_t)(_level),               \
                       _comp_log_config,                        \
                       _fmt,                                    \
                       ##__VA_ARGS__);                          \
    }                                                           \
  } while (0)

// Dispatch a log message for the default UCXX log component.
#define ucxx_log(_level, _fmt, ...)                                                  \
  do {                                                                               \
    ucxx_log_component(_level, &ucxx::ucxx_log_component_config, _fmt, ##__VA_ARGS__); \
  } while (0)

// Per-level convenience wrappers around ucxx_log().
#define ucxx_error(_fmt, ...) ucxx_log(ucxx::UCXX_LOG_LEVEL_ERROR, _fmt, ##__VA_ARGS__)
#define ucxx_warn(_fmt, ...) ucxx_log(ucxx::UCXX_LOG_LEVEL_WARN, _fmt, ##__VA_ARGS__)
#define ucxx_diag(_fmt, ...) ucxx_log(ucxx::UCXX_LOG_LEVEL_DIAG, _fmt, ##__VA_ARGS__)
#define ucxx_info(_fmt, ...) ucxx_log(ucxx::UCXX_LOG_LEVEL_INFO, _fmt, ##__VA_ARGS__)
#define ucxx_debug(_fmt, ...) ucxx_log(ucxx::UCXX_LOG_LEVEL_DEBUG, _fmt, ##__VA_ARGS__)
#define ucxx_trace(_fmt, ...) ucxx_log(ucxx::UCXX_LOG_LEVEL_TRACE, _fmt, ##__VA_ARGS__)
#define ucxx_trace_req(_fmt, ...) ucxx_log(ucxx::UCXX_LOG_LEVEL_TRACE_REQ, _fmt, ##__VA_ARGS__)
#define ucxx_trace_data(_fmt, ...) ucxx_log(ucxx::UCXX_LOG_LEVEL_TRACE_DATA, _fmt, ##__VA_ARGS__)
#define ucxx_trace_async(_fmt, ...) ucxx_log(ucxx::UCXX_LOG_LEVEL_TRACE_ASYNC, _fmt, ##__VA_ARGS__)
// Also prepends the calling function's name to the message.
#define ucxx_trace_func(_fmt, ...) \
  ucxx_log(ucxx::UCXX_LOG_LEVEL_TRACE_FUNC, "%s(" _fmt ")", __FUNCTION__, ##__VA_ARGS__)
#define ucxx_trace_poll(_fmt, ...) ucxx_log(ucxx::UCXX_LOG_LEVEL_TRACE_POLL, _fmt, ##__VA_ARGS__)

// Constants

// Mapping of the textual level names (as accepted in configuration) to enum values.
const std::unordered_map<std::string, ucxx_log_level_t> logLevelNames = {
  {"FATAL", UCXX_LOG_LEVEL_FATAL},
  {"ERROR", UCXX_LOG_LEVEL_ERROR},
  {"WARN", UCXX_LOG_LEVEL_WARN},
  {"DIAG", UCXX_LOG_LEVEL_DIAG},
  {"INFO", UCXX_LOG_LEVEL_INFO},
  {"DEBUG", UCXX_LOG_LEVEL_DEBUG},
  {"TRACE", UCXX_LOG_LEVEL_TRACE},
  {"REQ", UCXX_LOG_LEVEL_TRACE_REQ},
  {"DATA", UCXX_LOG_LEVEL_TRACE_DATA},
  {"ASYNC", UCXX_LOG_LEVEL_TRACE_ASYNC},
  {"FUNC", UCXX_LOG_LEVEL_TRACE_FUNC},
  {"POLL", UCXX_LOG_LEVEL_TRACE_POLL},
  // Empty name maps to the sentinel value (effectively no regular level).
  {"", UCXX_LOG_LEVEL_LAST},
  {"PRINT", UCXX_LOG_LEVEL_PRINT}};

// Log level used when none is configured.
const char logLevelNameDefault[] = "WARN";
const ucs_log_level_t logLevelDefault = (ucs_log_level_t)logLevelNames.at(logLevelNameDefault);

// Functions

// Parse the configured log level (e.g., from the environment) and apply it.
void parseLogLevel();

}  // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/request_helper.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
#include <vector>
#include <ucxx/request.h>
#include <ucxx/worker.h>
namespace ucxx {
/**
 * @brief Progress `worker` until `request` completes.
 *
 * Blocks the calling thread, progressing the worker, until the given request has
 * completed. Presumably raises the request's error on failure -- confirm against the
 * implementation.
 *
 * @param[in] worker  the worker to progress.
 * @param[in] request the request to wait for.
 */
void waitSingleRequest(std::shared_ptr<Worker> worker, std::shared_ptr<Request> request);

/**
 * @brief Progress `worker` until all `requests` complete.
 *
 * Blocks the calling thread, progressing the worker, until every request in the vector
 * has completed.
 *
 * @param[in] worker   the worker to progress.
 * @param[in] requests the requests to wait for.
 */
void waitRequests(std::shared_ptr<Worker> worker, std::vector<std::shared_ptr<Request>> requests);
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include | rapidsai_public_repos/ucxx/cpp/include/ucxx/buffer.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
#include <utility>
#include <ucxx/log.h>
#if UCXX_ENABLE_RMM
#include <rmm/device_buffer.hpp>
#endif
namespace ucxx {
// The kind of memory a `Buffer` subclass holds.
enum class BufferType {
  Host = 0,  ///< Host memory allocated with `malloc` (see `HostBuffer`)
  RMM,       ///< Device memory allocated via RMM (see `RMMBuffer`)
  Invalid,   ///< Sentinel for an uninitialized/unknown buffer type
};
/**
 * @brief Abstract base class for transfer buffers.
 *
 * Stores the buffer type and size; concrete subclasses own the actual allocation and
 * expose it via the pure virtual `data()` method.
 */
class Buffer {
 protected:
  BufferType _bufferType{BufferType::Invalid};  ///< Buffer type
  size_t _size;                                 ///< Buffer size

  /**
   * @brief Protected constructor of abstract type `Buffer`.
   *
   * This is the constructor that should be called by derived classes to store
   * general information about the buffer, such as its type and size.
   *
   * @param[in] bufferType the type of buffer the object holds.
   * @param[in] size       the size of the contained buffer.
   */
  Buffer(const BufferType bufferType, const size_t size);

 public:
  Buffer()              = delete;
  Buffer(const Buffer&) = delete;
  Buffer& operator=(Buffer const&) = delete;
  Buffer(Buffer&& o)               = delete;
  Buffer& operator=(Buffer&& o) = delete;

  /**
   * @brief Virtual destructor.
   *
   * Virtual destructor with empty implementation.
   */
  virtual ~Buffer();

  /**
   * @brief Get the type of buffer the object holds.
   *
   * The type of buffer the object holds is important to ensure proper casting
   * of the object into the correct derived type.
   *
   * @return the type of buffer the object holds
   */
  BufferType getType() const noexcept;

  /**
   * @brief Get the size of the contained buffer.
   *
   * The size in bytes of the contained buffer.
   *
   * @return the size of the contained buffer.
   */
  size_t getSize() const noexcept;

  /**
   * @brief Abstract method returning void pointer to buffer.
   *
   * Get the void pointer to the underlying buffer that holds the data. This
   * is meant to return the actual allocation, and not a pointer to some
   * container to the buffer it holds.
   *
   * @return the void pointer to the buffer.
   */
  virtual void* data() = 0;
};
/**
 * @brief A `Buffer` holding host memory.
 *
 * The underlying allocation is made with `malloc` and owned by the object unless
 * ownership is transferred to the caller via `release()`.
 */
class HostBuffer : public Buffer {
 private:
  void* _buffer;  ///< Pointer to the allocated buffer; presumably nulled by `release()` -- confirm

 public:
  HostBuffer()                  = delete;
  HostBuffer(const HostBuffer&) = delete;
  HostBuffer& operator=(HostBuffer const&) = delete;
  HostBuffer(HostBuffer&& o)               = delete;
  HostBuffer& operator=(HostBuffer&& o) = delete;

  /**
   * @brief Constructor of concrete type `HostBuffer`.
   *
   * Constructor to materialize a buffer holding host memory. The internal buffer
   * is allocated using `malloc`, and thus should be freed with `free`.
   *
   * @param[in] size the size of the host buffer to allocate.
   *
   * @code{.cpp}
   * // Allocate host buffer of 1KiB
   * auto buffer = HostBuffer(1024);
   * @endcode
   */
  explicit HostBuffer(const size_t size);

  /**
   * @brief Destructor of concrete type `HostBuffer`.
   *
   * Frees the underlying buffer, unless the underlying buffer was released to
   * the user after a call to `release`.
   */
  ~HostBuffer();

  /**
   * @brief Release the allocated host buffer to the caller.
   *
   * Release ownership of the buffer to the caller. After this method is called,
   * the caller becomes responsible for its deallocation once it is not needed
   * anymore. The buffer is allocated with `malloc`, and should be properly
   * disposed of by a call to `free`.
   *
   * The original `HostBuffer` object becomes invalid.
   *
   * @code{.cpp}
   * // Allocate host buffer of 1KiB
   * auto buffer = HostBuffer(1024);
   * void* bufferPtr = buffer.release();
   *
   * // do work on bufferPtr
   *
   * // Free buffer
   * free(bufferPtr);
   * @endcode
   *
   * @throws std::runtime_error if object has been released.
   *
   * @return the void pointer to the buffer.
   */
  void* release();

  /**
   * @brief Get a pointer to the allocated raw host buffer.
   *
   * Get a pointer to the underlying buffer, but does not release ownership.
   *
   * @code{.cpp}
   * // Allocate host buffer of 1KiB
   * auto buffer = HostBuffer(1024);
   * void* bufferPtr = buffer.data();
   *
   * // do work on bufferPtr
   *
   * // Memory is freed once `buffer` goes out-of-scope.
   * @endcode
   *
   * @throws std::runtime_error if object has been released.
   *
   * @return the void pointer to the buffer.
   */
  virtual void* data();
};
#if UCXX_ENABLE_RMM
/**
 * @brief A `Buffer` holding device memory allocated via RMM.
 *
 * Owns an `rmm::device_buffer` through a `unique_ptr`; ownership may be transferred to
 * the caller via `release()`.
 */
class RMMBuffer : public Buffer {
 private:
  std::unique_ptr<rmm::device_buffer> _buffer;  ///< RMM-allocated device buffer

 public:
  RMMBuffer()                 = delete;
  RMMBuffer(const RMMBuffer&) = delete;
  RMMBuffer& operator=(RMMBuffer const&) = delete;
  RMMBuffer(RMMBuffer&& o)               = delete;
  RMMBuffer& operator=(RMMBuffer&& o) = delete;

  /**
   * @brief Constructor of concrete type `RMMBuffer`.
   *
   * Constructor to materialize a buffer holding device memory. The internal
   * buffer holds a `std::unique_ptr<rmm::device_buffer>` and is destroyed
   * when the object goes out-of-scope or is explicitly deleted.
   *
   * @param[in] size the size of the device buffer to allocate.
   *
   * @code{.cpp}
   * // Allocate device buffer of 1KiB
   * auto buffer = RMMBuffer(1024);
   * @endcode
   */
  explicit RMMBuffer(const size_t size);

  /**
   * @brief Release the allocated `rmm::device_buffer` to the caller.
   *
   * Release ownership of the `rmm::device_buffer` to the caller. After this
   * method is called, the caller becomes responsible for the destruction of
   * the object once it is not needed anymore. The `rmm::device_buffer` is
   * owned by the `unique_ptr` and will be deallocated once it goes out-of-scope
   * or gets explicitly deleted.
   *
   * The original `RMMBuffer` object becomes invalid.
   *
   * @code{.cpp}
   * // Allocate RMM buffer of 1KiB
   * auto buffer = RMMBuffer(1024);
   * std::unique_ptr<rmm::device_buffer> rmmBuffer = buffer.release();
   *
   * // do work on rmmBuffer
   *
   * // `rmm::device_buffer` is destroyed and device memory is freed once
   * // `rmmBuffer` goes out-of-scope.
   * @endcode
   *
   * @throws std::runtime_error if object has been released.
   *
   * @return the `std::unique_ptr` owning the `rmm::device_buffer`.
   */
  std::unique_ptr<rmm::device_buffer> release();

  /**
   * @brief Get a pointer to the allocated raw device buffer.
   *
   * Get a pointer to the underlying buffer, but does not release ownership.
   *
   * @code{.cpp}
   * // Allocate device buffer of 1KiB
   * auto buffer = RMMBuffer(1024);
   * void* bufferPtr = buffer.data();
   *
   * // do work on bufferPtr
   *
   * // `rmm::device_buffer` is destroyed and device memory is freed once
   * // `buffer` goes out-of-scope.
   * @endcode
   *
   * @throws std::runtime_error if object has been released.
   *
   * @return the void pointer to the device buffer.
   */
  virtual void* data();
};
#endif
/**
 * @brief Allocate a buffer of the given type and size.
 *
 * Factory creating a `HostBuffer` or (when built with RMM support) an `RMMBuffer`.
 *
 * @param[in] bufferType the type of buffer to allocate.
 * @param[in] size       the size in bytes of the buffer to allocate.
 *
 * @returns shared pointer to the allocated `Buffer`.
 */
std::shared_ptr<Buffer> allocateBuffer(BufferType bufferType, const size_t size);
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include/ucxx | rapidsai_public_repos/ucxx/cpp/include/ucxx/utils/ucx.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <string>
#include <ucp/api/ucp.h>
namespace ucxx {
namespace utils {
/**
* @brief Throw appropriate exception on UCS error.
*
* Throw an exception appropriately mapped from a UCS error. For status `UCS_OK` and
 * `UCS_INPROGRESS` or unknown values, calling this function acts as a no-op. When an
 * exception is thrown, the user-provided `userMessage` (if any) is incorporated into the
 * exception's human-readable error message.
 *
 * @param[in] status UCS status for which to check state.
 * @param[in] userMessage user-defined, human-readable error message.
*
* @throw NoMessageError if `status == UCS_ERR_NO_MESSAGE`
* @throw NoResourceError if `status == UCS_ERR_NO_RESOURCE`
* @throw IOError if `status == UCS_ERR_IO_ERROR`
* @throw NoMemoryError if `status == UCS_ERR_NO_MEMORY`
* @throw InvalidParamError if `status == UCS_ERR_INVALID_PARAM`
* @throw UnreachableError if `status == UCS_ERR_UNREACHABLE`
* @throw InvalidAddrError if `status == UCS_ERR_INVALID_ADDR`
* @throw NotImplementedError if `status == UCS_ERR_NOT_IMPLEMENTED`
* @throw MessageTruncatedError if `status == UCS_ERR_MESSAGE_TRUNCATED`
* @throw NoProgressError if `status == UCS_ERR_NO_PROGRESS`
* @throw BufferTooSmallError if `status == UCS_ERR_BUFFER_TOO_SMALL`
* @throw NoElemError if `status == UCS_ERR_NO_ELEM`
* @throw SomeConnectsFailedError if `status == UCS_ERR_SOME_CONNECTS_FAILED`
* @throw NoDeviceError if `status == UCS_ERR_NO_DEVICE`
* @throw BusyError if `status == UCS_ERR_BUSY`
* @throw CanceledError if `status == UCS_ERR_CANCELED`
* @throw ShmemSegmentError if `status == UCS_ERR_SHMEM_SEGMENT`
* @throw AlreadyExistsError if `status == UCS_ERR_ALREADY_EXISTS`
* @throw OutOfRangeError if `status == UCS_ERR_OUT_OF_RANGE`
* @throw TimedOutError if `status == UCS_ERR_TIMED_OUT`
* @throw ExceedsLimitError if `status == UCS_ERR_EXCEEDS_LIMIT`
* @throw UnsupportedError if `status == UCS_ERR_UNSUPPORTED`
* @throw RejectedError if `status == UCS_ERR_REJECTED`
* @throw NotConnectedError if `status == UCS_ERR_NOT_CONNECTED`
* @throw ConnectionResetError if `status == UCS_ERR_CONNECTION_RESET`
* @throw FirstLinkFailureError if `status == UCS_ERR_FIRST_LINK_FAILURE`
* @throw LastLinkFailureError if `status == UCS_ERR_LAST_LINK_FAILURE`
* @throw FirstEndpointFailureError if `status == UCS_ERR_FIRST_ENDPOINT_FAILURE`
* @throw EndpointTimeoutError if `status == UCS_ERR_ENDPOINT_TIMEOUT`
* @throw LastEndpointFailureError if `status == UCS_ERR_LAST_ENDPOINT_FAILURE`
*/
void ucsErrorThrow(const ucs_status_t status, const std::string& userMessage = "");
} // namespace utils
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include/ucxx | rapidsai_public_repos/ucxx/cpp/include/ucxx/utils/file_descriptor.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <cstdio>
#include <string>
namespace ucxx {
namespace utils {
/**
* @brief Create a file descriptor from a temporary file.
*
* Create a file descriptor from a temporary file.
*
* @throws std::ios_base::failure if creating a temporary file fails, a common cause being
* lack of write permissions to `/tmp`.
*
 * @returns The `FILE` stream handle for the created temporary file.
*/
FILE* createTextFileDescriptor();
/**
* @brief Decode text file descriptor.
*
* Decode a text file descriptor and return it as a string.
*
* @throws std::ios_base::failure if reading the file descriptor fails.
*
* @returns The string with a copy of the file descriptor contents.
*/
std::string decodeTextFileDescriptor(FILE* textFileDescriptor);
} // namespace utils
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include/ucxx | rapidsai_public_repos/ucxx/cpp/include/ucxx/utils/callback_notifier.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <atomic>
#include <condition_variable>
#include <mutex>
namespace ucxx {
namespace utils {
class CallbackNotifier {
 private:
  std::atomic_bool _flag{};                      //< flag storing state
  std::mutex _mutex{};                           //< lock to guard accesses
  std::condition_variable _conditionVariable{};  //< notification condition var

 public:
  /**
   * @brief Construct a thread-safe notification object
   *
   * Construct a thread-safe notification object which can signal
   * release of some shared state with `set()` while other threads
   * block on `wait()` until the shared state is released.
   *
   * If libc is glibc and the version is older than 2.25, the
   * implementation uses a spinlock otherwise it uses a condition
   * variable.
   *
   * When C++-20 is the minimum supported version, it should use
   * atomic.wait + notify_all.
   */
  CallbackNotifier() : _flag{false} {};
  ~CallbackNotifier() = default;

  CallbackNotifier(const CallbackNotifier&) = delete;
  CallbackNotifier& operator=(CallbackNotifier const&) = delete;
  CallbackNotifier(CallbackNotifier&& o)               = delete;
  CallbackNotifier& operator=(CallbackNotifier&& o) = delete;

  /**
   * @brief Notify waiting threads that we are done and they can proceed
   *
   * Set the flag to true and notify other threads blocked by a call to `wait()`.
   * See also `std::condition_variable::notify_all`.
   */
  void set();

  /**
   * @brief Wait until `set()` has been called or period has elapsed.
   *
   * Wait until `set()` has been called, or period (in nanoseconds) has elapsed (only
   * applicable if using glibc 2.25 and higher).
   *
   * See also `std::condition_variable::wait`.
   *
   * @param[in] period maximum period in nanoseconds to wait for or `0` to wait forever.
   *
   * @return `true` if waiting finished or `false` if a timeout occurred.
   */
  bool wait(uint64_t period = 0);
};
} // namespace utils
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include/ucxx | rapidsai_public_repos/ucxx/cpp/include/ucxx/utils/python.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
namespace ucxx {
namespace utils {
/**
* @brief Check whether Python support is available.
*
* Check that binary was built with Python support and `libucxx_python.so` is in the
* library path. The check is done by attempting to `dlopen` the library, returning whether
* both conditions are met.
*
* @returns whether Python support is available.
*/
bool isPythonAvailable();
} // namespace utils
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include/ucxx | rapidsai_public_repos/ucxx/cpp/include/ucxx/utils/sockaddr.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
#include <netdb.h>
namespace ucxx {
namespace utils {
/**
* @brief Get an addrinfo struct corresponding to an address and port.
*
* This information can later be used to bind a UCP listener or endpoint.
*
* @param[in] ip_address valid socket address (e.g., IP address or hostname) or NULL as a
* wildcard for "all" to set the socket address storage to.
* @param[in] port port to set the socket address storage to.
*
* @returns unique pointer wrapping a `struct addrinfo` (frees the addrinfo when out of scope)
*/
std::unique_ptr<struct addrinfo, void (*)(struct addrinfo*)> get_addrinfo(const char* ip_address,
uint16_t port);
/**
* @brief Get socket address and port of a socket address storage.
*
* Get the socket address (usually the IP address) and port from a socket address storage
* pointer.
*
* @param[in] sock_addr pointer to the socket address storage.
 * @param[out] ip_str socket address (or IP) contained in the socket address storage.
 * @param[out] port_str port contained in the socket address storage.
* @param[in] max_str_size size of the `ip_str` and `port_str` strings.
*/
void sockaddr_get_ip_port_str(const struct sockaddr_storage* sock_addr,
char* ip_str,
char* port_str,
size_t max_str_size);
} // namespace utils
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/include/ucxx | rapidsai_public_repos/ucxx/cpp/include/ucxx/internal/request_am.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <unordered_map>
#include <ucp/api/ucp.h>
#include <ucxx/typedefs.h>
namespace ucxx {
class Buffer;
class InflightRequests;
class RequestAm;
class Request;
class Worker;
namespace internal {
class AmData;
/**
 * @brief Bookkeeping for a single in-flight active message receive.
 *
 * Ties together the worker-wide active message data, the endpoint the message arrived
 * on, the user-visible request, and the buffer holding the received payload, so that the
 * completion callback can later deliver the message to the user.
 */
class RecvAmMessage {
 public:
  internal::AmData* _amData{nullptr};  ///< Active messages data
  ucp_ep_h _ep{nullptr};               ///< Handle containing address of the reply endpoint
  std::shared_ptr<RequestAm> _request{
    nullptr};  ///< Request which will later be notified/delivered to user
  std::shared_ptr<Buffer> _buffer{nullptr};  ///< Buffer containing the received data

  // Not default-constructible, copyable or movable: instances are created once per
  // received message and referenced by raw pointer from `AmData::_recvAmMessageMap`.
  RecvAmMessage()                     = delete;
  RecvAmMessage(const RecvAmMessage&) = delete;
  RecvAmMessage& operator=(RecvAmMessage const&) = delete;
  RecvAmMessage(RecvAmMessage&& o)               = delete;
  RecvAmMessage& operator=(RecvAmMessage&& o) = delete;

  /**
   * @brief Constructor of `ucxx::RecvAmMessage`.
   *
   * Construct the object, setting attributes that are later needed by the callback.
   *
   * @param[in] amData  active messages worker data.
   * @param[in] ep      handle containing address of the reply endpoint (i.e., endpoint
   *                    where user is requesting to receive).
   * @param[in] request request to be later notified/delivered to user.
   * @param[in] buffer  buffer containing the received data.
   */
  RecvAmMessage(internal::AmData* amData,
                ucp_ep_h ep,
                std::shared_ptr<RequestAm> request,
                std::shared_ptr<Buffer> buffer);

  /**
   * @brief Set the UCP request.
   *
   * Set the underlying UCP request (`_request` attribute) of the `RequestAm`.
   *
   * @param[in] request the UCP request associated to the active message receive operation.
   */
  void setUcpRequest(void* request);

  /**
   * @brief Execute the `ucxx::Request::callback()`.
   *
   * Execute the `ucxx::Request::callback()` method to set the status of the request, the
   * buffer containing the data received and release the reference to this object from
   * `AmData`.
   *
   * @param[in] request the UCP request associated to the active message receive operation.
   * @param[in] status  the completion status of the UCP request.
   */
  void callback(void* request, ucs_status_t status);
};
typedef std::unordered_map<ucp_ep_h, std::queue<std::shared_ptr<RequestAm>>> AmPoolType;
typedef std::unordered_map<RequestAm*, std::shared_ptr<RecvAmMessage>> RecvAmMessageMapType;
/**
 * @brief Worker-wide state for active message handling.
 *
 * Holds the receive pools, the map of in-flight receive callbacks and the allocators
 * shared by all active message operations of a single worker. Access to the pools/maps
 * must be protected by `_mutex`.
 */
class AmData {
 public:
  std::weak_ptr<Worker> _worker{};  ///< The worker to which the Active Message callback belongs
  std::string _ownerString{};       ///< The owner string used for logging
  AmPoolType _recvPool{};  ///< The pool of completed receive requests (waiting for user request)
  AmPoolType _recvWait{};  ///< The pool of user receive requests (waiting for message arrival)
  RecvAmMessageMapType
    _recvAmMessageMap{};  ///< The active messages waiting to be handled by callback
  std::mutex _mutex{};    ///< Mutex to provide access to pools/maps
  std::function<void(std::shared_ptr<Request>)>
    _registerInflightRequest{};  ///< Worker function to register inflight requests with
  std::unordered_map<ucs_memory_type_t, AmAllocatorType>
    _allocators{};  ///< Default and user-defined active message allocators
};
} // namespace internal
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/python/CMakeLists.txt | # ======================================================================================================
# SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ======================================================================================================
# Find the Python development components (headers/libraries) required to build the
# Python support library, and record the dependency in the export sets for consumers.
rapids_find_package(
  Python3 REQUIRED COMPONENTS Development
  BUILD_EXPORT_SET ucxx-python-exports
  INSTALL_EXPORT_SET ucxx-python-exports
)

set(UCXX_PYTHON_LIB Python3::Python)

# ##################################################################################################
# * python library --------------------------------------------------------------------------------

add_library(
  ucxx_python
  src/exception.cpp
  src/future.cpp
  src/notifier.cpp
  src/python_future.cpp
  src/worker.cpp
)

set_target_properties(
  ucxx_python
  PROPERTIES BUILD_RPATH "\$ORIGIN"
             INSTALL_RPATH "\$ORIGIN"
             EXPORT_NAME python
             # set target compile options
             CXX_STANDARD 17
             CXX_STANDARD_REQUIRED ON
             POSITION_INDEPENDENT_CODE ON
             INTERFACE_POSITION_INDEPENDENT_CODE ON
)

target_compile_options(ucxx_python PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${UCXX_CXX_FLAGS}>")

# Specify include paths for the current target and dependents
target_include_directories(
  ucxx_python
  PUBLIC "$<BUILD_INTERFACE:${UCXX_SOURCE_DIR}/include>"
         "$<BUILD_INTERFACE:${UCXX_SOURCE_DIR}/python/include>"
         "$<BUILD_INTERFACE:${UCXX_GENERATED_INCLUDE_DIR}/include>"
         "$<BUILD_INTERFACE:${UCXX_GENERATED_INCLUDE_DIR}/python/include>"
  PRIVATE "$<BUILD_INTERFACE:${UCXX_SOURCE_DIR}/src>"
  INTERFACE "$<INSTALL_INTERFACE:include>"
)

target_compile_definitions(ucxx_python PUBLIC "$<$<COMPILE_LANGUAGE:CXX>:${UCXX_CXX_DEFINITIONS}>")

# Make Python support detectable at compile time by dependents.
target_compile_definitions(ucxx_python PUBLIC UCXX_ENABLE_PYTHON)

# Define spdlog level
target_compile_definitions(ucxx_python PUBLIC "SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_${RMM_LOGGING_LEVEL}")

# Specify the target module library dependencies
target_link_libraries(ucxx_python PUBLIC rmm::rmm ucx::ucp ucxx::ucxx ${UCXX_PYTHON_LIB})

# Add Conda library, and include paths if specified
if(TARGET conda_env)
  target_link_libraries(ucxx_python PRIVATE conda_env)
endif()

include(GNUInstallDirs)

add_library(ucxx::python ALIAS ucxx_python)

# Install the Python-specific public headers; EXCLUDE_FROM_ALL keeps them out of the
# default install so they are only installed with the optional `python` component.
install(DIRECTORY ${UCXX_SOURCE_DIR}/python/include/ucxx
        DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
        COMPONENT python
        EXCLUDE_FROM_ALL
)

include("${rapids-cmake-dir}/export/find_package_root.cmake")
rapids_export_find_package_root(
  BUILD Python3 [=[${CMAKE_CURRENT_LIST_DIR}]=] EXPORT_SET ucxx-python-exports
)
| 0 |
rapidsai_public_repos/ucxx/cpp/python/include/ucxx | rapidsai_public_repos/ucxx/cpp/python/include/ucxx/python/notifier.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <condition_variable>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>
#include <ucxx/future.h>
#include <ucxx/notifier.h>
namespace ucxx {
namespace python {
/**
 * @brief Python implementation of `ucxx::Notifier`.
 *
 * Notifies the Python asyncio event loop of completed futures from a dedicated notifier
 * thread, decoupling the application thread from the overhead of notifying each future.
 */
class Notifier : public ::ucxx::Notifier {
 private:
  std::mutex _notifierThreadMutex{};  ///< Mutex to access thread's resources
  std::vector<std::pair<std::shared_ptr<::ucxx::Future>, ucs_status_t>>
    _notifierThreadFutureStatus{};  ///< Container with futures and statuses to set
  bool _notifierThreadFutureStatusReady{false};  ///< Whether a future is scheduled for notification
  RequestNotifierThreadState _notifierThreadFutureStatusFinished{
    RequestNotifierThreadState::NotRunning};  ///< State of the notifier thread
  std::condition_variable
    _notifierThreadConditionVariable{};  ///< Condition variable used to wait for event

  /**
   * @brief Private constructor of `ucxx::python::Notifier`.
   *
   * This is the internal `ucxx::python::Notifier` constructor, made private not to be
   * called directly. Instead the user should call `ucxx::python::createNotifier()`.
   */
  Notifier() = default;

  /**
   * @brief Wait for a new event without a timeout.
   *
   * Block while waiting for an event (new future to be notified or stop signal)
   * indefinitely.
   *
   * WARNING: Use with caution, if no event ever occurs it will be impossible to continue
   * the thread.
   */
  RequestNotifierWaitState waitRequestNotifierWithoutTimeout();

  /**
   * @brief Wait for a new event with a timeout.
   *
   * Block while waiting for an event (new future to be notified or stop signal) with added
   * timeout to unblock after a certain period if no event has occurred.
   *
   * @param[in] period the time to wait for an event before unblocking.
   */
  RequestNotifierWaitState waitRequestNotifierWithTimeout(uint64_t period);

 public:
  Notifier(const Notifier&) = delete;
  Notifier& operator=(Notifier const&) = delete;
  Notifier(Notifier&& o)               = delete;
  Notifier& operator=(Notifier&& o) = delete;

  /**
   * @brief Constructor of `shared_ptr<ucxx::python::Notifier>`.
   *
   * The constructor for a `shared_ptr<ucxx::python::Notifier>` object. The default
   * constructor is made private to ensure all UCXX objects are shared pointers for correct
   * lifetime management.
   *
   * The notifier should run on its own Python thread, but needs to have the same asyncio
   * event loop set as the application thread. By running a notifier on its own thread the
   * application thread can be decoupled from the overhead of allowing the UCX worker to
   * progress on the same thread as the application to be able to notify each future, as
   * well as removing the requirement for the GIL at any time by the UCX backend.
   *
   * @returns The `shared_ptr<ucxx::python::Notifier>` object
   */
  friend std::shared_ptr<::ucxx::Notifier> createNotifier();

  /**
   * @brief Virtual destructor.
   *
   * Virtual destructor with empty implementation.
   */
  virtual ~Notifier();

  /**
   * @brief Schedule event loop notification of completed Python future.
   *
   * Schedule the notification of the event loop of a completed Python future, but does
   * not notify the event loop yet, which is later done by `runRequestNotifier()`. Because
   * this call does not notify the Python asyncio event loop, it does not require the GIL
   * to execute.
   *
   * This is meant to be called from `ucxx::python::Future::notify()`.
   *
   * @param[in] future Python future to notify.
   * @param[in] status the request completion status.
   */
  void scheduleFutureNotify(std::shared_ptr<::ucxx::Future> future, ucs_status_t status) override;

  /**
   * @brief Wait for a new event with a timeout in nanoseconds.
   *
   * Block while waiting for an event (new future to be notified or stop signal) with added
   * timeout in nanoseconds to unblock after that period if no event has occurred. A
   * period of zero means this call will never unblock until an event occurs.
   *
   * WARNING: Be cautious using a period of zero, if no event ever occurs it will be
   * impossible to continue the thread.
   *
   * @param[in] period the time in nanoseconds to wait for an event before unblocking.
   */
  RequestNotifierWaitState waitRequestNotifier(uint64_t period) override;

  /**
   * @brief Notify event loop of all pending completed Python futures.
   *
   * This method will notify the Python asyncio event loop of all pending completed
   * futures. Notifying the event loop requires taking the Python GIL, thus it cannot run
   * indefinitely but must instead run periodically. Futures that completed must first be
   * scheduled with `scheduleFutureNotify()`.
   */
  void runRequestNotifier() override;

  /**
   * @brief Make known to the notifier thread that it should stop.
   *
   * Often called when the application is shutting down, make known to the notifier thread
   * that it should stop and exit.
   */
  void stopRequestNotifierThread() override;
};
} // namespace python
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/python/include/ucxx | rapidsai_public_repos/ucxx/cpp/python/include/ucxx/python/api.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <ucxx/python/constructors.h>
#include <ucxx/python/exception.h>
#include <ucxx/python/notifier.h>
#include <ucxx/python/python_future.h>
#include <ucxx/python/worker.h>
| 0 |
rapidsai_public_repos/ucxx/cpp/python/include/ucxx | rapidsai_public_repos/ucxx/cpp/python/include/ucxx/python/future.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <Python.h>
namespace ucxx {
namespace python {
/**
* @brief Create a Python asyncio future.
*
* Create Python asyncio future, effectively equal to calling `asyncio.Future()` directly
* in Python.
*
* Note that this call will take the Python GIL and requires that the current thread have
* an asynchronous event loop set.
*
* @returns The Python asyncio future object.
*/
PyObject* create_python_future();
/**
* @brief Set the result of a Python future.
*
* Set the result of a Python future.
*
* Note that this call will take the Python GIL and requires that the current thread have
* the same asynchronous event loop set as the thread that owns the future.
*
* @param[in] future Python object containing the `_asyncio.Future` object.
* @param[in] value Python object containing an arbitrary value to set the future result
* to.
*
* @returns The result of the call to `_asyncio.Future.set_result()`.
*/
PyObject* future_set_result(PyObject* future, PyObject* value);
/**
* @brief Set the exception of a Python future.
*
* Set the exception of a Python future.
*
* Note that this call will take the Python GIL and requires that the current thread have
* the same asynchronous event loop set as the thread that owns the future.
*
* @param[in] future Python object containing the `_asyncio.Future` object.
* @param[in] exception a Python exception derived of the `Exception` class.
* @param[in] message human-readable error message for the exception.
*
 * @returns The result of the call to `_asyncio.Future.set_exception()`.
*/
PyObject* future_set_exception(PyObject* future, PyObject* exception, const char* message);
} // namespace python
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/python/include/ucxx | rapidsai_public_repos/ucxx/cpp/python/include/ucxx/python/worker.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <ucp/api/ucp.h>
#include <ucxx/python/future.h>
#include <ucxx/python/notifier.h>
#include <ucxx/worker.h>
namespace ucxx {
namespace python {
/**
 * @brief Python implementation of `ucxx::Worker`.
 *
 * Extends `ucxx::Worker` with a pool of Python asyncio futures and the notifier-thread
 * interface used to inform the Python event loop of completed communication requests.
 */
class Worker : public ::ucxx::Worker {
 private:
  /**
   * @brief Private constructor of `ucxx::python::Worker`.
   *
   * This is the internal implementation of `ucxx::python::Worker` constructor, made
   * private not to be called directly. Instead the user should call
   * `ucxx::python::createWorker()`.
   *
   * @param[in] context the context from which to create the worker.
   * @param[in] enableDelayedSubmission if `true`, each `ucxx::Request` will not be
   *                                    submitted immediately, but instead delayed to
   *                                    the progress thread. Requires use of the
   *                                    progress thread.
   * @param[in] enableFuture if `true`, notifies the Python future associated with each
   *                         `ucxx::Request`.
   */
  Worker(std::shared_ptr<Context> context,
         const bool enableDelayedSubmission = false,
         const bool enableFuture            = false);

 public:
  Worker() = delete;
  Worker(const Worker&) = delete;
  Worker& operator=(Worker const&) = delete;
  Worker(Worker&& o)               = delete;
  Worker& operator=(Worker&& o) = delete;

  /**
   * @brief Constructor of `shared_ptr<ucxx::python::Worker>`.
   *
   * The constructor for a `shared_ptr<ucxx::python::Worker>` object. The default
   * constructor is made private to ensure all UCXX objects are shared pointers for correct
   * lifetime management.
   *
   * @code{.cpp}
   * // context is `std::shared_ptr<ucxx::Context>`
   * auto worker = ucxx::createWorker(context, false, false);
   * @endcode
   *
   * @param[in] context the context from which to create the worker.
   * @param[in] enableDelayedSubmission if `true`, each `ucxx::Request` will not be
   *                                    submitted immediately, but instead delayed to
   *                                    the progress thread. Requires use of the
   *                                    progress thread.
   * @param[in] enableFuture if `true`, notifies the Python future associated with each
   *                         `ucxx::Request`.
   * @returns The `shared_ptr<ucxx::python::Worker>` object
   */
  friend std::shared_ptr<::ucxx::Worker> createWorker(std::shared_ptr<Context> context,
                                                      const bool enableDelayedSubmission,
                                                      const bool enableFuture);

  /**
   * @brief Populate the Python future pool.
   *
   * To avoid taking the Python GIL for every new future required by each `ucxx::Request`,
   * the `ucxx::python::Worker` maintains a pool of futures that can be acquired when a new
   * `ucxx::Request` is created. Currently the pool has a maximum size of 100 objects, and
   * will refill once it goes under 50, otherwise calling this function results in a no-op.
   */
  void populateFuturesPool() override;

  /**
   * @brief Get a Python future from the pool.
   *
   * Get a Python future from the pool. If the pool is empty,
   * `ucxx::python::Worker::populateFuturesPool()` is called and a warning is raised, since
   * that likely means the user is missing to call the aforementioned method regularly.
   *
   * @returns The `shared_ptr<ucxx::python::Future>` object
   */
  std::shared_ptr<::ucxx::Future> getFuture() override;

  /**
   * @brief Block until a request event.
   *
   * Blocks until some communication is completed and a Python future is ready to be
   * notified, shutdown was initiated or a timeout occurred (only if `periodNs > 0`).
   * This method is intended for use from the Python notifier thread, where that
   * thread will block until one of the aforementioned events occur.
   *
   * @param[in] periodNs maximum time in nanoseconds to block for, or `0` to block
   *                     until an event occurs.
   *
   * @returns `RequestNotifierWaitState::Ready` if some communication completed,
   *          `RequestNotifierWaitState::Timeout` if a timeout occurred, or
   *          `RequestNotifierWaitState::Shutdown` if shutdown has initiated.
   */
  RequestNotifierWaitState waitRequestNotifier(uint64_t periodNs) override;

  /**
   * @brief Notify Python futures of each completed communication request.
   *
   * Notifies Python futures of each completed communication request of their new status.
   * This method is intended to be used from the Python notifier thread, where the thread
   * will call `waitRequestNotifier()` and block until some communication is completed, and
   * then call this method to notify all futures. If this is notifying a Python future, the
   * thread where this method is called from must be using the same Python event loop as
   * the thread that submitted the transfer request.
   */
  void runRequestNotifier() override;

  /**
   * @brief Signal the notifier to terminate.
   *
   * Signals the notifier to terminate, awakening the `waitRequestNotifier()` blocking call.
   */
  void stopRequestNotifierThread() override;
};
} // namespace python
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/python/include/ucxx | rapidsai_public_repos/ucxx/cpp/python/include/ucxx/python/python_future.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <functional>
#include <memory>
#include <Python.h>
#include <ucp/api/ucp.h>
#include <ucxx/future.h>
#include <ucxx/notifier.h>
#include <ucxx/python/future.h>
namespace ucxx {
namespace python {
/**
 * @brief Python implementation of `ucxx::Future`.
 *
 * Wraps a Python asyncio future (a `PyObject*` handle) whose result or exception is set
 * when the associated UCXX request completes, optionally via a separate notifier thread.
 */
class Future : public ::ucxx::Future {
 private:
  PyObject* _handle{create_python_future()};  ///< The handle to the Python future

  /**
   * @brief Construct a future that may be notified from a notifier thread.
   *
   * Construct a future that may be notified from a notifier running on its own thread and
   * thus will decrease overhead from the application thread.
   *
   * This class may also be used to set the result or exception from any thread, but that
   * currently requires explicitly taking the GIL before calling `set()`.
   *
   * @param[in] notifier notifier object running on a separate thread.
   */
  explicit Future(std::shared_ptr<::ucxx::Notifier> notifier);

 public:
  Future() = delete;
  Future(const Future&) = delete;
  Future& operator=(Future const&) = delete;
  Future(Future&& o)               = delete;
  Future& operator=(Future&& o) = delete;

  /**
   * @brief Constructor of `shared_ptr<ucxx::python::Future>`.
   *
   * The constructor for a `shared_ptr<ucxx::python::Future>` object. The default
   * constructor is made private to ensure all UCXX objects are shared pointers for correct
   * lifetime management.
   *
   * @param[in] notifier notifier object running on a separate thread.
   *
   * @returns The `shared_ptr<ucxx::python::Future>` object
   */
  friend std::shared_ptr<::ucxx::Future> createFuture(std::shared_ptr<::ucxx::Notifier> notifier);

  /**
   * @brief Virtual destructor.
   *
   * Virtual destructor with empty implementation.
   */
  virtual ~Future();

  /**
   * @brief Inform the notifier thread that the future has completed.
   *
   * Inform the notifier thread that the future has completed so it can notify the event
   * loop of that occurrence.
   *
   * @throws std::runtime_error if the object is invalid or has been already released.
   *
   * @param[in] status request completion status.
   */
  void notify(ucs_status_t status);

  /**
   * @brief Set the future completion status.
   *
   * Set the future status as completed, either with a successful completion or error.
   *
   * @throws std::runtime_error if the object is invalid or has been already released.
   *
   * @param[in] status request completion status.
   */
  void set(ucs_status_t status);

  /**
   * @brief Get the underlying `PyObject*` handle but does not release ownership.
   *
   * Get the underlying `PyObject*` handle without releasing ownership. This can be useful
   * for example for logging, where we want to see the address of the pointer but do not
   * want to transfer ownership.
   *
   * @warning The destructor will also destroy the Python future, a pointer taken via this
   * method will cause the object to become invalid.
   *
   * @throws std::runtime_error if the object is invalid or has been already released.
   *
   * @returns The underlying `PyObject*` handle.
   */
  void* getHandle();

  /**
   * @brief Get the underlying `PyObject*` handle and release ownership.
   *
   * Get the underlying `PyObject*` handle releasing ownership. This should be used when
   * the future needs to be permanently transferred to Python code. After calling this
   * method the object becomes invalid for any other uses.
   *
   * @throws std::runtime_error if the object is invalid or has been already released.
   *
   * @returns The underlying `PyObject*` handle.
   */
  void* release();
};
} // namespace python
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/python/include/ucxx | rapidsai_public_repos/ucxx/cpp/python/include/ucxx/python/exception.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <Python.h>
#include <ucp/api/ucp.h>
namespace ucxx {
namespace python {
extern PyObject* UCXXError;
extern PyObject* UCXXNoMessageError;
extern PyObject* UCXXNoResourceError;
extern PyObject* UCXXIOError;
extern PyObject* UCXXNoMemoryError;
extern PyObject* UCXXInvalidParamError;
extern PyObject* UCXXUnreachableError;
extern PyObject* UCXXInvalidAddrError;
extern PyObject* UCXXNotImplementedError;
extern PyObject* UCXXMessageTruncatedError;
extern PyObject* UCXXNoProgressError;
extern PyObject* UCXXBufferTooSmallError;
extern PyObject* UCXXNoElemError;
extern PyObject* UCXXSomeConnectsFailedError;
extern PyObject* UCXXNoDeviceError;
extern PyObject* UCXXBusyError;
extern PyObject* UCXXCanceledError;
extern PyObject* UCXXShmemSegmentError;
extern PyObject* UCXXAlreadyExistsError;
extern PyObject* UCXXOutOfRangeError;
extern PyObject* UCXXTimedOutError;
extern PyObject* UCXXExceedsLimitError;
extern PyObject* UCXXUnsupportedError;
extern PyObject* UCXXRejectedError;
extern PyObject* UCXXNotConnectedError;
extern PyObject* UCXXConnectionResetError;
extern PyObject* UCXXFirstLinkFailureError;
extern PyObject* UCXXLastLinkFailureError;
extern PyObject* UCXXFirstEndpointFailureError;
extern PyObject* UCXXEndpointTimeoutError;
extern PyObject* UCXXLastEndpointFailureError;
extern PyObject* UCXXCloseError;
extern PyObject* UCXXConfigError;
/**
* @brief Create Python exceptions.
*
* Create UCXX-specific Python exceptions such that they are visible both from C/C++ and
* Python.
*/
void create_exceptions();
/**
* @brief Raise a C++ exception in Python.
*
* Raise a C++ exception in Python. When a C++ exception occurs, Python needs to be able
* to be informed of such event and be able to raise a Python exception from it. This
* function raises both general C++ exceptions, as well as UCXX-specific exceptions.
*
* To use this, C++ methods and functions that are exposed to Python via Cython must have
* a `except +raise_py_error` as a declaration suffix.
*/
void raise_py_error();
/**
* @brief Get a Python exception from UCS status.
*
* Given a UCS status, get a matching Python exception object.
*
* @param[in] status UCS status from which to get exception.
*/
PyObject* get_python_exception_from_ucs_status(ucs_status_t status);
} // namespace python
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/python/include/ucxx | rapidsai_public_repos/ucxx/cpp/python/include/ucxx/python/constructors.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <memory>
#include <ucxx/buffer.h>
namespace ucxx {
class Context;
class Future;
class Notifier;
class Worker;
namespace python {
std::shared_ptr<::ucxx::Future> createFuture(std::shared_ptr<::ucxx::Notifier> notifier);
std::shared_ptr<::ucxx::Notifier> createNotifier();
std::shared_ptr<::ucxx::Worker> createWorker(std::shared_ptr<ucxx::Context> context,
const bool enableDelayedSubmission,
const bool enableFuture);
} // namespace python
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/python | rapidsai_public_repos/ucxx/cpp/python/src/exception.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <ios>
#include <stdexcept>
#include <ucxx/exception.h>
#include <ucxx/python/exception.h>
namespace ucxx {
namespace python {
// Storage for the exception objects declared in <ucxx/python/exception.h>; all start as
// null pointers and are instantiated lazily by `create_exceptions()`.
PyObject* UCXXError;

PyObject* UCXXNoMessageError;
PyObject* UCXXNoResourceError;
PyObject* UCXXIOError;
PyObject* UCXXNoMemoryError;
PyObject* UCXXInvalidParamError;
PyObject* UCXXUnreachableError;
PyObject* UCXXInvalidAddrError;
PyObject* UCXXNotImplementedError;
PyObject* UCXXMessageTruncatedError;
PyObject* UCXXNoProgressError;
PyObject* UCXXBufferTooSmallError;
PyObject* UCXXNoElemError;
PyObject* UCXXSomeConnectsFailedError;
PyObject* UCXXNoDeviceError;
PyObject* UCXXBusyError;
PyObject* UCXXCanceledError;
PyObject* UCXXShmemSegmentError;
PyObject* UCXXAlreadyExistsError;
PyObject* UCXXOutOfRangeError;
PyObject* UCXXTimedOutError;
PyObject* UCXXExceedsLimitError;
PyObject* UCXXUnsupportedError;
PyObject* UCXXRejectedError;
PyObject* UCXXNotConnectedError;
PyObject* UCXXConnectionResetError;
PyObject* UCXXFirstLinkFailureError;
PyObject* UCXXLastLinkFailureError;
PyObject* UCXXFirstEndpointFailureError;
PyObject* UCXXEndpointTimeoutError;
PyObject* UCXXLastEndpointFailureError;
PyObject* UCXXCloseError;
PyObject* UCXXConfigError;
/**
 * @brief Create a Python exception type if it does not exist yet.
 *
 * Creates a Python exception class named `ucxx.<name>` derived from `base` (or from
 * Python's built-in `Exception` when `base` is `nullptr`) and stores it in `*exception`.
 * When `*exception` is already non-null the call is a no-op and the existing object is
 * returned, making repeated calls to `create_exceptions()` safe.
 *
 * @param[in,out] exception pointer to the storage holding the exception object.
 * @param[in]     name      unqualified name of the exception class.
 * @param[in]     base      base class for the new exception, or `nullptr` for
 *                          `Exception`.
 *
 * @returns The exception object stored in `*exception`, or `nullptr` if
 *          `PyErr_NewException` failed.
 */
static PyObject* new_exception(PyObject** exception, const char* name, PyObject* base)
{
  if (*exception == nullptr) {
    // `max_len` bounds the fully-qualified name; `snprintf` guarantees NUL-termination.
    constexpr size_t max_len         = 255;
    char qualified_name[max_len]     = {0};
    snprintf(qualified_name, max_len, "ucxx.%s", name);
    *exception = PyErr_NewException(qualified_name, base, nullptr);
  }
  return *exception;
}
void create_exceptions()
{
new_exception(&UCXXError, "UCXXError", NULL);
new_exception(&UCXXNoMessageError, "UCXXNoMessageError", UCXXError);
new_exception(&UCXXNoResourceError, "UCXXNoResourceError", UCXXError);
new_exception(&UCXXIOError, "UCXXIOError", UCXXError);
new_exception(&UCXXNoMemoryError, "UCXXNoMemoryError", UCXXError);
new_exception(&UCXXInvalidParamError, "UCXXInvalidParamError", UCXXError);
new_exception(&UCXXUnreachableError, "UCXXUnreachableError", UCXXError);
new_exception(&UCXXInvalidAddrError, "UCXXInvalidAddrError", UCXXError);
new_exception(&UCXXNotImplementedError, "UCXXNotImplementedError", UCXXError);
new_exception(&UCXXMessageTruncatedError, "UCXXMessageTruncatedError", UCXXError);
new_exception(&UCXXNoProgressError, "UCXXNoProgressError", UCXXError);
new_exception(&UCXXBufferTooSmallError, "UCXXBufferTooSmallError", UCXXError);
new_exception(&UCXXNoElemError, "UCXXNoElemError", UCXXError);
new_exception(&UCXXSomeConnectsFailedError, "UCXXSomeConnectsFailedError", UCXXError);
new_exception(&UCXXNoDeviceError, "UCXXNoDeviceError", UCXXError);
new_exception(&UCXXBusyError, "UCXXBusyError", UCXXError);
new_exception(&UCXXCanceledError, "UCXXCanceledError", UCXXError);
new_exception(&UCXXShmemSegmentError, "UCXXShmemSegmentError", UCXXError);
new_exception(&UCXXAlreadyExistsError, "UCXXAlreadyExistsError", UCXXError);
new_exception(&UCXXOutOfRangeError, "UCXXOutOfRangeError", UCXXError);
new_exception(&UCXXTimedOutError, "UCXXTimedOutError", UCXXError);
new_exception(&UCXXExceedsLimitError, "UCXXExceedsLimitError", UCXXError);
new_exception(&UCXXUnsupportedError, "UCXXUnsupportedError", UCXXError);
new_exception(&UCXXRejectedError, "UCXXRejectedError", UCXXError);
new_exception(&UCXXNotConnectedError, "UCXXNotConnectedError", UCXXError);
new_exception(&UCXXConnectionResetError, "UCXXConnectionResetError", UCXXError);
new_exception(&UCXXFirstLinkFailureError, "UCXXFirstLinkFailureError", UCXXError);
new_exception(&UCXXLastLinkFailureError, "UCXXLastLinkFailureError", UCXXError);
new_exception(&UCXXFirstEndpointFailureError, "UCXXFirstEndpointFailureError", UCXXError);
new_exception(&UCXXEndpointTimeoutError, "UCXXEndpointTimeoutError", UCXXError);
new_exception(&UCXXLastEndpointFailureError, "UCXXLastEndpointFailureError", UCXXError);
new_exception(&UCXXConfigError, "UCXXConfigError", UCXXError);
new_exception(&UCXXCloseError, "UCXXCloseError", UCXXError);
}
// Translate the in-flight C++ exception into the matching Python exception
// via PyErr_SetString().
//
// Must be invoked from inside a `catch` block: it re-raises with a bare
// `throw;` and dispatches on the exception's dynamic type. Ordering matters:
// derived UCXX exception types are matched before the generic `Error` base,
// and all UCXX types before the std:: fallbacks.
// NOTE(review): PyErr_SetString requires the GIL — callers presumably hold
// it; confirm at call sites.
void raise_py_error()
{
  try {
    throw;
  } catch (const NoMessageError& e) {
    PyErr_SetString(UCXXNoMessageError, e.what());
  } catch (const NoResourceError& e) {
    PyErr_SetString(UCXXNoResourceError, e.what());
  } catch (const IOError& e) {
    PyErr_SetString(UCXXIOError, e.what());
  } catch (const NoMemoryError& e) {
    PyErr_SetString(UCXXNoMemoryError, e.what());
  } catch (const InvalidParamError& e) {
    PyErr_SetString(UCXXInvalidParamError, e.what());
  } catch (const UnreachableError& e) {
    PyErr_SetString(UCXXUnreachableError, e.what());
  } catch (const InvalidAddrError& e) {
    PyErr_SetString(UCXXInvalidAddrError, e.what());
  } catch (const NotImplementedError& e) {
    PyErr_SetString(UCXXNotImplementedError, e.what());
  } catch (const MessageTruncatedError& e) {
    PyErr_SetString(UCXXMessageTruncatedError, e.what());
  } catch (const NoProgressError& e) {
    PyErr_SetString(UCXXNoProgressError, e.what());
  } catch (const BufferTooSmallError& e) {
    PyErr_SetString(UCXXBufferTooSmallError, e.what());
  } catch (const NoElemError& e) {
    PyErr_SetString(UCXXNoElemError, e.what());
  } catch (const SomeConnectsFailedError& e) {
    PyErr_SetString(UCXXSomeConnectsFailedError, e.what());
  } catch (const NoDeviceError& e) {
    PyErr_SetString(UCXXNoDeviceError, e.what());
  } catch (const BusyError& e) {
    PyErr_SetString(UCXXBusyError, e.what());
  } catch (const CanceledError& e) {
    PyErr_SetString(UCXXCanceledError, e.what());
  } catch (const ShmemSegmentError& e) {
    PyErr_SetString(UCXXShmemSegmentError, e.what());
  } catch (const AlreadyExistsError& e) {
    PyErr_SetString(UCXXAlreadyExistsError, e.what());
  } catch (const OutOfRangeError& e) {
    PyErr_SetString(UCXXOutOfRangeError, e.what());
  } catch (const TimedOutError& e) {
    PyErr_SetString(UCXXTimedOutError, e.what());
  } catch (const ExceedsLimitError& e) {
    PyErr_SetString(UCXXExceedsLimitError, e.what());
  } catch (const UnsupportedError& e) {
    PyErr_SetString(UCXXUnsupportedError, e.what());
  } catch (const RejectedError& e) {
    PyErr_SetString(UCXXRejectedError, e.what());
  } catch (const NotConnectedError& e) {
    PyErr_SetString(UCXXNotConnectedError, e.what());
  } catch (const ConnectionResetError& e) {
    PyErr_SetString(UCXXConnectionResetError, e.what());
  } catch (const FirstLinkFailureError& e) {
    PyErr_SetString(UCXXFirstLinkFailureError, e.what());
  } catch (const LastLinkFailureError& e) {
    PyErr_SetString(UCXXLastLinkFailureError, e.what());
  } catch (const FirstEndpointFailureError& e) {
    PyErr_SetString(UCXXFirstEndpointFailureError, e.what());
  } catch (const EndpointTimeoutError& e) {
    PyErr_SetString(UCXXEndpointTimeoutError, e.what());
  } catch (const LastEndpointFailureError& e) {
    PyErr_SetString(UCXXLastEndpointFailureError, e.what());
  } catch (const Error& e) {
    // Base class of the UCXX exceptions above; must come last among them.
    PyErr_SetString(UCXXError, e.what());
  } catch (const std::bad_alloc& e) {
    // Standard-library fallbacks, mapped to built-in Python exceptions.
    PyErr_SetString(PyExc_MemoryError, e.what());
  } catch (const std::bad_cast& e) {
    PyErr_SetString(PyExc_TypeError, e.what());
  } catch (const std::bad_typeid& e) {
    PyErr_SetString(PyExc_TypeError, e.what());
  } catch (const std::domain_error& e) {
    PyErr_SetString(PyExc_ValueError, e.what());
  } catch (const std::invalid_argument& e) {
    PyErr_SetString(PyExc_ValueError, e.what());
  } catch (const std::ios_base::failure& e) {
    PyErr_SetString(PyExc_IOError, e.what());
  } catch (const std::out_of_range& e) {
    PyErr_SetString(PyExc_IndexError, e.what());
  } catch (const std::overflow_error& e) {
    PyErr_SetString(PyExc_OverflowError, e.what());
  } catch (const std::range_error& e) {
    PyErr_SetString(PyExc_ArithmeticError, e.what());
  } catch (const std::underflow_error& e) {
    PyErr_SetString(PyExc_ArithmeticError, e.what());
  } catch (const std::exception& e) {
    PyErr_SetString(PyExc_RuntimeError, e.what());
  } catch (...) {
    // Anything not derived from std::exception.
    PyErr_SetString(PyExc_RuntimeError, "Unknown exception");
  }
}
// Map a UCS status code to the corresponding Python exception object.
//
// Only a handful of statuses have dedicated exception types; every other
// status falls back to the generic UCXXError. The returned pointer is a
// borrowed reference to a module-level exception object.
PyObject* get_python_exception_from_ucs_status(ucs_status_t status)
{
  if (status == UCS_ERR_CANCELED) return UCXXCanceledError;
  if (status == UCS_ERR_CONNECTION_RESET) return UCXXConnectionResetError;
  if (status == UCS_ERR_MESSAGE_TRUNCATED) return UCXXMessageTruncatedError;
  return UCXXError;
}
} // namespace python
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/python | rapidsai_public_repos/ucxx/cpp/python/src/python_future.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <stdexcept>
#include <utility>

#include <Python.h>

#include <ucp/api/ucp.h>

#include <ucxx/log.h>
#include <ucxx/python/exception.h>
#include <ucxx/python/python_future.h>
namespace ucxx {
namespace python {
Future::Future(std::shared_ptr<::ucxx::Notifier> notifier) : ::ucxx::Future(notifier) {}
// Factory building a Python-backed Future and exposing it through the
// generic ::ucxx::Future interface.
std::shared_ptr<::ucxx::Future> createFuture(std::shared_ptr<::ucxx::Notifier> notifier)
{
  auto* pythonFuture = new ::ucxx::python::Future(notifier);
  return std::shared_ptr<::ucxx::Future>(pythonFuture);
}
Future::~Future()
{
  // Drop this object's reference to the underlying `asyncio.Future` handle;
  // `Py_XDECREF` tolerates a NULL `_handle` (e.g. after `release()`).
  // TODO: check it is truly safe to require the GIL here. Segfaults can occur
  // if `Py_XDECREF` is called but the thread doesn't currently own the GIL.
  PyGILState_STATE state = PyGILState_Ensure();
  Py_XDECREF(_handle);
  PyGILState_Release(state);
}
// Complete the underlying Python future.
//
// On UCS_OK the future's result is set to `True`; any other status is
// translated to the matching Python exception type and set as the future's
// exception. Throws std::runtime_error if the handle was already released.
void Future::set(ucs_status_t status)
{
  if (_handle == nullptr) throw std::runtime_error("Invalid object or already released");
  ucxx_trace_req(
    "Future::set() this: %p, _handle: %p, status: %s", this, _handle, ucs_status_string(status));
  if (status == UCS_OK)
    future_set_result(_handle, Py_True);
  else
    future_set_exception(
      _handle, get_python_exception_from_ucs_status(status), ucs_status_string(status));
}
// Schedule delivery of `status` to the underlying Python future.
//
// The future is not completed here: it is enqueued on the notifier, and the
// notifier thread later calls `set(status)`. Throws std::runtime_error if the
// handle was already released.
void Future::notify(ucs_status_t status)
{
  if (_handle == nullptr) throw std::runtime_error("Invalid object or already released");

  // Take a single shared reference and reuse it, instead of calling
  // `shared_from_this()` twice (once for tracing, once for scheduling).
  auto self = shared_from_this();

  ucxx_trace_req("Future::notify() this: %p, shared.get(): %p, handle: %p, notifier: %p",
                 this,
                 self.get(),
                 _handle,
                 _notifier.get());

  _notifier->scheduleFutureNotify(std::move(self), status);
}
// Return the raw `PyObject*` handle without transferring ownership.
// Throws std::runtime_error if the handle was already released.
void* Future::getHandle()
{
  if (_handle != nullptr) return _handle;
  throw std::runtime_error("Invalid object or already released");
}
// Transfer ownership of the `PyObject*` handle to the caller, clearing the
// internal pointer so the destructor will not decref it.
// Throws std::runtime_error if the handle was already released.
void* Future::release()
{
  if (_handle == nullptr) throw std::runtime_error("Invalid object or already released");
  auto* releasedHandle = _handle;
  _handle              = nullptr;
  return releasedHandle;
}
} // namespace python
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/python | rapidsai_public_repos/ucxx/cpp/python/src/future.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <ucxx/log.h>
#include <Python.h>
namespace ucxx {
namespace python {
// Interned name strings and the cached `asyncio.Future` class object.
// All three are populated lazily under the GIL and live for the lifetime of
// the process (the cached class object is never decref'd).
PyObject* asyncio_str = NULL;
PyObject* future_str = NULL;
PyObject* asyncio_future_object = NULL;
// Intern the "asyncio" and "Future" name strings used for attribute lookups.
// Returns 0 on success, -1 on failure (with a Python error set). The GIL must
// be held, as with all CPython API calls.
static int intern_strings(void)
{
  if ((asyncio_str = PyUnicode_InternFromString("asyncio")) == NULL) return -1;
  if ((future_str = PyUnicode_InternFromString("Future")) == NULL) return -1;
  return 0;
}
// Ensure the interned strings exist.
// Returns 0 on success; on failure returns -1 and guarantees a Python error
// is set.
static int init_ucxx_python()
{
  if (intern_strings() >= 0) return 0;
  if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError, "could not initialize Python C-API.");
  return -1;
}
// Return the process-lifetime cached `asyncio.Future` class object.
//
// On first use, imports `asyncio` under the GIL and caches its `Future`
// attribute in `asyncio_future_object`. Returns NULL on failure with a
// Python error set; the returned reference is owned by the cache and must
// not be decref'd by callers.
static PyObject* get_asyncio_future_object()
{
  PyObject* asyncio_module = NULL;
  // Fast path: cache already populated.
  // NOTE(review): this check happens before taking the GIL — presumably safe
  // because the cache is written once and never cleared; confirm.
  if (asyncio_future_object) return asyncio_future_object;
  PyGILState_STATE state = PyGILState_Ensure();
  if (init_ucxx_python() < 0) {
    if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError, "could not allocate internals.");
    goto finish;
  }
  asyncio_module = PyImport_Import(asyncio_str);
  if (PyErr_Occurred()) ucxx_trace_req("Python error here");
  if (PyErr_Occurred()) PyErr_Print();
  if (asyncio_module == NULL) goto finish;
  asyncio_future_object = PyObject_GetAttr(asyncio_module, future_str);
  if (PyErr_Occurred()) ucxx_trace_req("Python error here");
  if (PyErr_Occurred()) PyErr_Print();
  // The module reference is no longer needed once the attribute is held.
  Py_DECREF(asyncio_module);
  if (asyncio_future_object == NULL) { goto finish; }
finish:
  PyGILState_Release(state);
  return asyncio_future_object;
}
// Create a new `asyncio.Future` instance.
//
// Acquires the GIL, resolves the (cached) Future class and calls it with no
// arguments. Returns a new reference, or NULL on failure with a Python error
// set.
PyObject* create_python_future()
{
  PyObject* future_object = NULL;
  PyObject* result = NULL;
  PyGILState_STATE state = PyGILState_Ensure();
  if (init_ucxx_python() < 0) {
    if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError, "could not allocate internals.");
    goto finish;
  }
  future_object = get_asyncio_future_object();
  if (future_object == NULL) { goto finish; }
  // Sanity check: the resolved attribute must be a callable class object.
  if (!PyCallable_Check(future_object)) {
    PyErr_Format(PyExc_RuntimeError,
                 "%s.%s is not callable.",
                 PyUnicode_1BYTE_DATA(asyncio_str),
                 PyUnicode_1BYTE_DATA(future_str));
    goto finish;
  }
  result = PyObject_CallFunctionObjArgs(future_object, NULL);
  if (PyErr_Occurred()) ucxx_trace_req("Python error here");
  if (PyErr_Occurred()) PyErr_Print();
finish:
  PyGILState_Release(state);
  return result;
}
// Look up the C-level implementation of an `asyncio.Future` method (e.g.
// "set_result" or "set_exception") by scanning the type's `tp_methods` table.
//
// Returns the method's `PyCFunction` pointer, or NULL on failure with a
// Python error set.
static PyCFunction get_future_method(const char* method_name)
{
  PyCFunction result = NULL;

  PyGILState_STATE state = PyGILState_Ensure();

  PyObject* future_object = get_asyncio_future_object();
  if (PyErr_Occurred()) ucxx_trace_req("Python error here");
  if (PyErr_Occurred()) PyErr_Print();

  if (future_object != NULL) {
    // `tp_methods` is terminated by a sentinel entry whose `ml_name` is NULL;
    // iterating until the pointer itself is NULL would walk past the table.
    PyMethodDef* m = reinterpret_cast<PyTypeObject*>(future_object)->tp_methods;
    for (; m != NULL && m->ml_name != NULL; ++m) {
      if (!strcmp(m->ml_name, method_name)) {
        result = m->ml_meth;
        break;
      }
    }
  }

  if (!result)
    PyErr_Format(
      PyExc_RuntimeError, "Unable to load function pointer for `Future.%s`.", method_name);

  PyGILState_Release(state);
  return result;
}
// Invoke `future.set_result(value)` through the cached C function pointer.
//
// Returns the call's result, or NULL on failure with a Python error set.
PyObject* future_set_result(PyObject* future, PyObject* value)
{
  PyObject* result = NULL;

  PyGILState_STATE state = PyGILState_Ensure();
  PyCFunction f = get_future_method("set_result");
  // Guard against lookup failure: calling a NULL function pointer would
  // crash; `get_future_method` has already set a Python error in that case.
  if (f != NULL) {
    result = f(future, value);
    if (PyErr_Occurred()) ucxx_trace_req("Python error here");
    if (PyErr_Occurred()) PyErr_Print();
  }
  PyGILState_Release(state);
  return result;
}
// Invoke `future.set_exception(exception(message))`.
//
// Builds an exception instance from the `exception` type and `message`, then
// sets it on the future through the cached C function pointer. Returns the
// call's result, or NULL on failure with a Python error set.
PyObject* future_set_exception(PyObject* future, PyObject* exception, const char* message)
{
  PyObject* result = NULL;
  PyObject* message_object = NULL;
  PyObject* message_tuple = NULL;
  PyObject* formed_exception = NULL;
  PyCFunction f = NULL;

  PyGILState_STATE state = PyGILState_Ensure();

  message_object = PyUnicode_FromString(message);
  if (message_object == NULL) goto err;
  message_tuple = PyTuple_Pack(1, message_object);
  if (message_tuple == NULL) goto err;
  formed_exception = PyObject_Call(exception, message_tuple, NULL);
  if (formed_exception == NULL) goto err;

  // Guard against lookup failure: calling a NULL function pointer would crash.
  f = get_future_method("set_exception");
  if (f == NULL) goto err;
  result = f(future, formed_exception);
  goto finish;

err:
  PyErr_Format(PyExc_RuntimeError, "Error while setting exception for `asyncio.Future`.");
finish:
  // Temporaries are owned here; the future keeps its own reference.
  Py_XDECREF(message_object);
  Py_XDECREF(message_tuple);
  Py_XDECREF(formed_exception);
  PyGILState_Release(state);
  return result;
}
} // namespace python
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/python | rapidsai_public_repos/ucxx/cpp/python/src/worker.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <functional>
#include <ios>
#include <memory>
#include <mutex>
#include <sstream>
#include <Python.h>
#include <ucxx/internal/request_am.h>
#include <ucxx/python/constructors.h>
#include <ucxx/python/future.h>
#include <ucxx/python/worker.h>
#include <ucxx/request_tag.h>
#include <ucxx/utils/python.h>
namespace ucxx {
namespace python {
// Construct a Python-enabled Worker on top of the core ::ucxx::Worker.
//
// When `enableFuture` is set, a notifier is created so request completions
// can be delivered to Python `asyncio` futures.
Worker::Worker(std::shared_ptr<Context> context,
               const bool enableDelayedSubmission,
               const bool enableFuture)
  : ::ucxx::Worker(context, enableDelayedSubmission, enableFuture)
{
  // The notifier is only needed when Python future support is enabled.
  if (_enableFuture) _notifier = createNotifier();
}
// Factory for a Python-enabled Worker.
//
// Besides constructing the worker, wires the Active Messages data back to the
// new worker and records a human-readable owner string for tracing.
std::shared_ptr<::ucxx::Worker> createWorker(std::shared_ptr<Context> context,
                                             const bool enableDelayedSubmission,
                                             const bool enableFuture)
{
  auto worker = std::shared_ptr<::ucxx::python::Worker>(
    new ::ucxx::python::Worker(context, enableDelayedSubmission, enableFuture));
  // We can only get a `shared_ptr<Worker>` for the Active Messages callback after it's
  // been created, thus this cannot be in the constructor.
  if (worker->_amData != nullptr) {
    worker->_amData->_worker = worker;
    std::stringstream ownerStream;
    ownerStream << "worker " << worker->getHandle();
    worker->_amData->_ownerString = ownerStream.str();
  }
  return worker;
}
// Refill the pool of preallocated Python futures.
//
// Acquiring the GIL is expensive, so futures are created in batches: when the
// pool drops below half the target size (100), it is topped back up under a
// single GIL acquisition.
//
// Throws std::runtime_error if the worker was created with
// enableFuture=false.
void Worker::populateFuturesPool()
{
  if (!_enableFuture)
    throw std::runtime_error(
      "Worker future support disabled, please set enableFuture=true when creating the "
      "Worker to use this method.");

  ucxx_trace_req("populateFuturesPool: %p %p", this, shared_from_this().get());

  // Check the pool size while holding the lock; checking beforehand would
  // race with concurrent consumers popping from the pool.
  std::lock_guard<std::mutex> lock(_futuresPoolMutex);
  // If the pool goes under half expected size, fill it up again.
  if (_futuresPool.size() < 50) {
    PyGILState_STATE state = PyGILState_Ensure();
    while (_futuresPool.size() < 100)
      _futuresPool.emplace(createFuture(_notifier));
    PyGILState_Release(state);
  }
}
// Pop one preallocated future from the pool, refilling the pool first if it
// is empty (which is inefficient and logged as a warning).
//
// Throws std::runtime_error if the worker was created with
// enableFuture=false.
std::shared_ptr<::ucxx::Future> Worker::getFuture()
{
  if (!_enableFuture)
    throw std::runtime_error(
      "Worker future support disabled, please set enableFuture=true when creating the "
      "Worker to use this method.");

  if (_futuresPool.size() == 0) {
    ucxx_warn(
      "No Futures available during getFuture(), make sure the Notifier is running "
      "and calling populateFuturesPool() periodically. Filling futures pool "
      "now, but this may be inefficient.");
    populateFuturesPool();
  }

  std::shared_ptr<::ucxx::Future> ret{nullptr};
  {
    std::lock_guard<std::mutex> lock(_futuresPoolMutex);
    ret = _futuresPool.front();
    _futuresPool.pop();
  }
  ucxx_trace_req("getFuture: %p %p", ret.get(), ret->getHandle());
  // `ret` already has the interface type; no cast is needed.
  return ret;
}
// Block until a future notification is ready, shutdown is requested, or the
// optional timeout (`periodNs`, 0 == no timeout) expires.
// Throws std::runtime_error if future support is disabled.
RequestNotifierWaitState Worker::waitRequestNotifier(uint64_t periodNs)
{
  if (!_enableFuture)
    throw std::runtime_error(
      "Worker future support disabled, please set enableFuture=true when creating the "
      "Worker to use this method.");
  return _notifier->waitRequestNotifier(periodNs);
}
// Deliver all pending future notifications via the notifier.
// Throws std::runtime_error if future support is disabled.
void Worker::runRequestNotifier()
{
  if (!_enableFuture)
    throw std::runtime_error(
      "Worker future support disabled, please set enableFuture=true when creating the "
      "Worker to use this method.");
  _notifier->runRequestNotifier();
}
// Request shutdown of the notifier thread.
// Throws std::runtime_error if future support is disabled.
void Worker::stopRequestNotifierThread()
{
  if (!_enableFuture)
    throw std::runtime_error(
      "Worker future support disabled, please set enableFuture=true when creating the "
      "Worker to use this method.");
  _notifier->stopRequestNotifierThread();
}
} // namespace python
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/python | rapidsai_public_repos/ucxx/cpp/python/src/notifier.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <mutex>
#include <utility>
#include <ucxx/log.h>
#include <ucxx/python/notifier.h>
#include <ucxx/python/python_future.h>
namespace ucxx {
namespace python {
// Build a Python notifier and expose it through the generic interface.
std::shared_ptr<::ucxx::Notifier> createNotifier()
{
  auto* notifier = new ::ucxx::python::Notifier();
  return std::shared_ptr<::ucxx::Notifier>(notifier);
}
Notifier::~Notifier() {}
// Enqueue a (future, status) pair for delivery by the notifier thread.
//
// The future is not completed here: the pair is pushed onto the pending list
// and the waiting thread is woken; `runRequestNotifier()` later calls
// `Future::set(status)` on it.
void Notifier::scheduleFutureNotify(std::shared_ptr<::ucxx::Future> future, ucs_status_t status)
{
  ucxx_trace_req(
    "Notifier::scheduleFutureNotify(): future: %p, handle: %p", future.get(), future->getHandle());
  auto p = std::make_pair(future, status);
  {
    // Publish under the lock; notify after releasing it so the waiter does
    // not wake while the mutex is still held.
    std::lock_guard<std::mutex> lock(_notifierThreadMutex);
    _notifierThreadFutureStatus.push_back(p);
    _notifierThreadFutureStatusReady = true;
  }
  _notifierThreadConditionVariable.notify_one();
  ucxx_trace_req("Notifier::scheduleFutureNotify() notified: future: %p, handle: %p",
                 future.get(),
                 future->getHandle());
}
// Complete all futures scheduled via `scheduleFutureNotify()`.
//
// The pending list is swapped out under the lock, then each future is
// completed outside the lock so that Python-side work cannot block further
// scheduling.
void Notifier::runRequestNotifier()
{
  decltype(_notifierThreadFutureStatus) notifierThreadFutureStatus;
  {
    std::unique_lock<std::mutex> lock(_notifierThreadMutex);
    notifierThreadFutureStatus = std::move(_notifierThreadFutureStatus);
  }
  ucxx_trace_req("Notifier::runRequestNotifier() notifying %lu", notifierThreadFutureStatus.size());
  for (auto& p : notifierThreadFutureStatus) {
    // Completes the future: sets its result or exception from the status.
    p.first->set(p.second);
    ucxx_trace_req("Notifier::runRequestNotifier() notified future: %p, handle: %p",
                   p.first.get(),
                   p.first->getHandle());
  }
}
// Wait (without timeout) until a notification is ready or shutdown is
// requested, returning the corresponding wait state. The ready flag is
// consumed (reset) before returning.
RequestNotifierWaitState Notifier::waitRequestNotifierWithoutTimeout()
{
  ucxx_trace_req("Notifier::waitRequestNotifierWithoutTimeout()");
  std::unique_lock<std::mutex> lock(_notifierThreadMutex);
  _notifierThreadConditionVariable.wait(lock, [this] {
    return _notifierThreadFutureStatusReady ||
           _notifierThreadFutureStatusFinished == RequestNotifierThreadState::Stopping;
  });
  // Ready takes precedence over shutdown if both conditions hold.
  auto state = _notifierThreadFutureStatusReady ? RequestNotifierWaitState::Ready
                                                : RequestNotifierWaitState::Shutdown;
  ucxx_trace_req("Notifier::waitRequestNotifier() unlock: %d", static_cast<int>(state));
  _notifierThreadFutureStatusReady = false;
  return state;
}
// Wait up to `period` nanoseconds until a notification is ready or shutdown
// is requested. Returns Timeout if the predicate did not become true in time;
// the ready flag is only consumed when Ready is returned.
RequestNotifierWaitState Notifier::waitRequestNotifierWithTimeout(uint64_t period)
{
  ucxx_trace_req("Notifier::waitRequestNotifierWithTimeout()");
  std::unique_lock<std::mutex> lock(_notifierThreadMutex);
  // `wait_for` returns false only if the timeout elapsed with the predicate
  // still false.
  bool condition = _notifierThreadConditionVariable.wait_for(
    lock, std::chrono::duration<uint64_t, std::nano>(period), [this] {
      return _notifierThreadFutureStatusReady ||
             _notifierThreadFutureStatusFinished == RequestNotifierThreadState::Stopping;
    });
  auto state = (condition ? (_notifierThreadFutureStatusReady ? RequestNotifierWaitState::Ready
                                                              : RequestNotifierWaitState::Shutdown)
                          : RequestNotifierWaitState::Timeout);
  ucxx_trace_req("Notifier::waitRequestNotifier() unlock: %d", static_cast<int>(state));
  if (state == RequestNotifierWaitState::Ready) _notifierThreadFutureStatusReady = false;
  return state;
}
// Wait for the next notification, with (`period` > 0, in nanoseconds) or
// without a timeout. A pending shutdown request is consumed first.
RequestNotifierWaitState Notifier::waitRequestNotifier(uint64_t period)
{
  ucxx_trace_req("Notifier::waitRequestNotifier()");
  // NOTE(review): `_notifierThreadFutureStatusFinished` is read and reset
  // here without holding `_notifierThreadMutex` — presumably a single
  // notifier thread calls this; confirm.
  if (_notifierThreadFutureStatusFinished == RequestNotifierThreadState::Stopping) {
    _notifierThreadFutureStatusFinished = RequestNotifierThreadState::Running;
    return RequestNotifierWaitState::Shutdown;
  }
  return (period > 0) ? waitRequestNotifierWithTimeout(period)
                      : waitRequestNotifierWithoutTimeout();
}
// Request the notifier thread to stop: mark the Stopping state under the lock
// and wake every waiter so they observe it.
void Notifier::stopRequestNotifierThread()
{
  {
    std::lock_guard<std::mutex> lock(_notifierThreadMutex);
    _notifierThreadFutureStatusFinished = RequestNotifierThreadState::Stopping;
  }
  _notifierThreadConditionVariable.notify_all();
}
} // namespace python
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/tests/endpoint.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include <ucxx/api.h>
namespace {
// Fixture creating two independent contexts/workers, so tests can exercise
// endpoint behavior between distinct "local" and "remote" peers in-process.
class EndpointTest : public ::testing::Test {
 protected:
  std::shared_ptr<ucxx::Context> _context{
    ucxx::createContext({}, ucxx::Context::defaultFeatureFlags)};
  std::shared_ptr<ucxx::Context> _remoteContext{
    ucxx::createContext({}, ucxx::Context::defaultFeatureFlags)};
  std::shared_ptr<ucxx::Worker> _worker{nullptr};
  std::shared_ptr<ucxx::Worker> _remoteWorker{nullptr};
  virtual void SetUp()
  {
    _worker = _context->createWorker();
    _remoteWorker = _remoteContext->createWorker();
  }
};
// An endpoint created from a worker address must expose a non-null UCP handle.
TEST_F(EndpointTest, HandleIsValid)
{
  auto ep = _worker->createEndpointFromWorkerAddress(_worker->getAddress());
  _worker->progress();
  ASSERT_TRUE(ep->getHandle() != nullptr);
}
// isAlive() should flip to false after the remote worker/context go away.
// Currently skipped: address-based endpoints do not appear to trigger the
// endpoint error handler.
TEST_F(EndpointTest, IsAlive)
{
  GTEST_SKIP()
    << "Connecting to worker via its UCX address doesn't seem to call endpoint error handler";
  auto ep = _worker->createEndpointFromWorkerAddress(_remoteWorker->getAddress());
  _worker->progress();
  _remoteWorker->progress();
  ASSERT_TRUE(ep->isAlive());
  std::vector<int> buf{123};
  auto send_req = ep->tagSend(buf.data(), buf.size() * sizeof(int), 0);
  while (!send_req->isCompleted())
    _worker->progress();
  // Tear down the remote side entirely, then progress until death is noticed.
  _remoteWorker = nullptr;
  _remoteContext = nullptr;
  _worker->progress();
  ASSERT_FALSE(ep->isAlive());
}
} // namespace
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/tests/CMakeLists.txt | # ======================================================================================================
# SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ======================================================================================================
# ##################################################################################################
# * compiler function -----------------------------------------------------------------------------
# This function takes in a test name and test source and handles setting all of the associated
# properties and linking to build the test
function(ConfigureTest CMAKE_TEST_NAME)
  add_executable(${CMAKE_TEST_NAME} ${ARGN})
  # Place the test binary under the build tree's gtests/ directory and make
  # the installed binary resolve libucxx relative to its own location.
  set_target_properties(
    ${CMAKE_TEST_NAME}
    PROPERTIES RUNTIME_OUTPUT_DIRECTORY "$<BUILD_INTERFACE:${UCXX_BINARY_DIR}/gtests>"
               INSTALL_RPATH "\$ORIGIN/../../../lib"
               CXX_STANDARD 17
               CXX_STANDARD_REQUIRED ON
  )
  # Tests include internal headers directly from the source tree.
  target_include_directories(
    ${CMAKE_TEST_NAME} PUBLIC "$<BUILD_INTERFACE:${UCXX_SOURCE_DIR}>"
                              "$<BUILD_INTERFACE:${UCXX_SOURCE_DIR}/src>"
  )
  target_link_libraries(
    ${CMAKE_TEST_NAME} PRIVATE ucxx GTest::gmock_main GTest::gtest_main
                               $<TARGET_NAME_IF_EXISTS:conda_env>
  )
  add_test(NAME ${CMAKE_TEST_NAME} COMMAND ${CMAKE_TEST_NAME})
  # Tests are an optional install component, excluded from the default target.
  install(
    TARGETS ${CMAKE_TEST_NAME}
    COMPONENT testing
    DESTINATION bin/gtests/libucxx
    EXCLUDE_FROM_ALL
  )
endfunction()
# ##################################################################################################
# test sources ##################################################################################
# ##################################################################################################
# ##################################################################################################
# * ucxx tests ------------------------------------------------------------------------------------
# Single gtest binary aggregating all C++ test suites.
ConfigureTest(
  UCXX_TEST
  buffer.cpp
  config.cpp
  context.cpp
  endpoint.cpp
  header.cpp
  listener.cpp
  request.cpp
  utils.cpp
  worker.cpp
)
# ##################################################################################################
# enable testing ################################################################################
# ##################################################################################################
enable_testing()
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/tests/buffer.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <algorithm>
#include <numeric>
#include <utility>
#include <gtest/gtest.h>
#include <ucxx/api.h>
namespace {
// Parameterized fixture allocating one buffer per test, with the buffer kind
// (Host or RMM) and its size taken from the test parameter.
class BufferAllocator : public ::testing::Test,
                        public ::testing::WithParamInterface<std::pair<ucxx::BufferType, size_t>> {
 protected:
  ucxx::BufferType _type;
  size_t _size;
  std::shared_ptr<ucxx::Buffer> _buffer;
  void SetUp()
  {
    auto param = GetParam();
    _type = param.first;
    _size = param.second;
    _buffer = allocateBuffer(_type, _size);
  }
};
// getType() must report the requested kind, and release() must invalidate it.
TEST_P(BufferAllocator, TestType)
{
  ASSERT_EQ(_buffer->getType(), _type);
  if (_type == ucxx::BufferType::Host) {
    auto buffer = std::dynamic_pointer_cast<ucxx::HostBuffer>(_buffer);
    ASSERT_EQ(buffer->getType(), _type);
    // After release() the buffer no longer owns memory; the caller frees it.
    auto releasedBuffer = buffer->release();
    ASSERT_EQ(buffer->getType(), ucxx::BufferType::Invalid);
    free(releasedBuffer);
  } else if (_type == ucxx::BufferType::RMM) {
#if UCXX_ENABLE_RMM
    auto buffer = std::dynamic_pointer_cast<ucxx::RMMBuffer>(_buffer);
    ASSERT_EQ(buffer->getType(), _type);
    auto releasedBuffer = buffer->release();
    ASSERT_EQ(buffer->getType(), ucxx::BufferType::Invalid);
#else
    GTEST_SKIP() << "UCXX was not built with RMM support";
#endif
  }
  ASSERT_EQ(_buffer->getType(), ucxx::BufferType::Invalid);
}
// getSize() must match the requested size, and drop to zero after release().
TEST_P(BufferAllocator, TestSize)
{
  ASSERT_EQ(_buffer->getSize(), _size);
  if (_type == ucxx::BufferType::Host) {
    auto buffer = std::dynamic_pointer_cast<ucxx::HostBuffer>(_buffer);
    ASSERT_EQ(buffer->getSize(), _size);
    auto releasedBuffer = buffer->release();
    ASSERT_EQ(buffer->getSize(), 0u);
    free(releasedBuffer);
  } else if (_type == ucxx::BufferType::RMM) {
#if UCXX_ENABLE_RMM
    auto buffer = std::dynamic_pointer_cast<ucxx::RMMBuffer>(_buffer);
    ASSERT_EQ(buffer->getSize(), _size);
    auto releasedBuffer = buffer->release();
    ASSERT_EQ(buffer->getSize(), 0u);
#else
    GTEST_SKIP() << "UCXX was not built with RMM support";
#endif
  }
  ASSERT_EQ(_buffer->getSize(), 0u);
}
// data() must be non-null while owned, and throw after release().
TEST_P(BufferAllocator, TestData)
{
  ASSERT_NE(_buffer->data(), nullptr);
  if (_type == ucxx::BufferType::Host) {
    auto buffer = std::dynamic_pointer_cast<ucxx::HostBuffer>(_buffer);
    ASSERT_EQ(buffer->data(), _buffer->data());
    auto releasedBuffer = buffer->release();
    ASSERT_NE(releasedBuffer, nullptr);
    free(releasedBuffer);
  } else if (_type == ucxx::BufferType::RMM) {
#if UCXX_ENABLE_RMM
    auto buffer = std::dynamic_pointer_cast<ucxx::RMMBuffer>(_buffer);
    ASSERT_EQ(buffer->data(), _buffer->data());
    auto releasedBuffer = buffer->release();
    EXPECT_THROW(buffer->data(), std::runtime_error);
    ASSERT_NE(releasedBuffer, nullptr);
#else
    GTEST_SKIP() << "UCXX was not built with RMM support";
#endif
  }
  EXPECT_THROW(_buffer->data(), std::runtime_error);
}
// Once released, both data() and a second release() must throw.
TEST_P(BufferAllocator, TestThrowAfterRelease)
{
  if (_type == ucxx::BufferType::Host) {
    auto buffer = std::dynamic_pointer_cast<ucxx::HostBuffer>(_buffer);
    auto releasedBuffer = buffer->release();
    EXPECT_THROW(buffer->data(), std::runtime_error);
    EXPECT_THROW(buffer->release(), std::runtime_error);
    free(releasedBuffer);
  } else if (_type == ucxx::BufferType::RMM) {
#if UCXX_ENABLE_RMM
    auto buffer = std::dynamic_pointer_cast<ucxx::RMMBuffer>(_buffer);
    auto releasedBuffer = buffer->release();
    EXPECT_THROW(buffer->data(), std::runtime_error);
    EXPECT_THROW(buffer->release(), std::runtime_error);
#else
    GTEST_SKIP() << "UCXX was not built with RMM support";
#endif
  }
  EXPECT_THROW(_buffer->data(), std::runtime_error);
}
// Instantiate the allocator tests for host memory at several sizes; device
// (RMM) instantiations are compiled only when RMM support is enabled.
INSTANTIATE_TEST_SUITE_P(Host,
                         BufferAllocator,
                         testing::Values(std::make_pair(ucxx::BufferType::Host, 1),
                                         std::make_pair(ucxx::BufferType::Host, 1000),
                                         std::make_pair(ucxx::BufferType::Host, 1000000)));
#if UCXX_ENABLE_RMM
INSTANTIATE_TEST_SUITE_P(RMM,
                         BufferAllocator,
                         testing::Values(std::make_pair(ucxx::BufferType::RMM, 1),
                                         std::make_pair(ucxx::BufferType::RMM, 1000),
                                         std::make_pair(ucxx::BufferType::RMM, 1000000)));
#endif
} // namespace
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/tests/listener.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include <ucxx/api.h>
#include "include/utils.h"
namespace {
// Upper bound on progress() iterations while waiting for an event, and on
// retries of inherently timing-sensitive (flaky) checks.
constexpr size_t MaxProgressAttempts = 50;
constexpr size_t MaxFlakyAttempts = 3;
// Shared state handed to the listener callback: records the status of the
// connection-request query and holds the endpoint accepted from it.
struct ListenerContainer {
  ucs_status_t status{UCS_OK};
  std::shared_ptr<ucxx::Worker> worker{nullptr};
  std::shared_ptr<ucxx::Listener> listener{nullptr};
  std::shared_ptr<ucxx::Endpoint> endpoint{nullptr};
  bool transferCompleted{false};
};
typedef std::shared_ptr<ListenerContainer> ListenerContainerPtr;
// UCP listener connection callback: query the request's client address and,
// on success, accept it by creating an endpoint stored in the container.
static void listenerCallback(ucp_conn_request_h connRequest, void* arg)
{
  ListenerContainer* listenerContainer = reinterpret_cast<ListenerContainer*>(arg);
  ucp_conn_request_attr_t attr{};
  attr.field_mask = UCP_CONN_REQUEST_ATTR_FIELD_CLIENT_ADDR;
  listenerContainer->status = ucp_conn_request_query(connRequest, &attr);
  // On query failure the request is left unaccepted; tests check `status`.
  if (listenerContainer->status != UCS_OK) return;
  listenerContainer->endpoint =
    listenerContainer->listener->createEndpointFromConnRequest(connRequest);
}
// Fixture providing a context/worker and helpers to build a listener plus the
// shared container its callback fills in.
class ListenerTest : public ::testing::Test {
 protected:
  std::shared_ptr<ucxx::Context> _context{
    ucxx::createContext({}, ucxx::Context::defaultFeatureFlags)};
  std::shared_ptr<ucxx::Worker> _worker{nullptr};
  virtual void SetUp() { _worker = _context->createWorker(); }
  ListenerContainerPtr createListenerContainer()
  {
    auto listenerContainer = std::make_shared<ListenerContainer>();
    listenerContainer->worker = _worker;
    return listenerContainer;
  }
  // Listens on an OS-assigned port (0); overridden by ListenerPortTest.
  virtual std::shared_ptr<ucxx::Listener> createListener(ListenerContainerPtr listenerContainer)
  {
    auto listener = _worker->createListener(0, listenerCallback, listenerContainer.get());
    listenerContainer->listener = listener;
    return listener;
  }
};
// Variant of ListenerTest whose listener binds the port given as the test
// parameter (0 means OS-assigned).
class ListenerPortTest : public ListenerTest, public ::testing::WithParamInterface<uint16_t> {
 protected:
  virtual std::shared_ptr<ucxx::Listener> createListener(ListenerContainerPtr listenerContainer)
  {
    auto listener = _worker->createListener(GetParam(), listenerCallback, listenerContainer.get());
    listenerContainer->listener = listener;
    return listener;
  }
};
// A freshly created listener must expose a non-null UCP handle.
TEST_F(ListenerTest, HandleIsValid)
{
  auto listenerContainer = createListenerContainer();
  auto listener = createListener(listenerContainer);
  _worker->progress();
  ASSERT_TRUE(listener->getHandle() != nullptr);
}
// Port 0 requests an OS-assigned port, which must fall outside the privileged
// range; any explicitly requested port must be assigned verbatim.
TEST_P(ListenerPortTest, Port)
{
  auto listenerContainer = createListenerContainer();
  auto listener = createListener(listenerContainer);
  _worker->progress();
  if (GetParam() == 0)
    ASSERT_GE(listener->getPort(), 1024);
  else
    // Compare against the parameter instead of hardcoding the port value, so
    // adding instantiations doesn't silently break the assertion.
    ASSERT_EQ(listener->getPort(), GetParam());
}
INSTANTIATE_TEST_SUITE_P(PortAssignment, ListenerPortTest, ::testing::Values(0, 12345));
// Full tag send/recv roundtrip between a client endpoint and the endpoint
// accepted by the listener.
TEST_F(ListenerTest, EndpointSendRecv)
{
  auto listenerContainer = createListenerContainer();
  auto listener = createListener(listenerContainer);
  auto progress = getProgressFunction(_worker, ProgressMode::Polling);
  progress();
  auto ep = _worker->createEndpointFromHostname("127.0.0.1", listener->getPort());
  while (listenerContainer->endpoint == nullptr)
    progress();

  std::vector<std::shared_ptr<ucxx::Request>> requests;

  // Client -> server on tag 0.
  std::vector<int> client_buf{123};
  std::vector<int> server_buf{0};
  requests.push_back(ep->tagSend(client_buf.data(), client_buf.size() * sizeof(int), 0));
  requests.push_back(
    listenerContainer->endpoint->tagRecv(&server_buf.front(), server_buf.size() * sizeof(int), 0));
  ::waitRequests(_worker, requests, progress);
  ASSERT_EQ(server_buf[0], client_buf[0]);

  // Server -> client reply on tag 1.
  requests.push_back(
    listenerContainer->endpoint->tagSend(&server_buf.front(), server_buf.size() * sizeof(int), 1));
  requests.push_back(ep->tagRecv(client_buf.data(), client_buf.size() * sizeof(int), 1));
  ::waitRequests(_worker, requests, progress);
  ASSERT_EQ(client_buf[0], server_buf[0]);
}
// isAlive() should flip to false after the server-side endpoint is dropped.
TEST_F(ListenerTest, IsAlive)
{
  auto listenerContainer = createListenerContainer();
  auto listener = createListener(listenerContainer);
  _worker->progress();
  auto ep = _worker->createEndpointFromHostname("127.0.0.1", listener->getPort());
  while (listenerContainer->endpoint == nullptr)
    _worker->progress();
  ASSERT_TRUE(ep->isAlive());
  std::vector<int> buf{123};
  auto send_req = ep->tagSend(buf.data(), buf.size() * sizeof(int), 0);
  while (!send_req->isCompleted())
    _worker->progress();
  // Drop the accepted endpoint, then progress until teardown is observed
  // (bounded, since the exact timing is transport-dependent).
  listenerContainer->endpoint = nullptr;
  for (size_t attempt = 0; attempt < MaxProgressAttempts && ep->isAlive(); ++attempt)
    _worker->progress();
  ASSERT_FALSE(ep->isAlive());
}
// raiseOnError() must throw ucxx::Error once the remote endpoint is gone.
// Connection teardown timing is nondeterministic, so the whole check is
// retried up to MaxFlakyAttempts times before failing for real.
TEST_F(ListenerTest, RaiseOnError)
{
  auto run = [this](bool lastAttempt) {
    auto listenerContainer = createListenerContainer();
    auto listener = createListener(listenerContainer);
    _worker->progress();
    auto ep = _worker->createEndpointFromHostname("127.0.0.1", listener->getPort());
    while (listenerContainer->endpoint == nullptr)
      _worker->progress();
    listenerContainer->endpoint = nullptr;
    bool success = false;
    for (size_t attempt = 0; attempt < MaxProgressAttempts; ++attempt) {
      try {
        _worker->progress();
        ep->raiseOnError();
      } catch (const ucxx::Error&) {  // catch by const reference, not by value
        success = true;
        break;
      }
    }
    if (!success && !lastAttempt) return false;
    EXPECT_THROW(ep->raiseOnError(), ucxx::Error);
    return true;
  };

  for (size_t flakyAttempt = 0; flakyAttempt < MaxFlakyAttempts; ++flakyAttempt) {
    if (run(flakyAttempt == MaxFlakyAttempts - 1)) break;
  }
}
// The user-registered close callback must fire when the remote endpoint is
// torn down.
TEST_F(ListenerTest, CloseCallback)
{
  auto listenerContainer = createListenerContainer();
  auto listener = createListener(listenerContainer);
  _worker->progress();
  auto ep = _worker->createEndpointFromHostname("127.0.0.1", listener->getPort());
  bool isClosed = false;
  // The callback receives `&isClosed` as opaque user data and sets it.
  ep->setCloseCallback([](void* isClosed) { *reinterpret_cast<bool*>(isClosed) = true; },
                       reinterpret_cast<void*>(&isClosed));
  while (listenerContainer->endpoint == nullptr)
    _worker->progress();
  ASSERT_FALSE(isClosed);
  listenerContainer->endpoint = nullptr;
  // Progress (bounded) until the close callback runs.
  for (size_t attempt = 0; attempt < MaxProgressAttempts && !isClosed; ++attempt)
    _worker->progress();
  ASSERT_TRUE(isClosed);
}
} // namespace
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/tests/utils.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <vector>
#include "include/utils.h"
// Progress-thread start callback: force lazy CUDA context creation on the
// calling thread. `cudaFree(nullptr)` is a documented no-op whose only effect
// is initializing the context. The callback argument is unused.
void createCudaContextCallback(void* callbackArg)
{
  (void)callbackArg;  // required by the callback signature, intentionally unused
  // Force CUDA context creation
  cudaFree(nullptr);
}
// Map a progress mode to a callable that drives `worker` once. Modes that
// progress from a dedicated thread yield an empty std::function, which
// callers test for truthiness before invoking.
std::function<void()> getProgressFunction(std::shared_ptr<ucxx::Worker> worker,
                                          ProgressMode progressMode)
{
  switch (progressMode) {
    case ProgressMode::Polling: return [worker]() { worker->progress(); };
    case ProgressMode::Blocking: return [worker]() { worker->progressWorkerEvent(-1); };
    case ProgressMode::Wait: return [worker]() { worker->waitProgress(); };
    default: return std::function<void()>();
  }
}
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/tests/request.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <algorithm>
#include <memory>
#include <numeric>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <ucxx/api.h>
#include "include/utils.h"
namespace {
using ::testing::Combine;
using ::testing::ContainerEq;
using ::testing::Values;
// Fixture parameterized over (buffer type, custom AM allocator, delayed
// submission, progress mode, message length in ints). Provides a
// self-connected endpoint plus helpers to allocate/seed buffers and copy
// results back to host for comparison.
class RequestTest : public ::testing::TestWithParam<
                      std::tuple<ucxx::BufferType, bool, bool, ProgressMode, size_t>> {
 protected:
  std::shared_ptr<ucxx::Context> _context{nullptr};
  std::shared_ptr<ucxx::Worker> _worker{nullptr};
  std::shared_ptr<ucxx::Endpoint> _ep{nullptr};
  std::function<void()> _progressWorker;
  // Unpacked from GetParam() at the top of SetUp().
  ucxx::BufferType _bufferType;
  ucs_memory_type_t _memoryType;
  bool _registerCustomAmAllocator;
  bool _enableDelayedSubmission;
  ProgressMode _progressMode;
  size_t _messageLength;
  size_t _messageSize;
  // Rendezvous threshold (bytes) forced via RNDV_THRESH on the context.
  size_t _rndvThresh{8192};
  size_t _numBuffers{0};
  std::vector<std::vector<int>> _send;
  std::vector<std::vector<int>> _recv;
  std::vector<std::unique_ptr<ucxx::Buffer>> _sendBuffer;
  std::vector<std::unique_ptr<ucxx::Buffer>> _recvBuffer;
  std::vector<void*> _sendPtr{nullptr};
  std::vector<void*> _recvPtr{nullptr};
  void SetUp()
  {
    // Unpack the parameters before anything else: the RMM-skip check below
    // reads `_bufferType`, which was previously inspected *before* being
    // assigned from GetParam() and therefore read an indeterminate value.
    std::tie(_bufferType,
             _registerCustomAmAllocator,
             _enableDelayedSubmission,
             _progressMode,
             _messageLength) = GetParam();
    if (_bufferType == ucxx::BufferType::RMM) {
#if !UCXX_ENABLE_RMM
      GTEST_SKIP() << "UCXX was not built with RMM support";
#endif
    }
    _memoryType =
      (_bufferType == ucxx::BufferType::RMM) ? UCS_MEMORY_TYPE_CUDA : UCS_MEMORY_TYPE_HOST;
    _messageSize = _messageLength * sizeof(int);
    // Pin the rendezvous threshold so tests can exercise both eager and
    // rendezvous protocols deterministically.
    _context = ucxx::createContext({{"RNDV_THRESH", std::to_string(_rndvThresh)}},
                                   ucxx::Context::defaultFeatureFlags);
    _worker = _context->createWorker(_enableDelayedSubmission);
    if (_progressMode == ProgressMode::Blocking) {
      _worker->initBlockingProgressMode();
    } else if (_progressMode == ProgressMode::ThreadPolling ||
               _progressMode == ProgressMode::ThreadBlocking) {
      // CUDA context must exist on the progress thread before any transfers.
      _worker->setProgressThreadStartCallback(::createCudaContextCallback, nullptr);
      if (_progressMode == ProgressMode::ThreadPolling) _worker->startProgressThread(true);
      if (_progressMode == ProgressMode::ThreadBlocking) _worker->startProgressThread(false);
    }
    _progressWorker = getProgressFunction(_worker, _progressMode);
    // Self-connected endpoint: sends and receives run on the same worker.
    _ep = _worker->createEndpointFromWorkerAddress(_worker->getAddress());
  }
  // Allocate `numBuffers` send (and optionally receive) buffers of
  // `_messageSize` bytes in the parameterized memory type; send buffers are
  // seeded with iota data offset by the buffer index.
  void allocate(const size_t numBuffers = 1, const bool allocateRecvBuffer = true)
  {
    _numBuffers = numBuffers;
    _send.resize(_numBuffers);
    _recv.resize(_numBuffers);
    _sendBuffer.resize(_numBuffers);
    _sendPtr.resize(_numBuffers);
    if (allocateRecvBuffer) {
      _recvBuffer.resize(_numBuffers);
      _recvPtr.resize(_numBuffers);
    }
    for (size_t i = 0; i < _numBuffers; ++i) {
      _send[i].resize(_messageLength);
      _recv[i].resize(_messageLength);
      std::iota(_send[i].begin(), _send[i].end(), i);
      if (_bufferType == ucxx::BufferType::Host) {
        _sendBuffer[i] = std::make_unique<ucxx::HostBuffer>(_messageSize);
        if (allocateRecvBuffer) _recvBuffer[i] = std::make_unique<ucxx::HostBuffer>(_messageSize);
        std::copy(_send[i].begin(), _send[i].end(), reinterpret_cast<int*>(_sendBuffer[i]->data()));
#if UCXX_ENABLE_RMM
      } else if (_bufferType == ucxx::BufferType::RMM) {
        _sendBuffer[i] = std::make_unique<ucxx::RMMBuffer>(_messageSize);
        if (allocateRecvBuffer) _recvBuffer[i] = std::make_unique<ucxx::RMMBuffer>(_messageSize);
        RMM_CUDA_TRY(cudaMemcpyAsync(_sendBuffer[i]->data(),
                                     _send[i].data(),
                                     _messageSize,
                                     cudaMemcpyDefault,
                                     rmm::cuda_stream_default.value()));
#endif
      }
      _sendPtr[i] = _sendBuffer[i]->data();
      if (allocateRecvBuffer) _recvPtr[i] = _recvBuffer[i]->data();
    }
#if UCXX_ENABLE_RMM
    // Ensure async device copies above completed before transfers start.
    if (_bufferType == ucxx::BufferType::RMM) { rmm::cuda_stream_default.synchronize(); }
#endif
  }
  // Copy received data back into `_recv` host vectors for comparison.
  void copyResults()
  {
    for (size_t i = 0; i < _numBuffers; ++i) {
      if (_bufferType == ucxx::BufferType::Host) {
        std::copy(reinterpret_cast<int*>(_recvPtr[i]),
                  reinterpret_cast<int*>(_recvPtr[i]) + _messageLength,
                  _recv[i].begin());
#if UCXX_ENABLE_RMM
      } else if (_bufferType == ucxx::BufferType::RMM) {
        RMM_CUDA_TRY(cudaMemcpyAsync(_recv[i].data(),
                                     _recvPtr[i],
                                     _messageSize,
                                     cudaMemcpyDefault,
                                     rmm::cuda_stream_default.value()));
#endif
      }
    }
#if UCXX_ENABLE_RMM
    if (_bufferType == ucxx::BufferType::RMM) { rmm::cuda_stream_default.synchronize(); }
#endif
  }
};
// Round-trip one message via the active-message (AM) API, optionally with a
// custom RMM allocator registered for CUDA receives.
TEST_P(RequestTest, ProgressAm)
{
  if (_progressMode == ProgressMode::Wait) {
    GTEST_SKIP() << "Interrupting UCP worker progress operation in wait mode is not possible";
  }
#if !UCXX_ENABLE_RMM
  // NOTE: without RMM the whole test is skipped, including host-buffer
  // parameterizations.
  GTEST_SKIP() << "UCXX was not built with RMM support";
#else
  if (_registerCustomAmAllocator && _memoryType == UCS_MEMORY_TYPE_CUDA) {
    // Receive into device memory instead of the default host allocation.
    _worker->registerAmAllocator(UCS_MEMORY_TYPE_CUDA, [](size_t length) {
      return std::make_shared<ucxx::RMMBuffer>(length);
    });
  }
  // AM receives allocate their own buffer, so no recv buffer is preallocated.
  allocate(1, false);
  // Submit and wait for transfers to complete
  std::vector<std::shared_ptr<ucxx::Request>> requests;
  requests.push_back(_ep->amSend(_sendPtr[0], _messageSize, _memoryType));
  requests.push_back(_ep->amRecv());
  waitRequests(_worker, requests, _progressWorker);
  auto recvReq = requests[1];
  _recvPtr[0] = recvReq->getRecvBuffer()->data();
  // Messages larger than `_rndvThresh` are rendezvous and will use custom allocator,
  // smaller messages are eager and will always be host-allocated.
  ASSERT_THAT(recvReq->getRecvBuffer()->getType(),
              (_registerCustomAmAllocator && _messageSize >= _rndvThresh) ? _bufferType
                                                                          : ucxx::BufferType::Host);
  copyResults();
  // Assert data correctness
  ASSERT_THAT(_recv[0], ContainerEq(_send[0]));
#endif
}
// Round-trip a single message over the stream API and verify the payload.
TEST_P(RequestTest, ProgressStream)
{
  allocate();
  // Submit both sides, then drive the worker until completion. Braced-init
  // evaluates left to right, so the send is posted before the receive.
  std::vector<std::shared_ptr<ucxx::Request>> requests = {
    _ep->streamSend(_sendPtr[0], _messageSize, 0),
    _ep->streamRecv(_recvPtr[0], _messageSize, 0)};
  waitRequests(_worker, requests, _progressWorker);
  copyResults();
  // The received message must match what was sent.
  ASSERT_THAT(_recv[0], ContainerEq(_send[0]));
}
// Round-trip a single message over the tag API and verify the payload.
TEST_P(RequestTest, ProgressTag)
{
  allocate();
  // Submit both sides, then drive the worker until completion. Braced-init
  // evaluates left to right, so the send is posted before the receive.
  std::vector<std::shared_ptr<ucxx::Request>> requests = {
    _ep->tagSend(_sendPtr[0], _messageSize, 0),
    _ep->tagRecv(_recvPtr[0], _messageSize, 0)};
  waitRequests(_worker, requests, _progressWorker);
  copyResults();
  // The received message must match what was sent.
  ASSERT_THAT(_recv[0], ContainerEq(_send[0]));
}
// Send eight frames at once via the multi-tag API; receive buffers are
// allocated by the receive path and verified frame by frame.
TEST_P(RequestTest, ProgressTagMulti)
{
  if (_progressMode == ProgressMode::Wait) {
    GTEST_SKIP() << "Interrupting UCP worker progress operation in wait mode is not possible";
  }
  const size_t numMulti        = 8;
  const bool allocateRecvBuffer = false;
  allocate(numMulti, allocateRecvBuffer);
  // Allocate buffers for request sizes/types
  std::vector<size_t> multiSize(numMulti, _messageSize);
  std::vector<int> multiIsCUDA(numMulti, _bufferType == ucxx::BufferType::RMM);
  // Submit and wait for transfers to complete
  std::vector<std::shared_ptr<ucxx::Request>> requests;
  requests.push_back(_ep->tagMultiSend(_sendPtr, multiSize, multiIsCUDA, 0, false));
  requests.push_back(_ep->tagMultiRecv(0, false));
  waitRequests(_worker, requests, _progressWorker);
  // Fix: `recvRequest` used to be assigned but never used while the loop
  // below re-derived the request from `requests[1]`; downcast once, reuse it.
  auto recvRequest = std::dynamic_pointer_cast<ucxx::RequestTagMulti>(requests[1]);
  ASSERT_NE(recvRequest, nullptr);
  _recvPtr.resize(_numBuffers);
  size_t transferIdx = 0;
  // Populate recv pointers
  for (const auto& br : recvRequest->_bufferRequests) {
    // br->buffer == nullptr are headers
    if (br->buffer) {
      ASSERT_EQ(br->buffer->getType(), _bufferType);
      ASSERT_EQ(br->buffer->getSize(), _messageSize);
      _recvPtr[transferIdx] = br->buffer->data();
      ++transferIdx;
    }
  }
  copyResults();
  // Assert data correctness
  for (size_t i = 0; i < numMulti; ++i)
    ASSERT_THAT(_recv[i], ContainerEq(_send[i]));
}
// Tag transfer with user-provided completion callbacks; the callback records
// the completion status at the index passed as user data.
TEST_P(RequestTest, TagUserCallback)
{
  allocate();
  std::vector<std::shared_ptr<ucxx::Request>> requests(2);
  std::vector<ucs_status_t> requestStatus(2, UCS_INPROGRESS);
  // Aborts on any transfer failure; `requests` is not needed by the lambda
  // body so the previously-unused capture was dropped.
  auto checkStatus = [&requestStatus](ucs_status_t status,
                                      ::ucxx::RequestCallbackUserData data) {
    auto idx = *std::static_pointer_cast<size_t>(data);
    if (status != UCS_OK) abort();
    requestStatus[idx] = status;
  };
  auto sendIndex = std::make_shared<size_t>(0u);
  auto recvIndex = std::make_shared<size_t>(1u);
  // Submit and wait for transfers to complete
  requests[0] = _ep->tagSend(_sendPtr[0], _messageSize, 0, false, checkStatus, sendIndex);
  requests[1] = _ep->tagRecv(_recvPtr[0], _messageSize, 0, false, checkStatus, recvIndex);
  waitRequests(_worker, requests, _progressWorker);
  copyResults();
  // Iterate by const reference: copying a shared_ptr each iteration incurs an
  // unnecessary atomic refcount round-trip.
  for (const auto& request : requests)
    ASSERT_THAT(request->getStatus(), UCS_OK);
  // Assert data correctness
  ASSERT_THAT(_recv[0], ContainerEq(_send[0]));
}
// Parameter tuple order: (bufferType, registerCustomAmAllocator,
// enableDelayedSubmission, progressMode, messageLength).
// Host-memory transfers over all manual and threaded progress modes.
INSTANTIATE_TEST_SUITE_P(ProgressModes,
                         RequestTest,
                         Combine(Values(ucxx::BufferType::Host),
                                 Values(false),
                                 Values(false),
                                 Values(ProgressMode::Polling,
                                        ProgressMode::Blocking,
                                        // ProgressMode::Wait,   // Hangs on Stream
                                        ProgressMode::ThreadPolling,
                                        ProgressMode::ThreadBlocking),
                                 Values(1, 1024, 2048, 1048576)));
// Host-memory transfers with delayed submission (requires a progress thread).
INSTANTIATE_TEST_SUITE_P(DelayedSubmission,
                         RequestTest,
                         Combine(Values(ucxx::BufferType::Host),
                                 Values(false),
                                 Values(true),
                                 Values(ProgressMode::ThreadPolling, ProgressMode::ThreadBlocking),
                                 Values(1, 1024, 2048, 1048576)));
#if UCXX_ENABLE_RMM
// Device-memory (RMM) transfers, with and without the custom AM allocator.
INSTANTIATE_TEST_SUITE_P(RMMProgressModes,
                         RequestTest,
                         Combine(Values(ucxx::BufferType::RMM),
                                 Values(false, true),
                                 Values(false),
                                 Values(ProgressMode::Polling,
                                        ProgressMode::Blocking,
                                        // ProgressMode::Wait,   // Hangs on Stream
                                        ProgressMode::ThreadPolling,
                                        ProgressMode::ThreadBlocking),
                                 Values(1, 1024, 2048, 1048576)));
// Device-memory transfers with delayed submission.
INSTANTIATE_TEST_SUITE_P(RMMDelayedSubmission,
                         RequestTest,
                         Combine(Values(ucxx::BufferType::RMM),
                                 Values(false, true),
                                 Values(true),
                                 Values(ProgressMode::ThreadPolling, ProgressMode::ThreadBlocking),
                                 Values(1, 1024, 2048, 1048576)));
#endif
} // namespace
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/tests/header.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <algorithm>
#include <numeric>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <ucxx/api.h>
using ::testing::ContainerEq;
namespace {
// dataSize() must account for `next`, `nframes` and the isCUDA/size storage.
TEST(HeaderTest, DataSize)
{
  // Single non-CUDA frame of size 1.
  const bool next         = false;
  const size_t framesSize = 1;
  std::vector<int> isCUDA{0};
  std::vector<size_t> size{1};
  const ucxx::Header header(next, framesSize, isCUDA.data(), size.data());
  const auto expectedDataSize =
    sizeof(header.next) + sizeof(header.nframes) + sizeof(header.isCUDA) + sizeof(header.size);
  ASSERT_EQ(header.dataSize(), expectedDataSize);
}
// Construct a header from raw pointers and check that a serialize/deserialize
// round trip preserves every field.
TEST(HeaderTest, PointerConstructor)
{
  const bool next        = false;
  const size_t framesSize = 5;
  std::vector<int> isCUDA{1, 0, 1, 0, 1};
  std::vector<size_t> size{1, 2, 3, 4, 5};
  const ucxx::Header header(next, framesSize, isCUDA.data(), size.data());
  // Only the first `framesSize` entries of the header storage are meaningful.
  std::vector<int> headerIsCUDA(header.isCUDA.begin(), header.isCUDA.begin() + framesSize);
  std::vector<size_t> headerSize(header.size.begin(), header.size.begin() + framesSize);
  ASSERT_EQ(header.next, next);
  ASSERT_EQ(header.nframes, framesSize);
  ASSERT_THAT(headerIsCUDA, ContainerEq(isCUDA));
  ASSERT_THAT(headerSize, ContainerEq(size));
  // Round trip through the serialized representation.
  auto serialized   = header.serialize();
  auto deserialized = ucxx::Header(serialized);
  ASSERT_EQ(deserialized.dataSize(), header.dataSize());
  ASSERT_EQ(deserialized.next, header.next);
  ASSERT_EQ(deserialized.nframes, header.nframes);
  ASSERT_THAT(deserialized.isCUDA, ContainerEq(header.isCUDA));
  ASSERT_THAT(deserialized.size, ContainerEq(header.size));
}
// Fixture parameterized over a frame count; generates frame descriptors
// (alternating CUDA flag, increasing sizes) and the headers built from them.
class FromPointerGenerator : public ::testing::Test, public ::testing::WithParamInterface<size_t> {
 private:
  void generateData()
  {
    _isCUDA.resize(_framesSize);
    _size.resize(_framesSize);
    std::iota(_size.begin(), _size.end(), 0);
    std::generate(_isCUDA.begin(), _isCUDA.end(), [n = 0]() mutable { return n++ % 2; });
    // `buildHeaders` returns by value; assigning the rvalue already moves, so
    // the previous explicit `std::move` was redundant (and pessimizing to read).
    _headers = ucxx::Header::buildHeaders(_size, _isCUDA);
  }
 protected:
  size_t _framesSize;
  std::vector<size_t> _size;
  std::vector<int> _isCUDA;
  std::vector<ucxx::Header> _headers;
  // Fetch the parameterized frame count and generate the test data.
  // `override` lets the compiler check this matches ::testing::Test::SetUp.
  void SetUp() override
  {
    _framesSize = GetParam();
    generateData();
  }
};
// Verify every generated header: chaining flag, per-header frame count, and
// that isCUDA/size survive a serialize/deserialize round trip. Frames are
// split across headers in chunks of `ucxx::HeaderFramesSize`.
TEST_P(FromPointerGenerator, PointerConstructor)
{
  for (size_t i = 0; i < _headers.size(); ++i) {
    const auto& header = _headers[i];
    // Every header except the last must point to a follow-up header.
    const bool next    = i != _headers.size() - 1;
    // Full chunk for chained headers, remainder for the final one.
    const size_t expectedNumFrames =
      header.next ? ucxx::HeaderFramesSize : _framesSize - i * ucxx::HeaderFramesSize;
    const size_t firstIdx = i * ucxx::HeaderFramesSize;
    const size_t lastIdx  = std::min((i + 1) * ucxx::HeaderFramesSize, _framesSize);
    auto serialized   = header.serialize();
    auto deserialized = ucxx::Header(serialized);
    // Assert next
    ASSERT_EQ(header.next, next);
    ASSERT_EQ(deserialized.next, header.next);
    // Assert number of frames
    ASSERT_EQ(header.nframes, expectedNumFrames);
    ASSERT_EQ(deserialized.nframes, header.nframes);
    // Assert isCUDA
    std::vector<int> expectedIsCUDA(std::cbegin(_isCUDA) + firstIdx,
                                    std::cbegin(_isCUDA) + lastIdx);
    std::vector<int> headerIsCUDA(header.isCUDA.begin(), header.isCUDA.begin() + expectedNumFrames);
    ASSERT_THAT(headerIsCUDA, ContainerEq(expectedIsCUDA));
    std::vector<int> deserializedIsCUDA(deserialized.isCUDA.begin(),
                                        deserialized.isCUDA.begin() + expectedNumFrames);
    ASSERT_THAT(deserializedIsCUDA, ContainerEq(headerIsCUDA));
    // Assert size
    std::vector<size_t> expectedSize(std::cbegin(_size) + firstIdx, std::cbegin(_size) + lastIdx);
    std::vector<size_t> headerSize(header.size.begin(), header.size.begin() + expectedNumFrames);
    ASSERT_THAT(headerSize, ContainerEq(expectedSize));
    std::vector<size_t> deserializedSize(deserialized.size.begin(),
                                         deserialized.size.begin() + expectedNumFrames);
    ASSERT_THAT(deserializedSize, ContainerEq(headerSize));
  }
}
// Frame counts covering empty, partial, boundary and multi-header cases.
INSTANTIATE_TEST_SUITE_P(SingleFrame,
                         FromPointerGenerator,
                         testing::Values(0, 1, 5, 10, 100, 101, 200, 201));
} // namespace
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/tests/config.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gtest/gtest.h>
#include <ucxx/api.h>
namespace {
// A default (empty) configuration map must still yield a valid UCP config handle.
TEST(ConfigTest, HandleIsValid)
{
  ucxx::ConfigMap configMap{};
  ucxx::Config config{configMap};
  ASSERT_NE(config.getHandle(), nullptr);
}
// TEST(ConfigTest, ConfigMapDefault) {
// ucxx::Config config{};
// auto configMapOut = config.get();
// // ASSERT_GT(configMapOut.size(), 1u);
// // ASSERT_NE(configMapOut.find("TLS"), configMapOut.end());
// // ASSERT_EQ(configMapOut["TLS"], "all");
// for (const auto it : configMapOut) {
// std::cout << it.first << ": " << it.second << std::endl;
// }
// }
// A user-provided UCX_TLS entry must be reflected in the resulting config
// (reported under the "TLS" key).
TEST(ConfigTest, ConfigMapTLS)
{
  ucxx::ConfigMap configMap{{"UCX_TLS", "tcp"}};
  ucxx::Config config{configMap};
  auto configMapOut = config.get();
  ASSERT_GT(configMapOut.size(), 1u);
  const auto tls = configMapOut.find("TLS");
  ASSERT_NE(tls, configMapOut.end());
  ASSERT_EQ(tls->second, "tcp");
}
} // namespace
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/tests/context.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <cstdlib>
#include <string>
#include <gtest/gtest.h>
#include <ucxx/api.h>
namespace {
// Transport selections exercised by the parameterized TLS test below.
static std::vector<std::string> TlsConfig{"^tcp", "^tcp,sm", "tcp", "tcp,sm", "all"};
// Parameterized over a TLS configuration string.
class ContextTestCustomConfig : public testing::TestWithParam<std::string> {};
// A context created with default settings must produce a valid UCP handle.
TEST(ContextTest, HandleIsValid)
{
  auto context = ucxx::createContext({}, ucxx::Context::defaultFeatureFlags);
  ASSERT_NE(context->getHandle(), nullptr);
}
// With no explicit config, TLS must fall back to "all" unless the UCX_TLS
// environment variable overrides it; feature flags must round-trip.
TEST(ContextTest, DefaultConfigsAndFlags)
{
  static constexpr auto featureFlags = ucxx::Context::defaultFeatureFlags;
  auto context      = ucxx::createContext({}, featureFlags);
  auto configMapOut = context->getConfig();
  ASSERT_GT(configMapOut.size(), 1u);
  ASSERT_NE(configMapOut.find("TLS"), configMapOut.end());
  // The environment takes precedence over the built-in default.
  if (const char* envTls = std::getenv("UCX_TLS"))
    ASSERT_EQ(configMapOut["TLS"], envTls);
  else
    ASSERT_EQ(configMapOut["TLS"], "all");
  ASSERT_EQ(context->getFeatureFlags(), featureFlags);
}
// Each TLS string passed at context creation must be reported back verbatim.
TEST_P(ContextTestCustomConfig, TLS)
{
  auto tls = GetParam();
  static constexpr auto featureFlags = ucxx::Context::defaultFeatureFlags;
  auto context      = ucxx::createContext({{"TLS", tls}}, featureFlags);
  auto configMapOut = context->getConfig();
  ASSERT_GT(configMapOut.size(), 1u);
  ASSERT_NE(configMapOut.find("TLS"), configMapOut.end());
  ASSERT_EQ(configMapOut["TLS"], tls);
  ASSERT_EQ(context->getFeatureFlags(), featureFlags);
}
// Only the explicitly requested features must be reported back.
TEST(ContextTest, CustomFlags)
{
  const uint64_t featureFlags = UCP_FEATURE_TAG | UCP_FEATURE_WAKEUP;
  auto context                = ucxx::createContext({}, featureFlags);
  ASSERT_EQ(context->getFeatureFlags(), featureFlags);
}
// The context info string must be non-empty.
TEST(ContextTest, Info)
{
  auto context = ucxx::createContext({{"UCX_TLS", "tcp"}}, ucxx::Context::defaultFeatureFlags);
  ASSERT_GT(context->getInfo().size(), 0u);
}
// Workers can be created both via the free factory and the context helper.
TEST(ContextTest, CreateWorker)
{
  auto context = ucxx::createContext({}, ucxx::Context::defaultFeatureFlags);
  auto workerFromFactory = ucxx::createWorker(context, false, false);
  ASSERT_NE(workerFromFactory, nullptr);
  auto workerFromContext = context->createWorker();
  ASSERT_NE(workerFromContext, nullptr);
}
// Run ContextTestCustomConfig::TLS once per entry in TlsConfig.
INSTANTIATE_TEST_SUITE_P(TLS, ContextTestCustomConfig, testing::ValuesIn(TlsConfig));
} // namespace
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/tests/worker.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include <ucxx/api.h>
#include "include/utils.h"
namespace {
using ::testing::Combine;
using ::testing::Values;
class WorkerTest : public ::testing::Test {
protected:
std::shared_ptr<ucxx::Context> _context{
ucxx::createContext({}, ucxx::Context::defaultFeatureFlags)};
std::shared_ptr<ucxx::Worker> _worker{nullptr};
virtual void SetUp() { _worker = _context->createWorker(); }
};
// Fixture parameterized over (delayed submission, future support).
class WorkerCapabilityTest : public ::testing::Test,
                             public ::testing::WithParamInterface<std::tuple<bool, bool>> {
 protected:
  std::shared_ptr<ucxx::Context> _context{
    ucxx::createContext({}, ucxx::Context::defaultFeatureFlags)};
  std::shared_ptr<ucxx::Worker> _worker{nullptr};
  bool _enableDelayedSubmission;
  bool _enableFuture;
  // Create a worker with the parameterized capabilities. `override` replaces
  // the redundant `virtual` re-declaration and is compiler-checked.
  void SetUp() override
  {
    std::tie(_enableDelayedSubmission, _enableFuture) = GetParam();
    _worker = _context->createWorker(_enableDelayedSubmission, _enableFuture);
  }
};
// Fixture parameterized over (delayed submission, progress mode); replaces
// the base worker with one matching the requested configuration.
class WorkerProgressTest : public WorkerTest,
                           public ::testing::WithParamInterface<std::tuple<bool, ProgressMode>> {
 protected:
  std::function<void()> _progressWorker;
  bool _enableDelayedSubmission;
  ProgressMode _progressMode;
  // `override` documents (and compiler-checks) that this overrides
  // WorkerTest::SetUp.
  void SetUp() override
  {
    std::tie(_enableDelayedSubmission, _progressMode) = GetParam();
    _worker = _context->createWorker(_enableDelayedSubmission);
    if (_progressMode == ProgressMode::Blocking)
      _worker->initBlockingProgressMode();
    else if (_progressMode == ProgressMode::ThreadPolling)
      _worker->startProgressThread(true);
    else if (_progressMode == ProgressMode::ThreadBlocking)
      _worker->startProgressThread(false);
    _progressWorker = getProgressFunction(_worker, _progressMode);
  }
};
TEST_F(WorkerTest, HandleIsValid) { ASSERT_NE(_worker->getHandle(), nullptr); }
// The worker must report exactly the capabilities requested at creation.
TEST_P(WorkerCapabilityTest, CheckCapability)
{
  ASSERT_EQ(_worker->isDelayedRequestSubmissionEnabled(), _enableDelayedSubmission);
  ASSERT_EQ(_worker->isFutureEnabled(), _enableFuture);
}
// All four combinations of delayed submission x future support.
INSTANTIATE_TEST_SUITE_P(Capabilities,
                         WorkerCapabilityTest,
                         Combine(Values(false, true), Values(false, true)));
// tagProbe must be false before any message was sent and true after a tagged
// message arrived on the self-connected endpoint.
TEST_F(WorkerTest, TagProbe)
{
  auto progressWorker = getProgressFunction(_worker, ProgressMode::Polling);
  auto ep = _worker->createEndpointFromWorkerAddress(_worker->getAddress());
  ASSERT_FALSE(_worker->tagProbe(0));
  std::vector<int> buf{123};
  std::vector<std::shared_ptr<ucxx::Request>> requests;
  requests.push_back(ep->tagSend(buf.data(), buf.size() * sizeof(int), 0));
  waitRequests(_worker, requests, progressWorker);
  // Attempt to progress worker 10 times (arbitrarily defined).
  // TODO: Maybe a timeout would fit best.
  for (size_t i = 0; i < 10 && !_worker->tagProbe(0); ++i)
    progressWorker();
  ASSERT_TRUE(_worker->tagProbe(0));
}
// amProbe must be false before any active message was sent and true after one
// arrived on the self-connected endpoint.
TEST_F(WorkerTest, AmProbe)
{
  auto progressWorker = getProgressFunction(_worker, ProgressMode::Polling);
  auto ep = _worker->createEndpointFromWorkerAddress(_worker->getAddress());
  ASSERT_FALSE(_worker->amProbe(ep->getHandle()));
  std::vector<int> buf{123};
  std::vector<std::shared_ptr<ucxx::Request>> requests;
  requests.push_back(ep->amSend(buf.data(), buf.size() * sizeof(int), UCS_MEMORY_TYPE_HOST));
  waitRequests(_worker, requests, progressWorker);
  // Attempt to progress worker 10 times (arbitrarily defined).
  // TODO: Maybe a timeout would fit best.
  // Fix: probe the AM API here; the loop previously checked `tagProbe(0)`
  // (a copy-paste slip from TagProbe), making the retry condition meaningless
  // for active messages.
  for (size_t i = 0; i < 10 && !_worker->amProbe(ep->getHandle()); ++i)
    progressWorker();
  ASSERT_TRUE(_worker->amProbe(ep->getHandle()));
}
// Round-trip one integer over the active-message API; the receive side
// allocates its own host buffer.
TEST_P(WorkerProgressTest, ProgressAm)
{
  if (_progressMode == ProgressMode::Wait) {
    // TODO: Is this the same reason as TagMulti?
    GTEST_SKIP() << "Wait mode not supported";
  }
  auto ep = _worker->createEndpointFromWorkerAddress(_worker->getAddress());
  std::vector<int> send{123};
  std::vector<std::shared_ptr<ucxx::Request>> requests;
  requests.push_back(ep->amSend(send.data(), send.size() * sizeof(int), UCS_MEMORY_TYPE_HOST));
  requests.push_back(ep->amRecv());
  waitRequests(_worker, requests, _progressWorker);
  auto recvReq    = requests[1];
  auto recvBuffer = recvReq->getRecvBuffer();
  ASSERT_EQ(recvBuffer->getType(), ucxx::BufferType::Host);
  ASSERT_EQ(recvBuffer->getSize(), send.size() * sizeof(int));
  // Reinterpret the received bytes as ints and compare the payload.
  std::vector<int> recvAbstract(reinterpret_cast<int*>(recvBuffer->data()),
                                reinterpret_cast<int*>(recvBuffer->data()) + send.size());
  ASSERT_EQ(recvAbstract[0], send[0]);
}
// Round-trip one integer over the stream API through a self-connected endpoint.
TEST_P(WorkerProgressTest, ProgressStream)
{
  auto ep = _worker->createEndpointFromWorkerAddress(_worker->getAddress());
  std::vector<int> send{123};
  std::vector<int> recv(1);
  // Braced-init evaluates left to right: send is posted before recv.
  std::vector<std::shared_ptr<ucxx::Request>> requests = {
    ep->streamSend(send.data(), send.size() * sizeof(int), 0),
    ep->streamRecv(recv.data(), recv.size() * sizeof(int), 0)};
  waitRequests(_worker, requests, _progressWorker);
  ASSERT_EQ(recv[0], send[0]);
}
// Round-trip one integer over the tag API through a self-connected endpoint.
TEST_P(WorkerProgressTest, ProgressTag)
{
  auto ep = _worker->createEndpointFromWorkerAddress(_worker->getAddress());
  std::vector<int> send{123};
  std::vector<int> recv(1);
  // Braced-init evaluates left to right: send is posted before recv.
  std::vector<std::shared_ptr<ucxx::Request>> requests = {
    ep->tagSend(send.data(), send.size() * sizeof(int), 0),
    ep->tagRecv(recv.data(), recv.size() * sizeof(int), 0)};
  waitRequests(_worker, requests, _progressWorker);
  ASSERT_EQ(recv[0], send[0]);
}
// Send the same host buffer eight times via the multi-tag API and verify each
// received frame both through the abstract Buffer interface and the concrete
// HostBuffer downcast.
TEST_P(WorkerProgressTest, ProgressTagMulti)
{
  if (_progressMode == ProgressMode::Wait) {
    GTEST_SKIP() << "Interrupting UCP worker progress operation in wait mode is not possible";
  }
  auto ep = _worker->createEndpointFromWorkerAddress(_worker->getAddress());
  std::vector<int> send{123};
  const size_t numMulti = 8;
  // Eight frames, all pointing at the same host payload.
  std::vector<void*> multiBuffer(numMulti, send.data());
  std::vector<size_t> multiSize(numMulti, send.size() * sizeof(int));
  std::vector<int> multiIsCUDA(numMulti, false);
  std::vector<std::shared_ptr<ucxx::Request>> requests;
  requests.push_back(ep->tagMultiSend(multiBuffer, multiSize, multiIsCUDA, 0, false));
  requests.push_back(ep->tagMultiRecv(0, false));
  waitRequests(_worker, requests, _progressWorker);
  for (const auto& br :
       std::dynamic_pointer_cast<ucxx::RequestTagMulti>(requests[1])->_bufferRequests) {
    // br->buffer == nullptr are headers
    if (br->buffer) {
      ASSERT_EQ(br->buffer->getType(), ucxx::BufferType::Host);
      ASSERT_EQ(br->buffer->getSize(), send.size() * sizeof(int));
      std::vector<int> recvAbstract(reinterpret_cast<int*>(br->buffer->data()),
                                    reinterpret_cast<int*>(br->buffer->data()) + send.size());
      ASSERT_EQ(recvAbstract[0], send[0]);
      // Same checks through the concrete HostBuffer type.
      const auto& recvConcretePtr = std::dynamic_pointer_cast<ucxx::HostBuffer>(br->buffer);
      ASSERT_EQ(recvConcretePtr->getType(), ucxx::BufferType::Host);
      ASSERT_EQ(recvConcretePtr->getSize(), send.size() * sizeof(int));
      std::vector<int> recvConcrete(reinterpret_cast<int*>(recvConcretePtr->data()),
                                    reinterpret_cast<int*>(recvConcretePtr->data()) + send.size());
      ASSERT_EQ(recvConcrete[0], send[0]);
    }
  }
}
// Parameter tuple order: (enableDelayedSubmission, progressMode).
// All progress modes without delayed submission.
INSTANTIATE_TEST_SUITE_P(ProgressModes,
                         WorkerProgressTest,
                         Combine(Values(false),
                                 Values(ProgressMode::Polling,
                                        ProgressMode::Blocking,
                                        ProgressMode::Wait,
                                        ProgressMode::ThreadPolling,
                                        ProgressMode::ThreadBlocking)));
// Delayed submission requires a dedicated progress thread.
INSTANTIATE_TEST_SUITE_P(
  DelayedSubmission,
  WorkerProgressTest,
  Combine(Values(true), Values(ProgressMode::ThreadPolling, ProgressMode::ThreadBlocking)));
} // namespace
| 0 |
rapidsai_public_repos/ucxx/cpp/tests | rapidsai_public_repos/ucxx/cpp/tests/include/utils.h | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#pragma once
#include <functional>
#include <memory>
#include <vector>
#include <cuda_runtime_api.h>
#include <ucxx/api.h>
// How the UCP worker is driven during a test (see getProgressFunction and the
// fixtures' SetUp methods).
enum class ProgressMode {
  Polling,         // call Worker::progress() in a loop on the test thread
  Blocking,        // Worker::progressWorkerEvent() on the test thread
  Wait,            // Worker::waitProgress() on the test thread
  ThreadPolling,   // dedicated progress thread, polling
  ThreadBlocking,  // dedicated progress thread, blocking
};
void createCudaContextCallback(void* callbackArg);
// Progress until every request in `requests` has completed, then re-raise any
// transfer error via `checkError()`.
//
// NOTE(review): `worker` is currently unused — progress is driven solely
// through `progressWorker` (which may be empty for thread-progressed modes);
// the parameter appears kept for signature uniformity. Confirm before removal.
template <typename RequestType>
inline void waitRequests(std::shared_ptr<ucxx::Worker> worker,
                         const std::vector<std::shared_ptr<RequestType>>& requests,
                         const std::function<void()>& progressWorker)
{
  auto remainingRequests = requests;
  while (!remainingRequests.empty()) {
    // Move the pending set out, then rebuild it with whatever is still
    // incomplete after one pass.
    auto updatedRequests = std::exchange(remainingRequests, decltype(remainingRequests)());
    for (auto const& r : updatedRequests) {
      if (progressWorker) progressWorker();
      if (!r->isCompleted())
        remainingRequests.push_back(r);
      else
        r->checkError();
    }
  }
}
std::function<void()> getProgressFunction(std::shared_ptr<ucxx::Worker> worker,
ProgressMode progressMode);
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/benchmarks/perftest.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <unistd.h> // for getopt, optarg
#include <atomic>
#include <cassert>
#include <chrono>
#include <iostream>
#include <memory>
#include <numeric>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>
#include <ucxx/api.h>
#include <ucxx/utils/sockaddr.h>
#include <ucxx/utils/ucx.h>
// How the benchmark drives the UCP worker (see getProgressFunction below).
enum class ProgressMode {
  Polling,
  Blocking,
  Wait,
  ThreadPolling,
  ThreadBlocking,
};
// Direction of a transfer; used as map key for buffers and tags.
enum transfer_type_t { SEND, RECV };
typedef std::unordered_map<transfer_type_t, std::vector<char>> BufferMap;
typedef std::unordered_map<transfer_type_t, ucp_tag_t> TagMap;
typedef std::shared_ptr<BufferMap> BufferMapPtr;
typedef std::shared_ptr<TagMap> TagMapPtr;
// Benchmark settings populated from the command line (see parseCommand).
struct app_context_t {
  ProgressMode progress_mode = ProgressMode::Blocking;
  const char* server_addr = NULL;  // NULL until a server hostname positional arg is parsed
  uint16_t listener_port = 12345;
  size_t message_size = 8;         // bytes per message
  size_t n_iter = 100;             // measured iterations
  size_t warmup_iter = 3;          // unmeasured warmup iterations
  bool reuse_alloc = false;        // reuse one buffer allocation across iterations
  bool verify_results = false;
};
// Tracks the single client endpoint a listener is allowed to serve at a time.
class ListenerContext {
 private:
  std::shared_ptr<ucxx::Worker> _worker{nullptr};
  std::shared_ptr<ucxx::Endpoint> _endpoint{nullptr};
  std::shared_ptr<ucxx::Listener> _listener{nullptr};
  // Whether a new client may be accepted (atomic: read from the listener
  // callback, reset on release).
  std::atomic<bool> _isAvailable{true};

 public:
  explicit ListenerContext(std::shared_ptr<ucxx::Worker> worker) : _worker{worker} {}

  ~ListenerContext() { releaseEndpoint(); }

  void setListener(std::shared_ptr<ucxx::Listener> listener) { _listener = listener; }

  std::shared_ptr<ucxx::Listener> getListener() { return _listener; }

  std::shared_ptr<ucxx::Endpoint> getEndpoint() { return _endpoint; }

  bool isAvailable() const { return _isAvailable; }

  // Accept a pending connection request; throws if a client is already
  // connected (only one client at a time is supported).
  void createEndpointFromConnRequest(ucp_conn_request_h conn_request)
  {
    if (!isAvailable()) throw std::runtime_error("Listener context already has an endpoint");

    // Compile-time constant; the previous `static bool` carried an
    // unnecessary function-local static initialization.
    constexpr bool endpoint_error_handling = true;
    _endpoint = _listener->createEndpointFromConnRequest(conn_request, endpoint_error_handling);
    _isAvailable = false;
  }

  // Drop the current endpoint and allow a new client to connect.
  void releaseEndpoint()
  {
    _endpoint.reset();
    _isAvailable = true;
  }
};
// Listener connection callback: accepts the request when no client is being
// served, otherwise rejects it (only one client at a time is supported).
static void listener_cb(ucp_conn_request_h conn_request, void* arg)
{
  char ip_str[INET6_ADDRSTRLEN];
  char port_str[INET6_ADDRSTRLEN];
  ucp_conn_request_attr_t attr{};
  ListenerContext* listener_ctx = reinterpret_cast<ListenerContext*>(arg);
  // Query and log the client's address for diagnostics.
  attr.field_mask = UCP_CONN_REQUEST_ATTR_FIELD_CLIENT_ADDR;
  ucxx::utils::ucsErrorThrow(ucp_conn_request_query(conn_request, &attr));
  ucxx::utils::sockaddr_get_ip_port_str(&attr.client_address, ip_str, port_str, INET6_ADDRSTRLEN);
  std::cout << "Server received a connection request from client at address " << ip_str << ":"
            << port_str << std::endl;
  if (listener_ctx->isAvailable()) {
    listener_ctx->createEndpointFromConnRequest(conn_request);
  } else {
    // The server is already handling a connection request from a client,
    // reject this new one
    std::cout << "Rejecting a connection request from " << ip_str << ":" << port_str << "."
              << std::endl
              << "Only one client at a time is supported." << std::endl;
    ucxx::utils::ucsErrorThrow(
      ucp_listener_reject(listener_ctx->getListener()->getHandle(), conn_request));
  }
}
// Print usage/help text to stderr.
static void printUsage()
{
  std::cerr << " basic client/server example" << std::endl;
  std::cerr << std::endl;
  std::cerr << "Usage: basic [server-hostname] [options]" << std::endl;
  std::cerr << std::endl;
  std::cerr << "Parameters are:" << std::endl;
  std::cerr << "  -m          progress mode to use, valid values are: 'polling', 'blocking',"
            << std::endl;
  std::cerr << "              'thread-polling' and 'thread-blocking' (default: 'blocking')"
            << std::endl;
  // NOTE(review): '-t' is not present in parseCommand's getopt string
  // ("m:p:s:w:n:rvh"); this help line appears stale — confirm and remove.
  std::cerr << "  -t          use thread progress mode (disabled)" << std::endl;
  std::cerr << "  -p <port>   port number to listen at (12345)" << std::endl;
  std::cerr << "  -s <bytes>  message size (8)" << std::endl;
  std::cerr << "  -n <int>    number of iterations to run (100)" << std::endl;
  std::cerr << "  -r          reuse memory allocation (disabled)" << std::endl;
  std::cerr << "  -v          verify results (disabled)" << std::endl;
  std::cerr << "  -w <int>    number of warmup iterations to run (3)" << std::endl;
  std::cerr << "  -h          print this help" << std::endl;
  std::cerr << std::endl;
}
// Parse command-line options into `app_context`. Returns UCS_OK on success or
// UCS_ERR_INVALID_PARAM after printing an error message (or the usage text).
// A trailing positional argument, if present, is taken as the server hostname.
ucs_status_t parseCommand(app_context_t* app_context, int argc, char* const argv[])
{
  optind = 1;  // reset getopt state in case of repeated invocation
  int c;
  while ((c = getopt(argc, argv, "m:p:s:w:n:rvh")) != -1) {
    switch (c) {
      case 'm':
        // Each recognized mode breaks out of the switch; unknown values fail.
        if (strcmp(optarg, "blocking") == 0) {
          app_context->progress_mode = ProgressMode::Blocking;
          break;
        } else if (strcmp(optarg, "polling") == 0) {
          app_context->progress_mode = ProgressMode::Polling;
          break;
        } else if (strcmp(optarg, "thread-blocking") == 0) {
          app_context->progress_mode = ProgressMode::ThreadBlocking;
          break;
        } else if (strcmp(optarg, "thread-polling") == 0) {
          app_context->progress_mode = ProgressMode::ThreadPolling;
          break;
        } else if (strcmp(optarg, "wait") == 0) {
          app_context->progress_mode = ProgressMode::Wait;
          break;
        } else {
          std::cerr << "Invalid progress mode: " << optarg << std::endl;
          return UCS_ERR_INVALID_PARAM;
        }
      case 'p':
        // NOTE(review): listener_port is uint16_t, so `<= 0` only catches 0
        // and atoi values above 65535 wrap silently — confirm intended.
        app_context->listener_port = atoi(optarg);
        if (app_context->listener_port <= 0) {
          std::cerr << "Wrong listener port: " << app_context->listener_port << std::endl;
          return UCS_ERR_INVALID_PARAM;
        }
        break;
      case 's':
        // NOTE(review): message_size is size_t (unsigned); `<= 0` only
        // catches 0, and negative atoi results wrap to huge values.
        app_context->message_size = atoi(optarg);
        if (app_context->message_size <= 0) {
          std::cerr << "Wrong message size: " << app_context->message_size << std::endl;
          return UCS_ERR_INVALID_PARAM;
        }
        break;
      case 'w':
        app_context->warmup_iter = atoi(optarg);
        if (app_context->warmup_iter <= 0) {
          std::cerr << "Wrong number of warmup iterations: " << app_context->warmup_iter
                    << std::endl;
          return UCS_ERR_INVALID_PARAM;
        }
        break;
      case 'n':
        app_context->n_iter = atoi(optarg);
        if (app_context->n_iter <= 0) {
          std::cerr << "Wrong number of iterations: " << app_context->n_iter << std::endl;
          return UCS_ERR_INVALID_PARAM;
        }
        break;
      case 'r': app_context->reuse_alloc = true; break;
      case 'v': app_context->verify_results = true; break;
      case 'h':
      default: printUsage(); return UCS_ERR_INVALID_PARAM;
    }
  }
  // Positional argument: server hostname (runs as client when present).
  if (optind < argc) { app_context->server_addr = argv[optind]; }
  return UCS_OK;
}
// Translate a progress mode into a callable that drives the worker once.
// Thread-based modes progress from their own thread, so a no-op is returned.
std::function<void()> getProgressFunction(std::shared_ptr<ucxx::Worker> worker,
                                          ProgressMode progressMode)
{
  if (progressMode == ProgressMode::Polling) return [worker]() { worker->progress(); };
  if (progressMode == ProgressMode::Blocking)
    return [worker]() { worker->progressWorkerEvent(-1); };
  if (progressMode == ProgressMode::Wait) return [worker]() { worker->waitProgress(); };
  return []() {};
}
void waitRequests(ProgressMode progressMode,
                  std::shared_ptr<ucxx::Worker> worker,
                  const std::vector<std::shared_ptr<ucxx::Request>>& requests)
{
  // Busy-progress the worker until every request finishes, then surface
  // any per-request error by letting checkError() throw.
  auto progressFn = getProgressFunction(worker, progressMode);

  for (const auto& req : requests) {
    while (!req->isCompleted())
      progressFn();
    req->checkError();
  }
}
// Render a nanosecond count using the largest unit that keeps the value >= 1
// (e.g. 1500 -> "1.500000us"). Sub-microsecond values print as integer "ns".
std::string parseTime(size_t countNs)
{
  constexpr double kUs = 1e3;
  constexpr double kMs = 1e6;
  constexpr double kS  = 1e9;

  if (countNs >= kS) return std::to_string(countNs / kS) + std::string("s");
  if (countNs >= kMs) return std::to_string(countNs / kMs) + std::string("ms");
  if (countNs >= kUs) return std::to_string(countNs / kUs) + std::string("us");
  return std::to_string(countNs) + std::string("ns");
}
// Render a transfer rate with binary (1024-based) unit prefixes, choosing the
// largest unit that keeps the value >= 1.
std::string parseBandwidth(size_t totalBytes, size_t countNs)
{
  constexpr double kKiB = 1024.0;
  constexpr double kMiB = 1024.0 * 1024.0;
  constexpr double kGiB = 1024.0 * 1024.0 * 1024.0;

  double bytesPerSecond = totalBytes / (countNs / 1e9);

  if (bytesPerSecond >= kGiB) return std::to_string(bytesPerSecond / kGiB) + std::string("GB/s");
  if (bytesPerSecond >= kMiB) return std::to_string(bytesPerSecond / kMiB) + std::string("MB/s");
  if (bytesPerSecond >= kKiB) return std::to_string(bytesPerSecond / kKiB) + std::string("KB/s");
  return std::to_string(bytesPerSecond) + std::string("B/s");
}
// Allocate one send and one receive buffer of `message_size` bytes each.
// The send side is filled with the 0xaa pattern; the receive side is zeroed.
BufferMapPtr allocateTransferBuffers(size_t message_size)
{
  auto buffers    = std::make_shared<BufferMap>();
  (*buffers)[SEND] = std::vector<char>(message_size, 0xaa);
  (*buffers)[RECV] = std::vector<char>(message_size);
  return buffers;
}
// Perform one timed ping-pong transfer: schedule a tag send and a tag receive
// of `message_size` bytes, wait for both, optionally verify the payload.
// Returns the elapsed wall time in nanoseconds.
auto doTransfer(const app_context_t& app_context,
                std::shared_ptr<ucxx::Worker> worker,
                std::shared_ptr<ucxx::Endpoint> endpoint,
                TagMapPtr tagMap,
                BufferMapPtr bufferMapReuse)
{
  // Allocate fresh buffers per iteration unless reuse was requested (-r).
  BufferMapPtr localBufferMap;
  if (!app_context.reuse_alloc) localBufferMap = allocateTransferBuffers(app_context.message_size);
  BufferMapPtr bufferMap = app_context.reuse_alloc ? bufferMapReuse : localBufferMap;

  auto start = std::chrono::high_resolution_clock::now();
  std::vector<std::shared_ptr<ucxx::Request>> requests = {
    endpoint->tagSend((*bufferMap)[SEND].data(), app_context.message_size, (*tagMap)[SEND]),
    endpoint->tagRecv((*bufferMap)[RECV].data(), app_context.message_size, (*tagMap)[RECV])};

  // Wait for requests and clear requests
  waitRequests(app_context.progress_mode, worker, requests);
  auto stop = std::chrono::high_resolution_clock::now();

  if (app_context.verify_results) {
    // Compare the received bytes against the sent pattern. Both peers fill
    // their send buffers identically (0xaa), so RECV must match SEND.
    // (Previously this compared RECV against itself, which is always true.)
    for (size_t j = 0; j < (*bufferMap)[SEND].size(); ++j)
      assert((*bufferMap)[RECV][j] == (*bufferMap)[SEND][j]);
  }

  return std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}
int main(int argc, char** argv)
{
  app_context_t app_context;
  if (parseCommand(&app_context, argc, argv) != UCS_OK) return -1;

  // Setup: create UCP context, worker, listener and client endpoint.
  auto context = ucxx::createContext({}, ucxx::Context::defaultFeatureFlags);
  auto worker = context->createWorker();

  // Client mode is selected by passing a server address on the command line.
  bool is_server = app_context.server_addr == NULL;
  // Tags are mirrored so that each side's send matches the peer's receive.
  auto tagMap = std::make_shared<TagMap>(TagMap{
    {SEND, is_server ? 0 : 1},
    {RECV, is_server ? 1 : 0},
  });

  std::shared_ptr<ListenerContext> listener_ctx;
  std::shared_ptr<ucxx::Endpoint> endpoint;
  std::shared_ptr<ucxx::Listener> listener;
  if (is_server) {
    // make_shared matches the declared std::shared_ptr (was make_unique).
    listener_ctx = std::make_shared<ListenerContext>(worker);
    listener = worker->createListener(app_context.listener_port, listener_cb, listener_ctx.get());
    listener_ctx->setListener(listener);
  }

  // Initialize worker progress
  if (app_context.progress_mode == ProgressMode::Blocking)
    worker->initBlockingProgressMode();
  else if (app_context.progress_mode == ProgressMode::ThreadBlocking)
    worker->startProgressThread(false);
  else if (app_context.progress_mode == ProgressMode::ThreadPolling)
    worker->startProgressThread(true);

  auto progress = getProgressFunction(worker, app_context.progress_mode);

  // Block until client connects
  while (is_server && listener_ctx->isAvailable())
    progress();

  if (is_server)
    endpoint = listener_ctx->getEndpoint();
  else
    endpoint =
      worker->createEndpointFromHostname(app_context.server_addr, app_context.listener_port, true);

  std::vector<std::shared_ptr<ucxx::Request>> requests;

  // Allocate wireup buffers
  auto wireupBufferMap = std::make_shared<BufferMap>(
    BufferMap{{SEND, std::vector<char>{1, 2, 3}}, {RECV, std::vector<char>(3, 0)}});

  // Schedule small wireup messages to let UCX identify capabilities between endpoints.
  // The buffers hold `char` elements, so the transfer length is exactly the
  // vector size in bytes; the previous `* sizeof(int)` factor requested 12
  // bytes from/into 3-byte buffers, overrunning them.
  requests.push_back(endpoint->tagSend(
    (*wireupBufferMap)[SEND].data(), (*wireupBufferMap)[SEND].size(), (*tagMap)[SEND]));
  requests.push_back(endpoint->tagRecv(
    (*wireupBufferMap)[RECV].data(), (*wireupBufferMap)[RECV].size(), (*tagMap)[RECV]));

  // Wait for wireup requests and clear requests
  waitRequests(app_context.progress_mode, worker, requests);
  requests.clear();

  // Verify wireup result
  for (size_t i = 0; i < (*wireupBufferMap)[SEND].size(); ++i)
    assert((*wireupBufferMap)[RECV][i] == (*wireupBufferMap)[SEND][i]);

  BufferMapPtr bufferMapReuse;
  if (app_context.reuse_alloc) bufferMapReuse = allocateTransferBuffers(app_context.message_size);

  // Warmup iterations are excluded from the reported timings.
  for (size_t n = 0; n < app_context.warmup_iter; ++n)
    doTransfer(app_context, worker, endpoint, tagMap, bufferMapReuse);

  // Timed iterations; each transfer moves message_size bytes in each direction.
  size_t total_duration_ns = 0;
  for (size_t n = 0; n < app_context.n_iter; ++n) {
    auto duration_ns = doTransfer(app_context, worker, endpoint, tagMap, bufferMapReuse);
    total_duration_ns += duration_ns;
    auto elapsed = parseTime(duration_ns);
    auto bandwidth = parseBandwidth(app_context.message_size * 2, duration_ns);

    // Only the client reports, to avoid interleaved output from both peers.
    if (!is_server)
      std::cout << "Elapsed, bandwidth: " << elapsed << ", " << bandwidth << std::endl;
  }

  auto total_elapsed = parseTime(total_duration_ns);
  auto total_bandwidth =
    parseBandwidth(app_context.n_iter * app_context.message_size * 2, total_duration_ns);
  if (!is_server)
    std::cout << "Total elapsed, bandwidth: " << total_elapsed << ", " << total_bandwidth
              << std::endl;

  // Stop progress thread
  if (app_context.progress_mode == ProgressMode::ThreadBlocking ||
      app_context.progress_mode == ProgressMode::ThreadPolling)
    worker->stopProgressThread();

  return 0;
}
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/benchmarks/CMakeLists.txt | # ======================================================================================================
# SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ======================================================================================================
find_package(Threads REQUIRED)
# ##################################################################################################
# * compiler function -----------------------------------------------------------------------------
# Phony output driving benchmark execution: `UCXX_BENCHMARKS` is never created
# on disk, so any target depending on it re-runs all benchmarks. ConfigureBench
# APPENDs one COMMAND per benchmark to this same custom command.
add_custom_command(
  OUTPUT UCXX_BENCHMARKS
  COMMAND echo Running benchmarks
  COMMAND mkdir -p results
  VERBATIM
  COMMENT "Running ucxx benchmarks."
  USES_TERMINAL
)
# This function takes in a benchmark name and benchmark source and handles setting all of the
# associated properties and linking to build the benchmark
function(ConfigureBench CMAKE_BENCH_NAME)
  # Build the benchmark executable from the remaining (source-file) arguments.
  add_executable(${CMAKE_BENCH_NAME} ${ARGN})
  # Place binaries under <build>/benchmarks and set an install-relative RPATH
  # so the installed benchmark can locate libucxx.
  set_target_properties(
    ${CMAKE_BENCH_NAME}
    PROPERTIES RUNTIME_OUTPUT_DIRECTORY "$<BUILD_INTERFACE:${UCXX_BINARY_DIR}/benchmarks>"
               INSTALL_RPATH "\$ORIGIN/../../../lib"
               CXX_STANDARD 17
               CXX_STANDARD_REQUIRED ON
  )
  target_link_libraries(
    ${CMAKE_BENCH_NAME} PRIVATE ucxx
                                $<TARGET_NAME_IF_EXISTS:conda_env>
  )
  # Append this benchmark's invocation to the phony UCXX_BENCHMARKS command so
  # it is executed by the run_benchmarks target.
  add_custom_command(
    OUTPUT UCXX_BENCHMARKS
    # COMMAND ${CMAKE_BENCH_NAME} --benchmark_out_format=json
    # --benchmark_out=results/${CMAKE_BENCH_NAME}.json
    COMMAND ${CMAKE_BENCH_NAME}
    APPEND
    COMMENT "Adding ${CMAKE_BENCH_NAME}"
  )
  # Benchmarks are an opt-in install component, excluded from a default install.
  install(
    TARGETS ${CMAKE_BENCH_NAME}
    COMPONENT benchmarks
    DESTINATION bin/benchmarks/libucxx
    EXCLUDE_FROM_ALL
  )
endfunction()
# ##################################################################################################
# * perftest benchmarks ----------------------------------------------------------------------------
# Build the ucxx_perftest benchmark from perftest.cpp.
ConfigureBench(ucxx_perftest perftest.cpp)

# Building `run_benchmarks` resolves the phony UCXX_BENCHMARKS output, which
# executes every benchmark registered via ConfigureBench().
add_custom_target(
  run_benchmarks
  DEPENDS UCXX_BENCHMARKS
  COMMENT "Custom command for running ucxx benchmarks."
)
| 0 |
rapidsai_public_repos/ucxx/cpp/cmake | rapidsai_public_repos/ucxx/cpp/cmake/thirdparty/get_gtest.cmake | # =============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds gtest and sets any additional necessary environment variables.
function(find_and_configure_gtest)
  include(${rapids-cmake-dir}/cpm/gtest.cmake)

  # Find or install GoogleTest
  rapids_cpm_gtest(BUILD_EXPORT_SET ucxx-testing-exports INSTALL_EXPORT_SET ucxx-testing-exports)

  # When GTest was downloaded and built here (rather than found pre-installed),
  # export its targets and record its package root for build-tree consumers.
  if(GTest_ADDED)
    rapids_export(
      BUILD GTest
      VERSION ${GTest_VERSION}
      EXPORT_SET GTestTargets
      GLOBAL_TARGETS gtest gmock gtest_main gmock_main
      NAMESPACE GTest::
    )

    include("${rapids-cmake-dir}/export/find_package_root.cmake")
    rapids_export_find_package_root(
      BUILD GTest [=[${CMAKE_CURRENT_LIST_DIR}]=] EXPORT_SET ucxx-testing-exports
    )
  endif()
endfunction()

find_and_configure_gtest()
| 0 |
rapidsai_public_repos/ucxx/cpp/cmake | rapidsai_public_repos/ucxx/cpp/cmake/thirdparty/get_rmm.cmake | # =============================================================================
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds rmm and sets any additional necessary environment variables.
function(find_and_configure_rmm)
  include(${rapids-cmake-dir}/cpm/rmm.cmake)

  # Find or install RMM, registering it with the ucxx export sets so the
  # dependency propagates to consumers of the build and install trees.
  rapids_cpm_rmm(BUILD_EXPORT_SET ucxx-exports INSTALL_EXPORT_SET ucxx-exports)
endfunction()

find_and_configure_rmm()
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/endpoint.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <sstream>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#include <ucp/api/ucp.h>
#include <ucxx/component.h>
#include <ucxx/endpoint.h>
#include <ucxx/exception.h>
#include <ucxx/listener.h>
#include <ucxx/request_am.h>
#include <ucxx/request_stream.h>
#include <ucxx/request_tag.h>
#include <ucxx/request_tag_multi.h>
#include <ucxx/typedefs.h>
#include <ucxx/utils/callback_notifier.h>
#include <ucxx/utils/sockaddr.h>
#include <ucxx/utils/ucx.h>
#include <ucxx/worker.h>
namespace ucxx {
static std::shared_ptr<Worker> getWorker(std::shared_ptr<Component> workerOrListener)
{
auto worker = std::dynamic_pointer_cast<Worker>(workerOrListener);
if (worker == nullptr) {
auto listener = std::dynamic_pointer_cast<Listener>(workerOrListener);
if (listener == nullptr)
throw std::invalid_argument(
"Invalid object, it's not a shared_ptr to either ucxx::Worker nor ucxx::Listener");
worker = std::dynamic_pointer_cast<Worker>(listener->getParent());
}
return worker;
}
// Construct an endpoint from caller-provided UCP parameters, completing them
// with the error-handling configuration before calling ucp_ep_create().
Endpoint::Endpoint(std::shared_ptr<Component> workerOrListener,
                   ucp_ep_params_t* params,
                   bool endpointErrorHandling)
  : _endpointErrorHandling{endpointErrorHandling}
{
  auto worker = ::ucxx::getWorker(workerOrListener);

  if (worker == nullptr || worker->getHandle() == nullptr)
    throw ucxx::Error("Worker not initialized");

  setParent(workerOrListener);

  // State shared with the UCP error handler; handed to UCP via err_handler.arg.
  _callbackData = std::make_unique<ErrorCallbackData>(
    (ErrorCallbackData){.status = UCS_OK, .inflightRequests = _inflightRequests, .worker = worker});

  params->err_mode =
    (endpointErrorHandling ? UCP_ERR_HANDLING_MODE_PEER : UCP_ERR_HANDLING_MODE_NONE);
  params->err_handler.cb = Endpoint::errorCallback;
  params->err_handler.arg = _callbackData.get();

  if (worker->isProgressThreadRunning()) {
    // A progress thread is running: dispatch endpoint creation to it via a
    // pre-progress callback and block until it has executed.
    ucs_status_t status = UCS_INPROGRESS;
    utils::CallbackNotifier callbackNotifier{};
    auto worker = ::ucxx::getWorker(_parent);  // NOTE: intentionally shadows the outer `worker`
    worker->registerGenericPre([this, &params, &callbackNotifier, &status]() {
      auto worker = ::ucxx::getWorker(_parent);
      status = ucp_ep_create(worker->getHandle(), params, &_handle);
      callbackNotifier.set();
    });
    callbackNotifier.wait();
    utils::ucsErrorThrow(status);
  } else {
    // No progress thread: safe to create the endpoint from the calling thread.
    utils::ucsErrorThrow(ucp_ep_create(worker->getHandle(), params, &_handle));
  }
  ucxx_trace("Endpoint created: %p, UCP handle: %p, parent: %p, endpointErrorHandling: %d",
             this,
             _handle,
             _parent.get(),
             endpointErrorHandling);
}
// Create a client endpoint connected to a remote listener at ipAddress:port.
// Throws ucxx::Error when the worker is not initialized; UCP errors are
// surfaced by the Endpoint constructor.
std::shared_ptr<Endpoint> createEndpointFromHostname(std::shared_ptr<Worker> worker,
                                                     std::string ipAddress,
                                                     uint16_t port,
                                                     bool endpointErrorHandling)
{
  if (worker == nullptr || worker->getHandle() == nullptr)
    throw ucxx::Error("Worker not initialized");

  ucp_ep_params_t params = {.field_mask = UCP_EP_PARAM_FIELD_FLAGS | UCP_EP_PARAM_FIELD_SOCK_ADDR |
                                          UCP_EP_PARAM_FIELD_ERR_HANDLING_MODE |
                                          UCP_EP_PARAM_FIELD_ERR_HANDLER,
                            .flags = UCP_EP_PARAMS_FLAGS_CLIENT_SERVER};

  // `info` must outlive the Endpoint constructor call below, since `params`
  // points into its sockaddr storage.
  auto info = ucxx::utils::get_addrinfo(ipAddress.c_str(), port);

  params.sockaddr.addrlen = info->ai_addrlen;
  params.sockaddr.addr = info->ai_addr;

  return std::shared_ptr<Endpoint>(new Endpoint(worker, &params, endpointErrorHandling));
}
std::shared_ptr<Endpoint> createEndpointFromConnRequest(std::shared_ptr<Listener> listener,
ucp_conn_request_h connRequest,
bool endpointErrorHandling)
{
if (listener == nullptr || listener->getHandle() == nullptr)
throw ucxx::Error("Worker not initialized");
ucp_ep_params_t params = {
.field_mask = UCP_EP_PARAM_FIELD_FLAGS | UCP_EP_PARAM_FIELD_CONN_REQUEST |
UCP_EP_PARAM_FIELD_ERR_HANDLING_MODE | UCP_EP_PARAM_FIELD_ERR_HANDLER,
.flags = UCP_EP_PARAMS_FLAGS_NO_LOOPBACK,
.conn_request = connRequest};
return std::shared_ptr<Endpoint>(new Endpoint(listener, ¶ms, endpointErrorHandling));
}
// Create an endpoint connected to a remote worker identified by its address
// blob (see ucxx::Address). Throws ucxx::Error on uninitialized arguments.
std::shared_ptr<Endpoint> createEndpointFromWorkerAddress(std::shared_ptr<Worker> worker,
                                                          std::shared_ptr<Address> address,
                                                          bool endpointErrorHandling)
{
  if (worker == nullptr || worker->getHandle() == nullptr)
    throw ucxx::Error("Worker not initialized");
  if (address == nullptr || address->getHandle() == nullptr || address->getLength() == 0)
    throw ucxx::Error("Address not initialized");

  ucp_ep_params_t params = {.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS |
                                          UCP_EP_PARAM_FIELD_ERR_HANDLING_MODE |
                                          UCP_EP_PARAM_FIELD_ERR_HANDLER,
                            .address = address->getHandle()};

  return std::shared_ptr<Endpoint>(new Endpoint(worker, &params, endpointErrorHandling));
}
Endpoint::~Endpoint()
{
  // Best-effort close with a 10-second period; close() logs rather than throws.
  close(10000000000 /* 10s */);
  ucxx_trace("Endpoint destroyed: %p, UCP handle: %p", this, _originalHandle);
}
void Endpoint::close(uint64_t period, uint64_t maxAttempts)
{
if (_handle == nullptr) return;
size_t canceled = cancelInflightRequests(3000000000 /* 3s */, 3);
ucxx_debug("Endpoint %p canceled %lu requests", _handle, canceled);
// Close the endpoint
unsigned closeMode = UCP_EP_CLOSE_MODE_FORCE;
if (_endpointErrorHandling && _callbackData->status != UCS_OK) {
// We force close endpoint if endpoint error handling is enabled and
// the endpoint status is not UCS_OK
closeMode = UCP_EP_CLOSE_MODE_FORCE;
}
auto worker = ::ucxx::getWorker(_parent);
ucs_status_ptr_t status;
if (worker->isProgressThreadRunning()) {
bool closeSuccess = false;
for (uint64_t i = 0; i < maxAttempts && !closeSuccess; ++i) {
utils::CallbackNotifier callbackNotifierPre{};
worker->registerGenericPre([this, &callbackNotifierPre, &status, closeMode]() {
status = ucp_ep_close_nb(_handle, closeMode);
callbackNotifierPre.set();
});
if (!callbackNotifierPre.wait(period)) continue;
while (UCS_PTR_IS_PTR(status)) {
utils::CallbackNotifier callbackNotifierPost{};
worker->registerGenericPost([this, &callbackNotifierPost, &status]() {
ucs_status_t s = ucp_request_check_status(status);
if (UCS_PTR_STATUS(s) != UCS_INPROGRESS) {
ucp_request_free(status);
_callbackData->status = UCS_PTR_STATUS(s);
if (UCS_PTR_STATUS(status) != UCS_OK) {
ucxx_error("Error while closing endpoint: %s",
ucs_status_string(UCS_PTR_STATUS(status)));
}
}
callbackNotifierPost.set();
});
if (!callbackNotifierPost.wait(period)) continue;
}
closeSuccess = true;
}
if (!closeSuccess) {
_callbackData->status = UCS_ERR_ENDPOINT_TIMEOUT;
ucxx_error("All attempts to close timed out on endpoint: %p, UCP handle: %p", this, _handle);
}
} else {
status = ucp_ep_close_nb(_handle, closeMode);
if (UCS_PTR_IS_PTR(status)) {
ucs_status_t s;
while ((s = ucp_request_check_status(status)) == UCS_INPROGRESS)
worker->progress();
ucp_request_free(status);
_callbackData->status = s;
} else if (UCS_PTR_STATUS(status) != UCS_OK) {
ucxx_error("Error while closing endpoint: %s", ucs_status_string(UCS_PTR_STATUS(status)));
}
}
ucxx_trace("Endpoint closed: %p, UCP handle: %p", this, _handle);
if (_callbackData->closeCallback) {
ucxx_debug("Calling user callback for endpoint %p", _handle);
_callbackData->closeCallback(_callbackData->closeCallbackArg);
_callbackData->closeCallback = nullptr;
_callbackData->closeCallbackArg = nullptr;
}
std::swap(_handle, _originalHandle);
}
// Return the underlying UCP endpoint handle (swapped out after close()).
ucp_ep_h Endpoint::getHandle() { return _handle; }

// Without error handling there is no way to detect failure, so the endpoint is
// always reported alive; otherwise alive means no error status was recorded.
bool Endpoint::isAlive() const
{
  if (!_endpointErrorHandling) return true;

  return _callbackData->status == UCS_OK;
}

// Throw (via ucsErrorThrow) if an error status was recorded for this endpoint;
// a no-op when error handling is disabled or the status is UCS_OK.
void Endpoint::raiseOnError()
{
  ucs_status_t status = _callbackData->status;

  if (status == UCS_OK || !_endpointErrorHandling) return;

  std::string statusString{ucs_status_string(status)};
  std::stringstream errorMsgStream;
  errorMsgStream << "Endpoint " << std::hex << _handle << " error: " << statusString;

  utils::ucsErrorThrow(status, errorMsgStream.str());
}

// Register a callback invoked when the endpoint closes or errors (cleared
// after its single invocation by close()/errorCallback()).
void Endpoint::setCloseCallback(std::function<void(void*)> closeCallback, void* closeCallbackArg)
{
  _callbackData->closeCallback = closeCallback;
  _callbackData->closeCallbackArg = closeCallbackArg;
}
// Track a not-yet-completed request so it can be canceled if the endpoint
// errors; returns the same request for caller chaining.
std::shared_ptr<Request> Endpoint::registerInflightRequest(std::shared_ptr<Request> request)
{
  if (!request->isCompleted()) _inflightRequests->insert(request);

  /**
   * If the endpoint errored while the request was being submitted, the error
   * handler may have been called already and we need to register any new requests
   * for cancelation, including the present one.
   */
  if (_callbackData->status != UCS_OK)
    _callbackData->worker->scheduleRequestCancel(_inflightRequests);

  return request;
}

// Stop tracking a request (typically upon its completion or destruction).
void Endpoint::removeInflightRequest(const Request* const request)
{
  _inflightRequests->remove(request);
}
// Cancel all inflight requests, returning the number canceled.
//
// If called from the progress thread itself, cancel directly; if a progress
// thread is running elsewhere, dispatch the cancelation to it, retrying up to
// `maxAttempts` times with `period` (nanoseconds) per callback wait.
size_t Endpoint::cancelInflightRequests(uint64_t period, uint64_t maxAttempts)
{
  auto worker = ::ucxx::getWorker(this->_parent);

  size_t canceled = 0;

  if (std::this_thread::get_id() == worker->getProgressThreadId()) {
    // Already on the progress thread: safe to cancel and progress inline.
    canceled = _inflightRequests->cancelAll();
    worker->progress();
  } else if (worker->isProgressThreadRunning()) {
    bool cancelSuccess = false;
    for (uint64_t i = 0; i < maxAttempts && !cancelSuccess; ++i) {
      // Run the cancelation on the progress thread before its next progress pass...
      utils::CallbackNotifier callbackNotifierPre{};
      worker->registerGenericPre([this, &callbackNotifierPre, &canceled]() {
        canceled = _inflightRequests->cancelAll();
        callbackNotifierPre.set();
      });
      if (!callbackNotifierPre.wait(period)) continue;

      // ...then wait for one post-progress callback to confirm a pass completed.
      utils::CallbackNotifier callbackNotifierPost{};
      worker->registerGenericPost([&callbackNotifierPost]() { callbackNotifierPost.set(); });
      if (!callbackNotifierPost.wait(period)) continue;

      cancelSuccess = true;
    }
    if (!cancelSuccess)
      ucxx_error("All attempts to cancel inflight requests failed on endpoint: %p, UCP handle: %p",
                 this,
                 _handle);
  } else {
    // No progress thread: safe to cancel from the calling thread.
    canceled = _inflightRequests->cancelAll();
  }

  return canceled;
}
// Submit an active-message send; the request is tracked as inflight until completion.
std::shared_ptr<Request> Endpoint::amSend(void* buffer,
                                          size_t length,
                                          ucs_memory_type_t memoryType,
                                          const bool enablePythonFuture,
                                          RequestCallbackUserFunction callbackFunction,
                                          RequestCallbackUserData callbackData)
{
  auto endpoint = std::dynamic_pointer_cast<Endpoint>(shared_from_this());
  return registerInflightRequest(createRequestAmSend(
    endpoint, buffer, length, memoryType, enablePythonFuture, callbackFunction, callbackData));
}

// Submit an active-message receive, tracked as inflight until completion.
std::shared_ptr<Request> Endpoint::amRecv(const bool enablePythonFuture,
                                          RequestCallbackUserFunction callbackFunction,
                                          RequestCallbackUserData callbackData)
{
  auto endpoint = std::dynamic_pointer_cast<Endpoint>(shared_from_this());
  return registerInflightRequest(
    createRequestAmRecv(endpoint, enablePythonFuture, callbackFunction, callbackData));
}

// Submit a stream send of `length` bytes, tracked as inflight until completion.
std::shared_ptr<Request> Endpoint::streamSend(void* buffer,
                                              size_t length,
                                              const bool enablePythonFuture)
{
  auto endpoint = std::dynamic_pointer_cast<Endpoint>(shared_from_this());
  return registerInflightRequest(
    createRequestStream(endpoint, true, buffer, length, enablePythonFuture));
}

// Submit a stream receive into `buffer`, tracked as inflight until completion.
std::shared_ptr<Request> Endpoint::streamRecv(void* buffer,
                                              size_t length,
                                              const bool enablePythonFuture)
{
  auto endpoint = std::dynamic_pointer_cast<Endpoint>(shared_from_this());
  return registerInflightRequest(
    createRequestStream(endpoint, false, buffer, length, enablePythonFuture));
}
// Submit a tagged send, tracked as inflight until completion.
std::shared_ptr<Request> Endpoint::tagSend(void* buffer,
                                           size_t length,
                                           ucp_tag_t tag,
                                           const bool enablePythonFuture,
                                           RequestCallbackUserFunction callbackFunction,
                                           RequestCallbackUserData callbackData)
{
  auto endpoint = std::dynamic_pointer_cast<Endpoint>(shared_from_this());
  return registerInflightRequest(createRequestTag(
    endpoint, true, buffer, length, tag, enablePythonFuture, callbackFunction, callbackData));
}

// Submit a tagged receive, tracked as inflight until completion.
std::shared_ptr<Request> Endpoint::tagRecv(void* buffer,
                                           size_t length,
                                           ucp_tag_t tag,
                                           const bool enablePythonFuture,
                                           RequestCallbackUserFunction callbackFunction,
                                           RequestCallbackUserData callbackData)
{
  auto endpoint = std::dynamic_pointer_cast<Endpoint>(shared_from_this());
  return registerInflightRequest(createRequestTag(
    endpoint, false, buffer, length, tag, enablePythonFuture, callbackFunction, callbackData));
}

// Submit a multi-buffer tagged send (one frame per entry of buffer/size/isCUDA).
std::shared_ptr<Request> Endpoint::tagMultiSend(const std::vector<void*>& buffer,
                                                const std::vector<size_t>& size,
                                                const std::vector<int>& isCUDA,
                                                const ucp_tag_t tag,
                                                const bool enablePythonFuture)
{
  auto endpoint = std::dynamic_pointer_cast<Endpoint>(shared_from_this());
  return registerInflightRequest(
    createRequestTagMultiSend(endpoint, buffer, size, isCUDA, tag, enablePythonFuture));
}

// Submit a multi-buffer tagged receive matching `tag`.
std::shared_ptr<Request> Endpoint::tagMultiRecv(const ucp_tag_t tag, const bool enablePythonFuture)
{
  auto endpoint = std::dynamic_pointer_cast<Endpoint>(shared_from_this());
  return registerInflightRequest(createRequestTagMultiRecv(endpoint, tag, enablePythonFuture));
}
// Resolve the owning worker, whether the parent is a Worker or a Listener.
std::shared_ptr<Worker> Endpoint::getWorker() { return ::ucxx::getWorker(_parent); }

// Static UCP error-handler callback: records the error status, schedules
// cancelation of inflight requests, and fires the user close callback once.
void Endpoint::errorCallback(void* arg, ucp_ep_h ep, ucs_status_t status)
{
  ErrorCallbackData* data = reinterpret_cast<ErrorCallbackData*>(arg);
  data->status = status;
  data->worker->scheduleRequestCancel(data->inflightRequests);
  if (data->closeCallback) {
    ucxx_debug("Calling user callback for endpoint %p", ep);
    data->closeCallback(data->closeCallbackArg);
    // Clear so close() does not invoke the callback a second time.
    data->closeCallback = nullptr;
    data->closeCallbackArg = nullptr;
  }

  // Connection reset and timeout often represent just a normal remote
  // endpoint disconnect, log only in debug mode.
  if (status == UCS_ERR_CONNECTION_RESET || status == UCS_ERR_ENDPOINT_TIMEOUT)
    ucxx_debug("Error callback for endpoint %p called with status %d: %s",
               ep,
               status,
               ucs_status_string(status));
  else
    ucxx_error("Error callback for endpoint %p called with status %d: %s",
               ep,
               status,
               ucs_status_string(status));
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/address.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <cstdlib>
#include <cstring>
#include <memory>
#include <new>
#include <string>

#include <ucxx/address.h>
#include <ucxx/utils/ucx.h>
namespace ucxx {
// Wrap a UCP worker address blob; `worker` may be nullptr for addresses
// reconstructed from a string (see createAddressFromString).
Address::Address(std::shared_ptr<Worker> worker, ucp_address_t* address, size_t length)
  : _handle{address}, _length{length}
{
  if (worker != nullptr) setParent(worker);
}

Address::~Address()
{
  if (_handle == nullptr) return;

  auto worker = std::dynamic_pointer_cast<Worker>(getParent());
  if (worker == nullptr) {
    // No owning worker: the blob was heap-allocated locally and is released
    // with std::free(); the allocating code must use a malloc-compatible
    // allocator to match.
    std::free(_handle);
  } else {
    // Worker-owned addresses must be returned to UCP.
    ucp_worker_release_address(worker->getHandle(), _handle);
  }
}
// Fetch this worker's UCP address; it is released in ~Address via
// ucp_worker_release_address(). Throws (via ucsErrorThrow) on UCP failure.
std::shared_ptr<Address> createAddressFromWorker(std::shared_ptr<Worker> worker)
{
  ucp_worker_h ucp_worker = worker->getHandle();
  ucp_address_t* address{nullptr};
  size_t length = 0;

  utils::ucsErrorThrow(ucp_worker_get_address(ucp_worker, &address, &length));
  return std::shared_ptr<Address>(new Address(worker, address, length));
}
// Reconstruct an Address from a serialized blob (see Address::getString()).
// The resulting Address has no owning worker.
std::shared_ptr<Address> createAddressFromString(std::string addressString)
{
  size_t length = addressString.length();
  // ~Address releases non-worker addresses with std::free(), so the buffer
  // must come from malloc(); the previous `new char[]` mismatched free(),
  // which is undefined behavior.
  char* address = static_cast<char*>(std::malloc(length));
  if (address == nullptr) throw std::bad_alloc();
  memcpy(address, addressString.c_str(), length);
  return std::shared_ptr<Address>(
    new Address(nullptr, reinterpret_cast<ucp_address_t*>(address), length));
}
// Raw UCP address handle (non-owning view).
ucp_address_t* Address::getHandle() const { return _handle; }

// Size of the address blob in bytes.
size_t Address::getLength() const { return _length; }

// Copy the address bytes into a std::string, suitable for createAddressFromString().
std::string Address::getString() const
{
  return std::string{reinterpret_cast<char*>(_handle), _length};
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/inflight_requests.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <ucxx/inflight_requests.h>
#include <ucxx/log.h>
#include <ucxx/request.h>
namespace ucxx {
// Cancel anything still tracked when the container is destroyed.
InflightRequests::~InflightRequests() { cancelAll(); }

// Number of tracked requests. NOTE(review): reads the map without holding
// _mutex, so concurrent callers get a racy snapshot — confirm this is intended.
size_t InflightRequests::size() { return _inflightRequests->size(); }

// Track a request, keyed by its raw pointer for O(1) removal.
void InflightRequests::insert(std::shared_ptr<Request> request)
{
  std::lock_guard<std::mutex> lock(_mutex);

  _inflightRequests->insert({request.get(), request});
}

// Absorb all requests from another tracking map into this one.
void InflightRequests::merge(InflightRequestsMapPtr inflightRequestsMap)
{
  std::lock_guard<std::mutex> lock(_mutex);

  _inflightRequests->merge(*inflightRequestsMap);
}
// Stop tracking a single request.
//
// Attempts to take both the cancel and general mutexes; if a concurrent
// cancelAll() holds the cancel mutex there is nothing to remove, so return.
void InflightRequests::remove(const Request* const request)
{
  do {
    int result = std::try_lock(_cancelMutex, _mutex);

    /**
     * `result` can have one of three values:
     * -1 (both arguments were locked): Remove request and return.
     *  0 (failed to lock argument 0): Failed acquiring `_cancelMutex`, cancel in
     *                                 progress, nothing to do but return. The method was
     *                                 called during execution of `cancelAll()` and the
     *                                 `Request*` callback was invoked.
     *  1 (failed to lock argument 1): Only `_cancelMutex` was acquired, another
     *                                 operation in progress, retry.
     */
    if (result == 0) {
      return;
    } else if (result == -1) {
      auto search = _inflightRequests->find(request);
      decltype(search->second) tmpRequest;
      if (search != _inflightRequests->end()) {
        /**
         * If this is the last request to hold `std::shared_ptr<ucxx::Endpoint>` erasing it
         * may cause the `ucxx::Endpoint`s destructor and subsequently the `close()` method
         * to be called which will in turn call `cancelAll()` and attempt to take the
         * mutexes. For this reason we should make a temporary copy of the request being
         * erased from `_inflightRequests` to allow unlocking the mutexes and only then
         * destroy the object upon this method's return.
         */
        tmpRequest = search->second;
        _inflightRequests->erase(search);
      }
      _cancelMutex.unlock();
      _mutex.unlock();
      return;
    }
  } while (true);
}
// Cancel every tracked request and return how many were canceled.
size_t InflightRequests::cancelAll()
{
  decltype(_inflightRequests) toCancel;
  size_t total;
  {
    std::scoped_lock lock{_cancelMutex, _mutex};
    total = _inflightRequests->size();

    // Fast path when no requests have been registered or the map has been
    // previously released.
    if (total == 0) return 0;

    // Swap the map out under the locks; the actual cancelation happens after
    // the locks are released.
    toCancel = std::exchange(_inflightRequests, std::make_unique<InflightRequestsMap>());
  }
  ucxx_debug("Canceling %lu requests", total);
  for (auto& r : *toCancel) {
    auto request = r.second;
    if (request != nullptr) { request->cancel(); }
  }
  toCancel->clear();

  return total;
}
// Hand the whole tracking map over to the caller, leaving this container empty.
InflightRequestsMapPtr InflightRequests::release()
{
  std::lock_guard<std::mutex> lock(_mutex);

  return std::exchange(_inflightRequests, std::make_unique<InflightRequestsMap>());
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/component.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <ucxx/component.h>
namespace ucxx {
Component::~Component() {}

// Called from child's constructor: keeps the parent alive for this
// component's lifetime via shared ownership.
void Component::setParent(std::shared_ptr<Component> parent) { _parent = parent; }

// Return the owning parent component (may be nullptr if never set).
std::shared_ptr<Component> Component::getParent() const { return _parent; }
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/buffer.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <iterator>
#include <memory>
#include <utility>
#include <ucxx/buffer.h>
#if UCXX_ENABLE_RMM
#include <rmm/device_buffer.hpp>
#endif
namespace ucxx {
// Base class carrying the buffer kind and size; concrete storage is owned by
// the subclasses (HostBuffer, RMMBuffer).
Buffer::Buffer(const BufferType bufferType, const size_t size)
  : _bufferType{bufferType}, _size{size}
{
}

Buffer::~Buffer() {}

// Buffer kind; becomes BufferType::Invalid after release().
BufferType Buffer::getType() const noexcept { return _bufferType; }

// Size in bytes requested at allocation; becomes 0 after release().
size_t Buffer::getSize() const noexcept { return _size; }
// Allocate a host buffer of `size` bytes with malloc().
// NOTE(review): the malloc() result is not checked here; a failed allocation
// only surfaces later when data()/release() throw on the null pointer.
HostBuffer::HostBuffer(const size_t size) : Buffer(BufferType::Host, size), _buffer{malloc(size)}
{
  ucxx_trace_data("HostBuffer(%lu), _buffer: %p", size, _buffer);
}

HostBuffer::~HostBuffer()
{
  // _buffer is nullptr after release(); only free what is still owned.
  if (_buffer) free(_buffer);
}

// Transfer ownership of the raw allocation to the caller, invalidating this
// buffer (type becomes Invalid, size 0). Throws if already released.
void* HostBuffer::release()
{
  ucxx_trace_data("HostBuffer::release(), _buffer: %p", _buffer);
  if (!_buffer) throw std::runtime_error("Invalid object or already released");

  _bufferType = ucxx::BufferType::Invalid;
  _size = 0;

  return std::exchange(_buffer, nullptr);
}

// Borrow the underlying pointer without transferring ownership.
// Throws if the buffer was already released.
void* HostBuffer::data()
{
  ucxx_trace_data("HostBuffer::data(), _buffer: %p", _buffer);
  if (!_buffer) throw std::runtime_error("Invalid object or already released");

  return _buffer;
}
#if UCXX_ENABLE_RMM
// Allocate an RMM device buffer of `size` bytes on the default CUDA stream.
RMMBuffer::RMMBuffer(const size_t size)
  : Buffer(BufferType::RMM, size),
    _buffer{std::make_unique<rmm::device_buffer>(size, rmm::cuda_stream_default)}
{
  ucxx_trace_data("RMMBuffer(%lu), _buffer: %p", size, _buffer.get());
}

// Transfer ownership of the device buffer to the caller, invalidating this
// buffer (type becomes Invalid, size 0). Throws if already released.
std::unique_ptr<rmm::device_buffer> RMMBuffer::release()
{
  ucxx_trace_data("RMMBuffer::release(), _buffer: %p", _buffer.get());
  if (!_buffer) throw std::runtime_error("Invalid object or already released");

  _bufferType = ucxx::BufferType::Invalid;
  _size = 0;

  return std::move(_buffer);
}

// Borrow the raw device pointer without transferring ownership.
// Throws if the buffer was already released.
void* RMMBuffer::data()
{
  ucxx_trace_data("RMMBuffer::data(), _buffer: %p", _buffer.get());
  if (!_buffer) throw std::runtime_error("Invalid object or already released");

  return _buffer->data();
}
#endif
// Factory for Buffer instances.
//
// With RMM support compiled in, BufferType::RMM yields an RMMBuffer and any
// other type a HostBuffer; without RMM support, requesting BufferType::RMM
// throws std::runtime_error. Note the `else`/`#else` interplay: the trailing
// return is the fall-through in both preprocessor configurations.
std::shared_ptr<Buffer> allocateBuffer(const BufferType bufferType, const size_t size)
{
#if UCXX_ENABLE_RMM
  if (bufferType == BufferType::RMM)
    return std::make_shared<RMMBuffer>(size);
  else
#else
  if (bufferType == BufferType::RMM)
    throw std::runtime_error("RMM support not enabled, please compile with -DUCXX_ENABLE_RMM=1");
#endif
    return std::make_shared<HostBuffer>(size);
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/request_tag.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <cstdio>
#include <memory>
#include <string>
#include <ucp/api/ucp.h>
#include <ucxx/delayed_submission.h>
#include <ucxx/request_tag.h>
namespace ucxx {
// Build a tag send/receive request and register it for delayed submission.
//
// `endpointOrWorker` must resolve to an Endpoint for sends (enforced by the
// RequestTag constructor); receives may be posted on a worker.
std::shared_ptr<RequestTag> createRequestTag(std::shared_ptr<Component> endpointOrWorker,
                                             bool send,
                                             void* buffer,
                                             size_t length,
                                             ucp_tag_t tag,
                                             const bool enablePythonFuture = false,
                                             RequestCallbackUserFunction callbackFunction = nullptr,
                                             RequestCallbackUserData callbackData = nullptr)
{
  auto req = std::shared_ptr<RequestTag>(new RequestTag(endpointOrWorker,
                                                        send,
                                                        buffer,
                                                        length,
                                                        tag,
                                                        enablePythonFuture,
                                                        callbackFunction,
                                                        callbackData));

  // A delayed notification request is not populated immediately, instead it is
  // delayed to allow the worker progress thread to set its status, and more
  // importantly the Python future later on, so that we don't need the GIL here.
  req->_worker->registerDelayedSubmission(
    req, std::bind(std::mem_fn(&Request::populateDelayedSubmission), req.get()));

  return req;
}
// Construct a tag send/receive request. Sends require an endpoint; receives
// may be posted against either an endpoint or a worker (the base Request
// resolves which one `endpointOrWorker` is).
RequestTag::RequestTag(std::shared_ptr<Component> endpointOrWorker,
                       bool send,
                       void* buffer,
                       size_t length,
                       ucp_tag_t tag,
                       const bool enablePythonFuture,
                       RequestCallbackUserFunction callbackFunction,
                       RequestCallbackUserData callbackData)
  : Request(endpointOrWorker,
            std::make_shared<DelayedSubmission>(send, buffer, length, tag),
            std::string(send ? "tagSend" : "tagRecv"),
            enablePythonFuture),
    _length(length)
{
  // Tag sends cannot be posted on a bare worker: a destination is required.
  if (send && _endpoint == nullptr)
    throw ucxx::Error("An endpoint is required to send tag messages");
  _callback = callbackFunction;
  _callbackData = callbackData;
}

// Receive-completion hook; forwards to the base class. Length/truncation
// validation is intentionally disabled pending a decision (see issue below).
void RequestTag::callback(void* request, ucs_status_t status, const ucp_tag_recv_info_t* info)
{
  // TODO: Decide on behavior. See https://github.com/rapidsai/ucxx/issues/104 .
  // if (status != UCS_ERR_CANCELED && info->length != _length) {
  //   status           = UCS_ERR_MESSAGE_TRUNCATED;
  //   const char* fmt  = "length mismatch: %llu (got) != %llu (expected)";
  //   size_t len       = std::snprintf(nullptr, 0, fmt, info->length, _length);
  //   _status_msg      = std::string(len + 1, '\0');  // +1 for null terminator
  //   std::snprintf(_status_msg.data(), _status_msg.size(), fmt, info->length, _length);
  // }

  Request::callback(request, status);
}
/// UCP completion callback for tag sends; `arg` is the owning Request.
void RequestTag::tagSendCallback(void* request, ucs_status_t status, void* arg)
{
  auto* req = static_cast<Request*>(arg);
  ucxx_trace_req_f(req->getOwnerString().c_str(), request, "tagSend", "tagSendCallback");
  req->callback(request, status);
}

/// UCP completion callback for tag receives; `arg` is the owning RequestTag,
/// which also receives the tag-receive info for (future) length validation.
void RequestTag::tagRecvCallback(void* request,
                                 ucs_status_t status,
                                 const ucp_tag_recv_info_t* info,
                                 void* arg)
{
  auto* req = static_cast<RequestTag*>(arg);
  ucxx_trace_req_f(req->getOwnerString().c_str(), request, "tagRecv", "tagRecvCallback");
  req->callback(request, status, info);
}
// Submit the tag operation to UCP. Called from populateDelayedSubmission(),
// i.e. typically on the worker progress thread.
void RequestTag::request()
{
  // Full mask: receives match the tag exactly.
  static const ucp_tag_t tagMask = -1;

  ucp_request_param_t param = {.op_attr_mask = UCP_OP_ATTR_FIELD_CALLBACK |
                                               UCP_OP_ATTR_FIELD_DATATYPE |
                                               UCP_OP_ATTR_FIELD_USER_DATA,
                               .datatype = ucp_dt_make_contig(1),
                               .user_data = this};
  void* request = nullptr;

  if (_delayedSubmission->_send) {
    // Sends go through the endpoint.
    param.cb.send = tagSendCallback;
    request = ucp_tag_send_nbx(_endpoint->getHandle(),
                               _delayedSubmission->_buffer,
                               _delayedSubmission->_length,
                               _delayedSubmission->_tag,
                               &param);
  } else {
    // Receives are posted on the worker and matched by tag.
    param.cb.recv = tagRecvCallback;
    request = ucp_tag_recv_nbx(_worker->getHandle(),
                               _delayedSubmission->_buffer,
                               _delayedSubmission->_length,
                               _delayedSubmission->_tag,
                               tagMask,
                               &param);
  }

  // Publish the UCP request handle under the lock; other threads may be
  // inspecting/canceling this request concurrently.
  std::lock_guard<std::recursive_mutex> lock(_mutex);
  _request = request;
}

// Deferred-submission entry point: bail out with cancellation if the
// endpoint/worker was closed in the meantime, otherwise submit and process.
void RequestTag::populateDelayedSubmission()
{
  if (_delayedSubmission->_send && _endpoint->getHandle() == nullptr) {
    ucxx_warn("Endpoint was closed before message could be sent");
    Request::callback(this, UCS_ERR_CANCELED);
    return;
  } else if (!_delayedSubmission->_send && _worker->getHandle() == nullptr) {
    ucxx_warn("Worker was closed before message could be received");
    Request::callback(this, UCS_ERR_CANCELED);
    return;
  }

  request();

  if (_enablePythonFuture)
    ucxx_trace_req_f(
      _ownerString.c_str(),
      _request,
      _operationName.c_str(),
      "tag 0x%lx, buffer %p, size %lu, future %p, future handle %p, populateDelayedSubmission",
      _delayedSubmission->_tag,
      _delayedSubmission->_buffer,
      _delayedSubmission->_length,
      _future.get(),
      _future->getHandle());
  else
    ucxx_trace_req_f(_ownerString.c_str(),
                     _request,
                     _operationName.c_str(),
                     "tag 0x%lx, buffer %p, size %lu, populateDelayedSubmission",
                     _delayedSubmission->_tag,
                     _delayedSubmission->_buffer,
                     _delayedSubmission->_length);

  // Handle immediate completion/error, or hand off to the UCP callback.
  process();
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/request_helper.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <vector>
#include <ucxx/request.h>
namespace ucxx {
/// Block until `request` completes, driving `worker` progress, then raise on
/// any error status the request finished with.
void waitSingleRequest(std::shared_ptr<Worker> worker, std::shared_ptr<Request> request)
{
  for (; !request->isCompleted();)
    worker->progress();

  request->checkError();
}

/// Complete all requests sequentially; the first failed request throws.
void waitRequests(std::shared_ptr<Worker> worker, std::vector<std::shared_ptr<Request>> requests)
{
  for (const auto& request : requests)
    waitSingleRequest(worker, request);
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/request_am.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <cstdio>
#include <memory>
#include <sstream>
#include <string>
#include <ucp/api/ucp.h>
#include <ucxx/buffer.h>
#include <ucxx/delayed_submission.h>
#include <ucxx/internal/request_am.h>
#include <ucxx/request_am.h>
namespace ucxx {
std::shared_ptr<RequestAm> createRequestAmSend(
std::shared_ptr<Endpoint> endpoint,
void* buffer,
size_t length,
ucs_memory_type_t memoryType = UCS_MEMORY_TYPE_HOST,
const bool enablePythonFuture = false,
RequestCallbackUserFunction callbackFunction = nullptr,
RequestCallbackUserData callbackData = nullptr)
{
auto req = std::shared_ptr<RequestAm>(new RequestAm(
endpoint, buffer, length, memoryType, enablePythonFuture, callbackFunction, callbackData));
// A delayed notification request is not populated immediately, instead it is
// delayed to allow the worker progress thread to set its status, and more
// importantly the Python future later on, so that we don't need the GIL here.
req->_worker->registerDelayedSubmission(
req, std::bind(std::mem_fn(&Request::populateDelayedSubmission), req.get()));
return req;
}
std::shared_ptr<RequestAm> createRequestAmRecv(
std::shared_ptr<Endpoint> endpoint,
const bool enablePythonFuture = false,
RequestCallbackUserFunction callbackFunction = nullptr,
RequestCallbackUserData callbackData = nullptr)
{
auto worker = endpoint->getWorker();
auto createRequest = [endpoint, enablePythonFuture, callbackFunction, callbackData]() {
return std::shared_ptr<RequestAm>(
new RequestAm(endpoint, enablePythonFuture, callbackFunction, callbackData));
};
return worker->getAmRecv(endpoint->getHandle(), createRequest);
}
// Send-side constructor: carries the user buffer and its memory type so the
// receiver can pick a matching allocator.
RequestAm::RequestAm(std::shared_ptr<Endpoint> endpoint,
                     void* buffer,
                     size_t length,
                     ucs_memory_type_t memoryType,
                     const bool enablePythonFuture,
                     RequestCallbackUserFunction callbackFunction,
                     RequestCallbackUserData callbackData)
  : Request(endpoint,
            std::make_shared<DelayedSubmission>(true, buffer, length, 0, memoryType),
            std::string("amSend"),
            enablePythonFuture)
{
  _callback = callbackFunction;
  _callbackData = callbackData;
}

// Receive-side constructor: no delayed submission, the buffer is allocated by
// the worker's AM callback when the message arrives.
RequestAm::RequestAm(std::shared_ptr<Component> endpointOrWorker,
                     const bool enablePythonFuture,
                     RequestCallbackUserFunction callbackFunction,
                     RequestCallbackUserData callbackData)
  : Request(endpointOrWorker, nullptr, std::string("amRecv"), enablePythonFuture)
{
  _callback = callbackFunction;
  _callbackData = callbackData;
}

// UCP completion callback for AM sends; `user_data` is the owning Request.
static void _amSendCallback(void* request, ucs_status_t status, void* user_data)
{
  Request* req = reinterpret_cast<Request*>(user_data);
  ucxx_trace_req_f(req->getOwnerString().c_str(), request, "amSend", "_amSendCallback");
  req->callback(request, status);
}

// UCP completion callback for rendezvous AM receives; `user_data` is the
// RecvAmMessage owning both the request and the receive buffer.
static void _recvCompletedCallback(void* request,
                                   ucs_status_t status,
                                   size_t length,
                                   void* user_data)
{
  internal::RecvAmMessage* recvAmMessage = static_cast<internal::RecvAmMessage*>(user_data);
  ucxx_trace_req_f(
    recvAmMessage->_request->getOwnerString().c_str(), request, "amRecv", "amRecvCallback");
  recvAmMessage->callback(request, status);
}
/**
 * Worker-level UCP active-message receive handler.
 *
 * Matches the incoming message to a waiting receive request (from `recvWait`)
 * or creates a new one and pools it (`recvPool`). The message header carries
 * the sender's memory type, used to select an allocator for the receive
 * buffer. Rendezvous messages are fetched asynchronously with
 * `ucp_am_recv_data_nbx()`; eager messages are copied out immediately.
 *
 * Fix: removed a dead `std::shared_ptr<Buffer> buf{nullptr}` declared at
 * function scope — both the rndv and eager branches declared their own `buf`,
 * shadowing (and never using) the outer one.
 *
 * @returns UCS_INPROGRESS while a rendezvous receive is pending, otherwise the
 *          final status of the receive.
 */
ucs_status_t RequestAm::recvCallback(void* arg,
                                     const void* header,
                                     size_t header_length,
                                     void* data,
                                     size_t length,
                                     const ucp_am_recv_param_t* param)
{
  internal::AmData* amData = static_cast<internal::AmData*>(arg);
  auto worker = amData->_worker.lock();
  auto& ownerString = amData->_ownerString;
  auto& recvPool = amData->_recvPool;
  auto& recvWait = amData->_recvWait;

  if ((param->recv_attr & UCP_AM_RECV_ATTR_FIELD_REPLY_EP) == 0)
    ucxx_error("UCP_AM_RECV_ATTR_FIELD_REPLY_EP not set");

  // Requests are matched per reply endpoint.
  ucp_ep_h ep = param->reply_ep;

  bool is_rndv = param->recv_attr & UCP_AM_RECV_ATTR_FLAG_RNDV;

  // The header carries the sender's memory type (see RequestAm::request()).
  auto allocatorType = *static_cast<const ucs_memory_type_t*>(header);

  std::shared_ptr<RequestAm> req{nullptr};

  {
    std::lock_guard<std::mutex> lock(amData->_mutex);

    // Prefer a request the user is already waiting on; otherwise create one
    // and pool it for a future createRequestAmRecv() call.
    auto reqs = recvWait.find(ep);
    if (reqs != recvWait.end() && !reqs->second.empty()) {
      req = reqs->second.front();
      reqs->second.pop();
      ucxx_trace_req("amRecv recvWait: %p", req.get());
    } else {
      req = std::shared_ptr<RequestAm>(
        new RequestAm(worker, worker->isFutureEnabled(), nullptr, nullptr));
      auto [queue, _] = recvPool.try_emplace(ep, std::queue<std::shared_ptr<RequestAm>>());
      queue->second.push(req);
      ucxx_trace_req("amRecv recvPool: %p", req.get());
    }
  }

  if (is_rndv) {
    if (amData->_allocators.find(allocatorType) == amData->_allocators.end()) {
      // TODO: Is a hard failure better?
      // ucxx_debug("Unsupported memory type %d", allocatorType);
      // internal::RecvAmMessage recvAmMessage(amData, ep, req, nullptr);
      // recvAmMessage.callback(nullptr, UCS_ERR_UNSUPPORTED);
      // return UCS_ERR_UNSUPPORTED;

      ucxx_trace_req("No allocator registered for memory type %d, falling back to host memory.",
                     allocatorType);
      allocatorType = UCS_MEMORY_TYPE_HOST;
    }

    std::shared_ptr<Buffer> buf = amData->_allocators.at(allocatorType)(length);

    auto recvAmMessage = std::make_shared<internal::RecvAmMessage>(amData, ep, req, buf);

    ucp_request_param_t request_param = {.op_attr_mask = UCP_OP_ATTR_FIELD_CALLBACK |
                                                         UCP_OP_ATTR_FIELD_USER_DATA |
                                                         UCP_OP_ATTR_FLAG_NO_IMM_CMPL,
                                         .cb = {.recv_am = _recvCompletedCallback},
                                         .user_data = recvAmMessage.get()};

    // Pull the rendezvous payload into the freshly-allocated buffer.
    ucs_status_ptr_t status =
      ucp_am_recv_data_nbx(worker->getHandle(), data, buf->data(), length, &request_param);

    if (req->_enablePythonFuture)
      ucxx_trace_req_f(ownerString.c_str(),
                       status,
                       "amRecv rndv",
                       "ep %p, buffer %p, size %lu, future %p, future handle %p, recvCallback",
                       ep,
                       buf->data(),
                       length,
                       req->_future.get(),
                       req->_future->getHandle());
    else
      ucxx_trace_req_f(ownerString.c_str(),
                       status,
                       "amRecv rndv",
                       "ep %p, buffer %p, size %lu, recvCallback",
                       ep,
                       buf->data(),
                       length);

    if (req->isCompleted()) {
      // The request completed/errored immediately
      ucs_status_t s = UCS_PTR_STATUS(status);
      recvAmMessage->callback(nullptr, s);
      return s;
    } else {
      // The request will be handled by the callback
      recvAmMessage->setUcpRequest(status);
      amData->_registerInflightRequest(req);

      {
        std::lock_guard<std::mutex> lock(amData->_mutex);
        amData->_recvAmMessageMap.emplace(req.get(), recvAmMessage);
      }

      return UCS_INPROGRESS;
    }
  } else {
    // Eager message: payload is already available, copy to a host buffer.
    std::shared_ptr<Buffer> buf = amData->_allocators.at(UCS_MEMORY_TYPE_HOST)(length);
    if (length > 0) memcpy(buf->data(), data, length);

    if (req->_enablePythonFuture)
      ucxx_trace_req_f(ownerString.c_str(),
                       nullptr,
                       "amRecv eager",
                       "ep: %p, buffer %p, size %lu, future %p, future handle %p, recvCallback",
                       ep,
                       buf->data(),
                       length,
                       req->_future.get(),
                       req->_future->getHandle());
    else
      ucxx_trace_req_f(ownerString.c_str(),
                       nullptr,
                       "amRecv eager",
                       "ep: %p, buffer %p, size %lu, recvCallback",
                       ep,
                       buf->data(),
                       length);

    internal::RecvAmMessage recvAmMessage(amData, ep, req, buf);
    recvAmMessage.callback(nullptr, UCS_OK);
    return UCS_OK;
  }
}
// Buffer holding the received message payload; presumably nullptr for sends
// or before reception completes — TODO(review): confirm where _buffer is set.
std::shared_ptr<Buffer> RequestAm::getRecvBuffer() { return _buffer; }
/**
 * Submit the active-message send to UCP. The header sent alongside the
 * payload carries the buffer's memory type so the receiver can pick a
 * matching allocator.
 *
 * Fix: removed an unused `static const ucp_tag_t tagMask = -1` local — a
 * copy-paste leftover from the tag code path; active messages use no tag mask.
 *
 * @throws ucxx::UnsupportedError if called on a receive request; AM receives
 *         are handled entirely by the worker's callback (recvCallback).
 */
void RequestAm::request()
{
  ucp_request_param_t param = {.op_attr_mask = UCP_OP_ATTR_FIELD_CALLBACK |
                                               UCP_OP_ATTR_FIELD_FLAGS |
                                               UCP_OP_ATTR_FIELD_USER_DATA,
                               .flags = UCP_AM_SEND_FLAG_REPLY,
                               // NOTE(review): `.datatype` is set but
                               // UCP_OP_ATTR_FIELD_DATATYPE is not in the mask, so
                               // UCP ignores it (contiguous bytes is the default
                               // anyway) — confirm intent.
                               .datatype = ucp_dt_make_contig(1),
                               .user_data = this};

  _sendHeader = _delayedSubmission->_memoryType;

  if (_delayedSubmission->_send) {
    param.cb.send = _amSendCallback;
    void* request = ucp_am_send_nbx(_endpoint->getHandle(),
                                    0,
                                    &_sendHeader,
                                    sizeof(_sendHeader),
                                    _delayedSubmission->_buffer,
                                    _delayedSubmission->_length,
                                    &param);

    // Publish the UCP handle under the lock; other threads may inspect/cancel.
    std::lock_guard<std::recursive_mutex> lock(_mutex);
    _request = request;
  } else {
    throw ucxx::UnsupportedError(
      "Receiving active messages must be handled by the worker's callback");
  }
}
// Deferred-submission entry point: submit the AM send, trace it, and process
// the immediate result (completion, error, or hand-off to the UCP callback).
void RequestAm::populateDelayedSubmission()
{
  request();

  if (_enablePythonFuture)
    ucxx_trace_req_f(_ownerString.c_str(),
                     _request,
                     _operationName.c_str(),
                     "buffer %p, size %lu, future %p, future handle %p, populateDelayedSubmission",
                     _delayedSubmission->_buffer,
                     _delayedSubmission->_length,
                     _future.get(),
                     _future->getHandle());
  else
    ucxx_trace_req_f(_ownerString.c_str(),
                     _request,
                     _operationName.c_str(),
                     "buffer %p, size %lu, populateDelayedSubmission",
                     _delayedSubmission->_buffer,
                     _delayedSubmission->_length);

  process();
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/listener.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <netinet/in.h>
#include <string>
#include <ucp/api/ucp.h>
#include <ucxx/exception.h>
#include <ucxx/listener.h>
#include <ucxx/utils/callback_notifier.h>
#include <ucxx/utils/sockaddr.h>
#include <ucxx/utils/ucx.h>
namespace ucxx {
// Create a UCP listener bound to `port` on all interfaces; `callback` fires
// for each incoming connection request. The actual bound IP/port are queried
// back from UCP (useful when port 0 requested an ephemeral port).
Listener::Listener(std::shared_ptr<Worker> worker,
                   uint16_t port,
                   ucp_listener_conn_callback_t callback,
                   void* callbackArgs)
{
  if (worker == nullptr || worker->getHandle() == nullptr)
    throw ucxx::Error("Worker not initialized");

  ucp_listener_params_t params = {
    .field_mask = UCP_LISTENER_PARAM_FIELD_SOCK_ADDR | UCP_LISTENER_PARAM_FIELD_CONN_HANDLER,
    .conn_handler = {.cb = callback, .arg = callbackArgs}};

  // Resolve a wildcard address for the requested port.
  auto info = ucxx::utils::get_addrinfo(NULL, port);

  params.sockaddr.addr = info->ai_addr;
  params.sockaddr.addrlen = info->ai_addrlen;

  utils::ucsErrorThrow(ucp_listener_create(worker->getHandle(), &params, &_handle));
  ucxx_trace("Listener created: %p, UCP handle: %p", this, _handle);

  // Query the address UCP actually bound to.
  ucp_listener_attr_t attr = {.field_mask = UCP_LISTENER_ATTR_FIELD_SOCKADDR};
  utils::ucsErrorThrow(ucp_listener_query(_handle, &attr));
  char ipString[INET6_ADDRSTRLEN];
  char portString[INET6_ADDRSTRLEN];
  ucxx::utils::sockaddr_get_ip_port_str(&attr.sockaddr, ipString, portString, INET6_ADDRSTRLEN);
  _ip = std::string(ipString);
  _port = (uint16_t)atoi(portString);

  setParent(worker);
}

// Destroy the listener. If a progress thread is running, destruction must
// happen on that thread (via registered pre/post callbacks) to avoid racing
// with progress; otherwise destroy inline and progress once to flush.
Listener::~Listener()
{
  auto worker = std::static_pointer_cast<Worker>(_parent);

  if (worker->isProgressThreadRunning()) {
    utils::CallbackNotifier callbackNotifierPre{};
    worker->registerGenericPre([this, &callbackNotifierPre]() {
      ucp_listener_destroy(_handle);
      callbackNotifierPre.set();
    });
    // Waits are bounded to avoid hanging forever if the progress thread died.
    callbackNotifierPre.wait(10000000000 /* 10s */);

    utils::CallbackNotifier callbackNotifierPost{};
    worker->registerGenericPost([&callbackNotifierPost]() { callbackNotifierPost.set(); });
    callbackNotifierPost.wait(10000000000 /* 10s */);
  } else {
    ucp_listener_destroy(_handle);
    worker->progress();
  }
  ucxx_trace("Listener destroyed: %p, UCP handle: %p", this, _handle);
}
/// Factory for Listener; `new` is required because the constructor is not
/// publicly accessible.
std::shared_ptr<Listener> createListener(std::shared_ptr<Worker> worker,
                                         uint16_t port,
                                         ucp_listener_conn_callback_t callback,
                                         void* callbackArgs)
{
  std::shared_ptr<Listener> listener(new Listener(worker, port, callback, callbackArgs));
  return listener;
}

/// Accept a pending connection request, producing an endpoint whose parent is
/// this listener.
std::shared_ptr<Endpoint> Listener::createEndpointFromConnRequest(ucp_conn_request_h connRequest,
                                                                  bool endpointErrorHandling)
{
  return ucxx::createEndpointFromConnRequest(
    std::dynamic_pointer_cast<Listener>(shared_from_this()), connRequest, endpointErrorHandling);
}
// Raw UCP listener handle.
ucp_listener_h Listener::getHandle() { return _handle; }

// Port actually bound (may differ from the requested port if 0 was passed).
uint16_t Listener::getPort() { return _port; }

// IP address the listener is bound to, as queried from UCP.
std::string Listener::getIp() { return _ip; }
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/request.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <chrono>
#include <memory>
#include <sstream>
#include <string>
#include <ucp/api/ucp.h>
#include <ucxx/component.h>
#include <ucxx/endpoint.h>
#include <ucxx/typedefs.h>
#include <ucxx/utils/ucx.h>
namespace ucxx {
// Base-class constructor shared by all request types. `endpointOrWorker` may
// be either; the worker is always resolved (from the endpoint if needed) and
// both are validated. Optionally attaches a Python future.
Request::Request(std::shared_ptr<Component> endpointOrWorker,
                 std::shared_ptr<DelayedSubmission> delayedSubmission,
                 const std::string operationName,
                 const bool enablePythonFuture)
  : _delayedSubmission(delayedSubmission),
    _operationName(operationName),
    _enablePythonFuture(enablePythonFuture)
{
  _endpoint = std::dynamic_pointer_cast<Endpoint>(endpointOrWorker);
  _worker =
    _endpoint ? _endpoint->getWorker() : std::dynamic_pointer_cast<Worker>(endpointOrWorker);

  if (_worker == nullptr || _worker->getHandle() == nullptr)
    throw ucxx::Error("Worker not initialized");
  if (_endpoint != nullptr && _endpoint->getHandle() == nullptr)
    throw ucxx::Error("Endpoint not initialized");

  // A future is only attached if the worker supports/enables them.
  _enablePythonFuture &= _worker->isFutureEnabled();
  if (_enablePythonFuture) {
    _future = _worker->getFuture();
    ucxx_trace_req("req: %p, _future: %p", _request, _future.get());
  }

  // Owner string ("ep <handle>" or "worker <handle>") used in trace output.
  std::stringstream ss;

  if (_endpoint) {
    setParent(_endpoint);
    ss << "ep " << _endpoint->getHandle();
  } else {
    setParent(_worker);
    ss << "worker " << _worker->getHandle();
  }

  _ownerString = ss.str();

  ucxx_trace("Request created: %p, %s", this, _operationName.c_str());
}

Request::~Request() { ucxx_trace("Request destroyed: %p, %s", this, _operationName.c_str()); }

// Cancel an in-progress request via UCP; completed requests are left alone
// (the cancellation is a no-op and only traced).
void Request::cancel()
{
  std::lock_guard<std::recursive_mutex> lock(_mutex);
  if (_status == UCS_INPROGRESS) {
    // Unsubmitted/errored handles cannot be canceled, only reported.
    if (UCS_PTR_IS_ERR(_request)) {
      ucs_status_t status = UCS_PTR_STATUS(_request);
      ucxx_trace_req_f(_ownerString.c_str(),
                       _request,
                       _operationName.c_str(),
                       "unprocessed request during cancelation contains error: %d (%s)",
                       status,
                       ucs_status_string(status));
    } else {
      ucxx_trace_req_f(_ownerString.c_str(), _request, _operationName.c_str(), "canceling");
      if (_request != nullptr) ucp_request_cancel(_worker->getHandle(), _request);
    }
  } else {
    ucxx_trace_req_f(_ownerString.c_str(),
                     _request,
                     _operationName.c_str(),
                     "already completed with status: %d (%s)",
                     _status,
                     ucs_status_string(_status));
  }
}
// Current request status; UCS_INPROGRESS until completion.
ucs_status_t Request::getStatus()
{
  std::lock_guard<std::recursive_mutex> lock(_mutex);
  return _status;
}

// Handle of the attached Python future, or nullptr if futures are disabled.
void* Request::getFuture()
{
  std::lock_guard<std::recursive_mutex> lock(_mutex);
  return _future ? _future->getHandle() : nullptr;
}

// Throw the ucxx exception matching the current status; truncated messages
// include the detailed mismatch message built by the subclass.
void Request::checkError()
{
  std::lock_guard<std::recursive_mutex> lock(_mutex);

  utils::ucsErrorThrow(_status, _status == UCS_ERR_MESSAGE_TRUNCATED ? _status_msg : std::string());
}

// A request is complete once its status left UCS_INPROGRESS.
bool Request::isCompleted()
{
  std::lock_guard<std::recursive_mutex> lock(_mutex);
  return _status != UCS_INPROGRESS;
}

// Common completion path invoked from UCP callbacks: free the UCP request
// handle and publish the final status (notifying future/user callback).
void Request::callback(void* request, ucs_status_t status)
{
  /**
   * Prevent reference count to self from going to zero and thus cause self to be destroyed
   * while `callback()` executes.
   */
  decltype(shared_from_this()) selfReference = nullptr;
  try {
    selfReference = shared_from_this();
  } catch (std::bad_weak_ptr& exception) {
    ucxx_debug("Request %p destroyed before callback() was executed", this);
    return;
  }

  if (_status != UCS_INPROGRESS)
    ucxx_trace("Request %p has status already set to %d (%s), callback setting %d (%s)",
               this,
               _status,
               ucs_status_string(_status),
               status,
               ucs_status_string(status));

  // NOTE(review): the guard checks `_request` but frees the `request`
  // parameter — these are expected to be the same UCP handle here; confirm.
  if (UCS_PTR_IS_PTR(_request)) ucp_request_free(request);
  ucxx_trace("Request completed: %p, handle: %p", this, request);
  setStatus(status);
  ucxx_trace("Request %p, isCompleted: %d", this, isCompleted());
}
// Examine the UCP handle returned by the submission call: an error pointer
// means immediate failure, a valid pointer means the UCP callback will finish
// the request later, and NULL means immediate success.
void Request::process()
{
  std::lock_guard<std::recursive_mutex> lock(_mutex);

  ucs_status_t status = UCS_INPROGRESS;

  if (UCS_PTR_IS_ERR(_request)) {
    // Operation errored immediately
    status = UCS_PTR_STATUS(_request);
  } else if (UCS_PTR_IS_PTR(_request)) {
    // Completion will be handled by callback
    ucxx_trace_req_f(_ownerString.c_str(),
                     _request,
                     _operationName.c_str(),
                     "completion will be handled by callback");
    ucxx_trace("Request submitted: %p, handle: %p", this, _request);
    return;
  } else {
    // Operation completed immediately
    status = UCS_OK;
  }

  ucxx_trace_req_f(_ownerString.c_str(),
                   _request,
                   _operationName.c_str(),
                   "status %d (%s)",
                   status,
                   ucs_status_string(status));

  if (status != UCS_OK) {
    ucxx_debug(
      "error on %s with status %d (%s)", _operationName.c_str(), status, ucs_status_string(status));
  } else {
    ucxx_trace_req_f(
      _ownerString.c_str(), _request, _operationName.c_str(), "completed immediately");
  }

  setStatus(status);
}

// Publish the final status: deregister from inflight tracking, set `_status`,
// notify the Python future (if any) and run the user callback.
void Request::setStatus(ucs_status_t status)
{
  {
    std::lock_guard<std::recursive_mutex> lock(_mutex);

    // Remove from endpoint/worker inflight registries before completing.
    if (_endpoint != nullptr) _endpoint->removeInflightRequest(this);
    _worker->removeInflightRequest(this);

    ucxx_trace_req_f(_ownerString.c_str(),
                     _request,
                     _operationName.c_str(),
                     "callback called with status %d (%s)",
                     status,
                     ucs_status_string(status));

    if (_status != UCS_INPROGRESS) ucxx_error("setStatus called but the status was already set");
    _status = status;

    if (_enablePythonFuture) {
      auto future = std::static_pointer_cast<ucxx::Future>(_future);
      future->notify(status);
    }

    ucxx_trace_req_f(_ownerString.c_str(),
                     _request,
                     _operationName.c_str(),
                     "callback %p",
                     _callback.target<void (*)(void)>());
    if (_callback) _callback(status, _callbackData);
  }
}

// "ep <handle>" or "worker <handle>" identifying this request's owner.
const std::string& Request::getOwnerString() const { return _ownerString; }

// Base class has no receive buffer; overridden by RequestAm.
std::shared_ptr<Buffer> Request::getRecvBuffer() { return nullptr; }
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/worker_progress_thread.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <ucxx/log.h>
#include <ucxx/utils/callback_notifier.h>
#include <ucxx/worker_progress_thread.h>
namespace ucxx {
// Thread body: run the optional start callback once, then loop running
// pre-progress delayed submissions, worker progress, and post-progress
// submissions until `stop` flips.
// NOTE(review): `stop` is a plain bool written by the destructor's post
// callback and read here without synchronization — presumably ordered via the
// delayed-submission hand-off; confirm it shouldn't be std::atomic<bool>.
void WorkerProgressThread::progressUntilSync(
  std::function<bool(void)> progressFunction,
  const bool& stop,
  ProgressThreadStartCallback startCallback,
  ProgressThreadStartCallbackArg startCallbackArg,
  std::shared_ptr<DelayedSubmissionCollection> delayedSubmissionCollection)
{
  if (startCallback) startCallback(startCallbackArg);

  while (!stop) {
    delayedSubmissionCollection->processPre();

    progressFunction();

    delayedSubmissionCollection->processPost();
  }
}

// Spawn the progress thread immediately; `signalWorkerFunction` is used later
// to wake a blocking progress call during shutdown.
WorkerProgressThread::WorkerProgressThread(
  const bool pollingMode,
  std::function<bool(void)> progressFunction,
  std::function<void(void)> signalWorkerFunction,
  ProgressThreadStartCallback startCallback,
  ProgressThreadStartCallbackArg startCallbackArg,
  std::shared_ptr<DelayedSubmissionCollection> delayedSubmissionCollection)
  : _pollingMode(pollingMode),
    _signalWorkerFunction(signalWorkerFunction),
    _startCallback(startCallback),
    _startCallbackArg(startCallbackArg),
    _delayedSubmissionCollection(delayedSubmissionCollection)
{
  _thread = std::thread(WorkerProgressThread::progressUntilSync,
                        progressFunction,
                        std::ref(_stop),
                        _startCallback,
                        _startCallbackArg,
                        _delayedSubmissionCollection);
}

// Orderly shutdown: drain a pre callback, then set `_stop` from a post
// callback executed on the progress thread itself (so the flag flip is
// observed between loop iterations), then join.
WorkerProgressThread::~WorkerProgressThread()
{
  if (!_thread.joinable()) {
    ucxx_debug("Worker progress thread not running or already stopped");
    return;
  }

  utils::CallbackNotifier callbackNotifierPre{};
  _delayedSubmissionCollection->registerGenericPre(
    [&callbackNotifierPre]() { callbackNotifierPre.set(); });
  _signalWorkerFunction();
  callbackNotifierPre.wait();

  utils::CallbackNotifier callbackNotifierPost{};
  _delayedSubmissionCollection->registerGenericPost([this, &callbackNotifierPost]() {
    _stop = true;
    callbackNotifierPost.set();
  });
  _signalWorkerFunction();
  callbackNotifierPost.wait();

  _thread.join();
}

// Whether the thread uses busy polling (true) or blocking progress (false).
bool WorkerProgressThread::pollingMode() const { return _pollingMode; }

// Thread id of the progress thread (for "am I on the progress thread?" checks).
std::thread::id WorkerProgressThread::getId() const { return _thread.get_id(); }
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/request_stream.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <string>
#include <ucp/api/ucp.h>
#include <ucxx/delayed_submission.h>
#include <ucxx/request_stream.h>
namespace ucxx {
// Construct a stream send/receive request; `_length` keeps the expected size
// so receive completion can detect truncation.
RequestStream::RequestStream(std::shared_ptr<Endpoint> endpoint,
                             bool send,
                             void* buffer,
                             size_t length,
                             const bool enablePythonFuture)
  : Request(endpoint,
            std::make_shared<DelayedSubmission>(send, buffer, length),
            std::string(send ? "streamSend" : "streamRecv"),
            enablePythonFuture),
    _length(length)
{
}
std::shared_ptr<RequestStream> createRequestStream(std::shared_ptr<Endpoint> endpoint,
bool send,
void* buffer,
size_t length,
const bool enablePythonFuture = false)
{
auto req = std::shared_ptr<RequestStream>(
new RequestStream(endpoint, send, buffer, length, enablePythonFuture));
// A delayed notification request is not populated immediately, instead it is
// delayed to allow the worker progress thread to set its status, and more
// importantly the Python future later on, so that we don't need the GIL here.
req->_worker->registerDelayedSubmission(
req, std::bind(std::mem_fn(&Request::populateDelayedSubmission), req.get()));
return req;
}
// Submit the stream operation to UCP. Receives use WAITALL so completion only
// fires once the full requested length (or an error) is available.
void RequestStream::request()
{
  ucp_request_param_t param = {.op_attr_mask = UCP_OP_ATTR_FIELD_CALLBACK |
                                               UCP_OP_ATTR_FIELD_DATATYPE |
                                               UCP_OP_ATTR_FIELD_USER_DATA,
                               .datatype = ucp_dt_make_contig(1),
                               .user_data = this};
  void* request = nullptr;

  if (_delayedSubmission->_send) {
    param.cb.send = streamSendCallback;
    request = ucp_stream_send_nbx(
      _endpoint->getHandle(), _delayedSubmission->_buffer, _delayedSubmission->_length, &param);
  } else {
    param.op_attr_mask |= UCP_OP_ATTR_FIELD_FLAGS;
    param.flags = UCP_STREAM_RECV_FLAG_WAITALL;
    param.cb.recv_stream = streamRecvCallback;
    // Note: the received length is written back into the delayed submission.
    request = ucp_stream_recv_nbx(_endpoint->getHandle(),
                                  _delayedSubmission->_buffer,
                                  _delayedSubmission->_length,
                                  &_delayedSubmission->_length,
                                  &param);
  }

  // Publish the UCP handle under the lock; other threads may inspect/cancel.
  std::lock_guard<std::recursive_mutex> lock(_mutex);
  _request = request;
}

// Deferred-submission entry point: bail out with cancellation if the
// endpoint/worker was closed in the meantime, otherwise submit and process.
void RequestStream::populateDelayedSubmission()
{
  if (_delayedSubmission->_send && _endpoint->getHandle() == nullptr) {
    ucxx_warn("Endpoint was closed before message could be sent");
    Request::callback(this, UCS_ERR_CANCELED);
    return;
  } else if (!_delayedSubmission->_send && _worker->getHandle() == nullptr) {
    ucxx_warn("Worker was closed before message could be received");
    Request::callback(this, UCS_ERR_CANCELED);
    return;
  }

  request();

  if (_enablePythonFuture)
    ucxx_trace_req_f(_ownerString.c_str(),
                     _request,
                     _operationName.c_str(),
                     "buffer %p, size %lu, future %p, future handle %p, populateDelayedSubmission",
                     _delayedSubmission->_buffer,
                     _delayedSubmission->_length,
                     _future.get(),
                     _future->getHandle());
  else
    ucxx_trace_req_f(_ownerString.c_str(),
                     _request,
                     _operationName.c_str(),
                     "buffer %p, size %lu, populateDelayedSubmission",
                     _delayedSubmission->_buffer,
                     _delayedSubmission->_length);

  process();
}
/**
 * Stream receive completion: flag truncation when fewer bytes than expected
 * arrived, building a detailed status message, then defer to the base class.
 *
 * Fix: the snprintf format used `%llu` for `size_t` arguments, which is
 * undefined behavior on platforms where `size_t` is not `unsigned long long`
 * (e.g. 32-bit, or LP64 where it is `unsigned long`); `%zu` is the portable
 * specifier for `size_t`.
 *
 * NOTE(review): a non-OK incoming status (e.g. UCS_ERR_CANCELED) with a short
 * length is overwritten with UCS_ERR_MESSAGE_TRUNCATED — compare with the
 * disabled RequestTag logic which excluded CANCELED; confirm intent.
 */
void RequestStream::callback(void* request, ucs_status_t status, size_t length)
{
  status = length == _length ? status : UCS_ERR_MESSAGE_TRUNCATED;
  if (status == UCS_ERR_MESSAGE_TRUNCATED) {
    const char* fmt = "length mismatch: %zu (got) != %zu (expected)";
    size_t len = std::snprintf(nullptr, 0, fmt, length, _length);
    _status_msg = std::string(len + 1, '\0');  // +1 for null terminator
    std::snprintf(_status_msg.data(), _status_msg.size(), fmt, length, _length);
  }
  Request::callback(request, status);
}
/// UCP completion callback for stream sends; `arg` is the owning Request.
void RequestStream::streamSendCallback(void* request, ucs_status_t status, void* arg)
{
  auto* req = static_cast<Request*>(arg);
  ucxx_trace_req_f(req->getOwnerString().c_str(), request, "streamSend", "streamSendCallback");
  req->callback(request, status);
}

/// UCP completion callback for stream receives; `arg` is the owning
/// RequestStream, which also checks the received length for truncation.
void RequestStream::streamRecvCallback(void* request, ucs_status_t status, size_t length, void* arg)
{
  auto* req = static_cast<RequestStream*>(arg);
  ucxx_trace_req_f(req->getOwnerString().c_str(), request, "streamRecv", "streamRecvCallback");
  req->callback(request, status, length);
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/header.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <algorithm>
#include <iostream>
#include <iterator>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include <ucxx/header.h>
namespace ucxx {
/**
 * Build a header describing up to HeaderFramesSize frames.
 *
 * Fix: added a bounds check — the previous code copied `nframes` elements into
 * the fixed-size `isCUDA`/`size` arrays unconditionally, overflowing them if a
 * caller passed `nframes > HeaderFramesSize` (buildHeaders() guarantees the
 * bound, but this constructor is public). `std::length_error` matches the
 * exception type buildHeaders() already uses for invalid input.
 *
 * @param next    whether another header follows this one.
 * @param nframes number of valid frames (must be <= HeaderFramesSize).
 * @param isCUDA  per-frame CUDA flags, `nframes` entries.
 * @param size    per-frame sizes in bytes, `nframes` entries.
 */
Header::Header(bool next, size_t nframes, int* isCUDA, size_t* size) : next{next}, nframes{nframes}
{
  if (nframes > HeaderFramesSize)
    throw std::length_error("nframes must not exceed HeaderFramesSize");

  std::copy(isCUDA, isCUDA + nframes, this->isCUDA.begin());
  std::copy(size, size + nframes, this->size.begin());
  // Zero-fill the unused tail so serialization is deterministic.
  if (nframes < HeaderFramesSize) {
    std::fill(this->isCUDA.begin() + nframes, this->isCUDA.begin() + HeaderFramesSize, false);
    std::fill(this->size.begin() + nframes, this->size.begin() + HeaderFramesSize, 0);
  }
}
// Reconstruct a header from its serialized byte representation.
Header::Header(std::string serializedHeader) { deserialize(serializedHeader); }

// Byte size of the serialized representation (fixed, independent of nframes).
size_t Header::dataSize() { return sizeof(next) + sizeof(nframes) + sizeof(isCUDA) + sizeof(size); }

// Serialize to raw bytes: next flag, frame count, then the full fixed-size
// isCUDA and size arrays (native endianness and padding — peer must match).
const std::string Header::serialize() const
{
  std::stringstream ss;

  ss.write((char const*)&next, sizeof(next));
  ss.write((char const*)&nframes, sizeof(nframes));
  for (size_t i = 0; i < HeaderFramesSize; ++i)
    ss.write((char const*)&isCUDA[i], sizeof(isCUDA[i]));
  for (size_t i = 0; i < HeaderFramesSize; ++i)
    ss.write((char const*)&size[i], sizeof(size[i]));

  return ss.str();
}

// Inverse of serialize(); reads the fields in the same order and layout.
void Header::deserialize(const std::string& serializedHeader)
{
  std::stringstream ss{serializedHeader};

  ss.read(reinterpret_cast<char*>(&next), sizeof(next));
  ss.read(reinterpret_cast<char*>(&nframes), sizeof(nframes));
  for (size_t i = 0; i < HeaderFramesSize; ++i)
    ss.read(reinterpret_cast<char*>(&isCUDA[i]), sizeof(isCUDA[i]));
  for (size_t i = 0; i < HeaderFramesSize; ++i)
    ss.read(reinterpret_cast<char*>(&size[i]), sizeof(size[i]));
}
std::vector<Header> Header::buildHeaders(const std::vector<size_t>& size,
const std::vector<int>& isCUDA)
{
const size_t totalFrames = size.size();
if (isCUDA.size() != totalFrames)
throw std::length_error("size and isCUDA must have the same length");
const size_t totalHeaders = (totalFrames + HeaderFramesSize - 1) / HeaderFramesSize;
std::vector<Header> headers;
for (size_t i = 0; i < totalHeaders; ++i) {
bool hasNext = totalFrames > (i + 1) * HeaderFramesSize;
size_t headerFrames =
hasNext ? HeaderFramesSize : HeaderFramesSize - (HeaderFramesSize * (i + 1) - totalFrames);
size_t idx = i * HeaderFramesSize;
headers.push_back(Header(hasNext,
headerFrames,
const_cast<int*>(reinterpret_cast<const int*>(&isCUDA[idx])),
const_cast<size_t*>(reinterpret_cast<const size_t*>(&size[idx]))));
}
return headers;
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/request_tag_multi.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#include <ucxx/buffer.h>
#include <ucxx/endpoint.h>
#include <ucxx/header.h>
#include <ucxx/request_tag_multi.h>
#include <ucxx/utils/ucx.h>
#include <ucxx/worker.h>
namespace ucxx {
// Trace-only constructor/destructor pair for per-frame request bookkeeping.
BufferRequest::BufferRequest() { ucxx_trace("BufferRequest created: %p", this); }

BufferRequest::~BufferRequest() { ucxx_trace("BufferRequest destroyed: %p", this); }

// Multi-frame tag transfer. The base-class delayed submission is inverted
// (`!send`) — NOTE(review): presumably because the multi-request drives its
// own sub-requests rather than a single UCP operation; confirm.
RequestTagMulti::RequestTagMulti(std::shared_ptr<Endpoint> endpoint,
                                 const bool send,
                                 const ucp_tag_t tag,
                                 const bool enablePythonFuture)
  : Request(endpoint,
            std::make_shared<DelayedSubmission>(!send, nullptr, 0, 0),
            std::string(send ? "tagMultiSend" : "tagMultiRecv"),
            enablePythonFuture),
    _send(send),
    _tag(tag)
{
  auto worker = endpoint->getWorker();
  if (enablePythonFuture) _future = worker->getFuture();
}

RequestTagMulti::~RequestTagMulti()
{
  for (auto& br : _bufferRequests) {
    const auto& ptr = br->request.get();
    if (ptr != nullptr)
      ucxx_trace("RequestTagMulti destroying BufferRequest: %p", br->request.get());

    /**
     * FIXME: The `BufferRequest`s destructor should be doing this, but it seems a
     * reference to the object is lingering and thus it never gets destroyed. This
     * causes a chain effect that prevents `Worker` and `Context` from being destroyed
     * as well. It seems the default destructor fails only for frames, headers seem
     * to be destroyed as expected.
     */
    br->request = nullptr;
  }
}
/**
 * Create a multi-frame tag send request and submit all its headers and frames.
 *
 * @param buffer per-frame data pointers
 * @param size   per-frame sizes in bytes
 * @param isCUDA per-frame CUDA flag
 * @throws std::runtime_error if the three vectors differ in length
 */
std::shared_ptr<RequestTagMulti> createRequestTagMultiSend(std::shared_ptr<Endpoint> endpoint,
                                                           const std::vector<void*>& buffer,
                                                           const std::vector<size_t>& size,
                                                           const std::vector<int>& isCUDA,
                                                           const ucp_tag_t tag,
                                                           const bool enablePythonFuture)
{
  // Validate before constructing the request: the original constructed first,
  // needlessly allocating the request (and possibly a Python future) on a path
  // that throws anyway.
  if (size.size() != buffer.size() || isCUDA.size() != buffer.size())
    throw std::runtime_error("All input vectors should be of equal size");

  auto ret =
    std::shared_ptr<RequestTagMulti>(new RequestTagMulti(endpoint, true, tag, enablePythonFuture));
  ret->send(buffer, size, isCUDA);
  return ret;
}
/**
 * Create a multi-frame tag receive request and immediately start it.
 *
 * `recvCallback(UCS_OK)` posts the receive for the first header; subsequent
 * headers and frames are chained from the received header contents.
 */
std::shared_ptr<RequestTagMulti> createRequestTagMultiRecv(std::shared_ptr<Endpoint> endpoint,
                                                           const ucp_tag_t tag,
                                                           const bool enablePythonFuture)
{
  auto request = std::shared_ptr<RequestTagMulti>(
    new RequestTagMulti(endpoint, false, tag, enablePythonFuture));
  request->recvCallback(UCS_OK);
  return request;
}
/**
 * After all headers arrived, allocate one buffer per frame (host or RMM per
 * the header's CUDA flag) and post one tag receive per frame; each receive's
 * completion funnels into `markCompleted()`.
 */
void RequestTagMulti::recvFrames()
{
  if (_send) throw std::runtime_error("Send requests cannot call recvFrames()");
  std::vector<Header> headers;
  ucxx_trace_req("RequestTagMulti::recvFrames request: %p, tag: %lx, _bufferRequests.size(): %lu",
                 this,
                 _tag,
                 _bufferRequests.size());
  // At this point `_bufferRequests` holds only header receives; deserialize
  // every header before appending the frame receives below.
  for (auto& br : _bufferRequests) {
    ucxx_trace_req(
      "RequestTagMulti::recvFrames request: %p, tag: %lx, *br->stringBuffer.size(): %lu",
      this,
      _tag,
      br->stringBuffer->size());
    headers.push_back(Header(*br->stringBuffer));
  }
  for (auto& h : headers) {
    _totalFrames += h.nframes;
    for (size_t i = 0; i < h.nframes; ++i) {
      auto bufferRequest = std::make_shared<BufferRequest>();
      _bufferRequests.push_back(bufferRequest);
      const auto bufferType = h.isCUDA[i] ? ucxx::BufferType::RMM : ucxx::BufferType::Host;
      auto buf = allocateBuffer(bufferType, h.size[i]);
      bufferRequest->request = _endpoint->tagRecv(
        buf->data(),
        buf->getSize(),
        _tag,
        false,
        [this](ucs_status_t status, RequestCallbackUserData arg) {
          return this->markCompleted(status, arg);
        },
        bufferRequest);
      bufferRequest->buffer = buf;
      ucxx_trace_req("RequestTagMulti::recvFrames request: %p, tag: %lx, buffer: %p",
                     this,
                     _tag,
                     bufferRequest->buffer);
    }
  }
  // All receives are now posted; completion is tracked by `markCompleted()`.
  _isFilled = true;
  ucxx_trace_req("RequestTagMulti::recvFrames request: %p, tag: %lx, size: %lu, isFilled: %d",
                 this,
                 _tag,
                 _bufferRequests.size(),
                 _isFilled);
};
/**
 * Per-frame completion callback: counts completed frames and, once all
 * `_totalFrames` completed, resolves this request with the first non-OK child
 * status (or `status` itself for the final frame).
 */
void RequestTagMulti::markCompleted(ucs_status_t status, RequestCallbackUserData request)
{
  /**
   * Prevent reference count to self from going to zero and thus cause self to be destroyed
   * while `markCompleted()` executes.
   */
  decltype(shared_from_this()) selfReference = nullptr;
  try {
    selfReference = shared_from_this();
  } catch (std::bad_weak_ptr& exception) {
    ucxx_debug("RequestTagMulti %p destroyed before all markCompleted() callbacks were executed",
               this);
    return;
  }
  ucxx_trace_req("RequestTagMulti::markCompleted request: %p, tag: %lx", this, _tag);
  // Serialize counter updates; multiple frames may complete concurrently.
  std::lock_guard<std::mutex> lock(_completedRequestsMutex);

  if (++_completedRequests == _totalFrames) {
    auto s = UCS_OK;

    // Get the first non-UCS_OK status and set that as complete status
    for (const auto& br : _bufferRequests) {
      if (br->request) {
        s = br->request->getStatus();
        if (s != UCS_OK) break;
      }
    }

    // Check the status of the current (and final) message as it may have completed before
    // `_bufferRequests->request` was populated.
    if (s == UCS_OK) s = status;

    setStatus(s);
  }

  ucxx_trace_req("RequestTagMulti::markCompleted request: %p, tag: %lx, completed: %lu/%lu",
                 this,
                 _tag,
                 _completedRequests,
                 _totalFrames);
}
/**
 * Post a tag receive for one serialized header; on completion `recvCallback()`
 * decides whether to receive another header or start receiving frames.
 */
void RequestTagMulti::recvHeader()
{
  if (_send) throw std::runtime_error("Send requests cannot call recvHeader()");

  ucxx_trace_req("RequestTagMulti::recvHeader entering, request: %p, tag: %lx", this, _tag);

  auto bufferRequest = std::make_shared<BufferRequest>();
  _bufferRequests.push_back(bufferRequest);
  // Headers have a fixed serialized size; receive directly into the string.
  bufferRequest->stringBuffer = std::make_shared<std::string>(Header::dataSize(), 0);
  bufferRequest->request =
    _endpoint->tagRecv(&bufferRequest->stringBuffer->front(),
                       bufferRequest->stringBuffer->size(),
                       _tag,
                       false,
                       [this](ucs_status_t status, RequestCallbackUserData arg) {
                         return this->recvCallback(status);
                       });

  if (bufferRequest->request->isCompleted()) {
    // TODO: Errors may not be raisable within callback
    bufferRequest->request->checkError();
  }

  ucxx_trace_req("RequestTagMulti::recvHeader exiting, request: %p, tag: %lx, empty: %d",
                 this,
                 _tag,
                 _bufferRequests.empty());
}
/**
 * Drive the receive state machine: the first call (from the factory) posts the
 * first header receive; subsequent calls run after each header arrives and
 * either chain another header receive (`header.next`) or start frame receives.
 */
void RequestTagMulti::recvCallback(ucs_status_t status)
{
  if (_send) throw std::runtime_error("Send requests cannot call recvCallback()");

  ucxx_trace_req("RequestTagMulti::recvCallback request: %p, tag: %lx", this, _tag);

  if (_bufferRequests.empty()) {
    recvHeader();
  } else {
    if (status == UCS_OK) {
      ucxx_trace_req(
        "RequestTagMulti::recvCallback header received, multi request: %p, tag: %lx", this, _tag);
    } else {
      ucxx_trace_req(
        "RequestTagMulti::recvCallback failed receiving header with status %d (%s), multi request: "
        "%p, "
        "tag: %lx",
        status,
        ucs_status_string(status),
        this,
        _tag);

      // NOTE(review): this writes `_status` directly and notifies the future,
      // unlike markCompleted() which goes through setStatus() — confirm the
      // direct assignment is intentional here.
      _status = status;
      if (_future) _future->notify(status);

      return;
    }

    auto header = Header(*_bufferRequests.back()->stringBuffer);

    if (header.next)
      recvHeader();
    else
      recvFrames();
  }
}
/**
 * Submit the whole multi-frame send: first every serialized header, then one
 * tag send per frame. Frame completions funnel into `markCompleted()`.
 *
 * @param buffer per-frame data pointers
 * @param size   per-frame sizes in bytes
 * @param isCUDA per-frame CUDA flag
 * @throws std::length_error if the vectors differ in length
 */
void RequestTagMulti::send(const std::vector<void*>& buffer,
                           const std::vector<size_t>& size,
                           const std::vector<int>& isCUDA)
{
  _totalFrames = buffer.size();

  if ((size.size() != _totalFrames) || (isCUDA.size() != _totalFrames))
    throw std::length_error("buffer, size and isCUDA must have the same length");

  auto headers = Header::buildHeaders(size, isCUDA);

  // Send headers first so the receiver knows how many frames follow and how
  // large each one is. The serialized string is kept alive via stringBuffer.
  for (const auto& header : headers) {
    auto serializedHeader = std::make_shared<std::string>(header.serialize());
    auto bufferRequest = std::make_shared<BufferRequest>();
    _bufferRequests.push_back(bufferRequest);
    bufferRequest->request =
      _endpoint->tagSend(&serializedHeader->front(), serializedHeader->size(), _tag, false);
    bufferRequest->stringBuffer = serializedHeader;
  }

  // Then send each frame; only frame sends count toward completion.
  for (size_t i = 0; i < _totalFrames; ++i) {
    auto bufferRequest = std::make_shared<BufferRequest>();
    _bufferRequests.push_back(bufferRequest);
    bufferRequest->request = _endpoint->tagSend(
      buffer[i], size[i], _tag, false, [this](ucs_status_t status, RequestCallbackUserData arg) {
        return this->markCompleted(status, arg);
      });
  }

  _isFilled = true;
  ucxx_trace_req(
    "RequestTagMulti::send request: %p, tag: %lx, isFilled: %d", this, _tag, _isFilled);
}
// No-op: tag-multi requests submit their headers/frames eagerly, so there is
// nothing to populate on delayed submission.
void RequestTagMulti::populateDelayedSubmission() {}
// Cancel every outstanding child (header/frame) request that was issued.
void RequestTagMulti::cancel()
{
  for (auto& br : _bufferRequests)
    if (br->request) br->request->cancel();
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/log.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <algorithm>
#include <cstdlib>
#include <string>
#include <ucxx/log.h>
#include <ucxx/typedefs.h>
namespace ucxx {
// Log component handed to the UCS logging machinery; its level starts at
// `logLevelDefault` and may be overridden via UCXX_LOG_LEVEL (parseLogLevel()).
ucs_log_component_config_t ucxx_log_component_config = {logLevelDefault, "UCXX"};
// Functions
void parseLogLevel()
{
std::string logLevelName{};
if (const char* env = std::getenv("UCXX_LOG_LEVEL")) {
logLevelName = std::string(env);
std::transform(
logLevelName.begin(), logLevelName.end(), logLevelName.begin(), [](unsigned char c) {
return std::toupper(c);
});
auto level = logLevelNames.find(logLevelName);
if (!logLevelName.empty() && level != logLevelNames.end())
ucxx_log_component_config.log_level = (ucs_log_level_t)level->second;
else
ucxx_warn("UCXX_LOG_LEVEL %s unknown, defaulting to UCXX_LOG_LEVEL=%s",
logLevelName.c_str(),
logLevelNameDefault);
ucxx_info("UCXX_LOG_LEVEL: %s", logLevelName.c_str());
}
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/config.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <sstream>
#include <string>
#include <ucxx/config.h>
#include <ucxx/exception.h>
#include <ucxx/utils/file_descriptor.h>
#include <ucxx/utils/ucx.h>
namespace ucxx {
/**
 * Read the default UCX configuration and apply `userOptions` on top of it.
 *
 * @param userOptions key/value pairs applied via `ucp_config_modify`
 * @returns the populated `ucp_config_t*` handle (also stored in `_handle`)
 * @throws ucxx::Error (via `ucsErrorThrow`) on read or modify failure
 */
ucp_config_t* Config::readUCXConfig(ConfigMap userOptions)
{
  ucs_status_t status;

  status = ucp_config_read(NULL, NULL, &_handle);
  utils::ucsErrorThrow(status);

  // Modify the UCX configuration options based on `userOptions`
  for (const auto& kv : userOptions) {
    status = ucp_config_modify(_handle, kv.first.c_str(), kv.second.c_str());
    if (status != UCS_OK) {
      // BUGFIX: null the handle after releasing it, otherwise the destructor
      // would call ucp_config_release() a second time (double free) after the
      // throw below.
      ucp_config_release(_handle);
      _handle = nullptr;
      if (status == UCS_ERR_NO_ELEM)
        // BUGFIX: original message lacked the space before "doesn't".
        utils::ucsErrorThrow(status,
                             std::string("Option ") + kv.first + std::string(" doesn't exist"));
      else
        utils::ucsErrorThrow(status);
    }
  }

  return _handle;
}
/**
 * Render the UCX configuration into a key/value map (lazily built and cached
 * in `_configMap`). Keys are stripped of their "UCX_" prefix.
 *
 * NOTE(review): the temporary FILE* is presumably consumed/closed by
 * `decodeTextFileDescriptor()` — confirm, otherwise it leaks a descriptor.
 */
ConfigMap Config::ucxConfigToMap()
{
  if (_configMap.empty()) {
    FILE* textFileDescriptor = utils::createTextFileDescriptor();
    ucp_config_print(_handle, textFileDescriptor, NULL, UCS_CONFIG_PRINT_CONFIG);
    std::istringstream text{utils::decodeTextFileDescriptor(textFileDescriptor)};

    // Each configuration line has the form `UCX_<KEY>=<VALUE>`.
    const std::string prefix = "UCX_";
    const std::string delim  = "=";
    std::string line;
    while (std::getline(text, line)) {
      const size_t split = line.find(delim);
      // BUGFIX: skip malformed lines; the original called substr() with
      // npos-derived arguments when a line contained no '=' (or was shorter
      // than the "UCX_" prefix), producing garbage entries or throwing.
      if (split == std::string::npos || split < prefix.size()) continue;
      std::string k = line.substr(prefix.size(), split - prefix.size());
      std::string v = line.substr(split + delim.length());
      _configMap[k] = v;
    }
  }
  return _configMap;
}
// Construct by reading the UCX configuration with `userOptions` applied.
Config::Config(ConfigMap userOptions) { readUCXConfig(userOptions); }
// Release the underlying `ucp_config_t` if one was successfully read.
Config::~Config()
{
  if (this->_handle != nullptr) ucp_config_release(this->_handle);
}
// Return the configuration as a key/value map (lazily built, cached).
ConfigMap Config::get() { return ucxConfigToMap(); }
// Return the raw `ucp_config_t*` handle.
ucp_config_t* Config::getHandle() { return _handle; }
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/context.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <cstring>
#include <iostream>
#include <memory>
#include <string>
#include <ucxx/context.h>
#include <ucxx/log.h>
#include <ucxx/utils/file_descriptor.h>
#include <ucxx/utils/ucx.h>
namespace ucxx {
/**
 * Construct a UCP context with the given configuration and feature flags, then
 * determine CUDA support from both the context's reported memory types and the
 * UCX_TLS setting.
 *
 * @param ucxConfig    key/value UCX configuration overrides
 * @param featureFlags UCP_FEATURE_* bitmask passed to `ucp_init`
 */
Context::Context(const ConfigMap ucxConfig, const uint64_t featureFlags)
  : _config{ucxConfig}, _featureFlags{featureFlags}
{
  parseLogLevel();

  // UCP
  ucp_params_t params = {.field_mask = UCP_PARAM_FIELD_FEATURES, .features = featureFlags};

  utils::ucsErrorThrow(ucp_init(&params, _config.getHandle(), &_handle));
  ucxx_trace("Context created: %p, UCP handle: %p", this, _handle);

  // Query whether the context advertises CUDA among its memory types.
  ucp_context_attr_t attr = {.field_mask = UCP_ATTR_FIELD_MEMORY_TYPES};
  ucp_context_query(_handle, &attr);
  _cudaSupport = (attr.memory_types & UCS_MEMORY_TYPE_CUDA) == UCS_MEMORY_TYPE_CUDA;

  // UCX supports CUDA if TLS is "all", or one of {"cuda",
  // "cuda_copy", "cuda_ipc"} is in the active transports.
  // If the transport list is negated ("^" at start), then it is to be
  // interpreted as all \ given
  auto configMap = _config.get();
  auto tls = configMap.find("TLS");
  if (_cudaSupport) {
    if (tls != configMap.end()) {
      auto tls_value = tls->second;
      if (!tls_value.empty() && tls_value[0] == '^') {
        std::size_t current = 1;  // Skip the ^
        do {
          // UCX_TLS lists disabled transports, if this contains either
          // "cuda" or "cuda_copy", then there is no cuda support (just
          // disabling "cuda_ipc" is fine)
          auto next = tls_value.find_first_of(',', current);
          auto field = tls_value.substr(current, next - current);
          current = next + 1;
          if (field == "cuda" || field == "cuda_copy") {
            _cudaSupport = false;
            break;
          }
          // Loop termination: on the last field find_first_of() returns npos,
          // so `current` becomes npos + 1 (which wraps to 0) and the condition
          // below stops the loop.
        } while (current != std::string::npos + 1);
      } else {
        // UCX_TLS lists enabled transports, all, or anything with cuda
        // enables cuda support
        _cudaSupport = tls_value == "all" || tls_value.find("cuda") != std::string::npos;
      }
    }
  }

  ucxx_info("UCP initiated using config: ");
  for (const auto& kv : configMap)
    ucxx_info("  %s: %s", kv.first.c_str(), kv.second.c_str());
}
/**
 * Factory for `Context`. The constructor is non-public, hence the explicit
 * `shared_ptr` construction rather than `std::make_shared`.
 */
std::shared_ptr<Context> createContext(const ConfigMap ucxConfig, const uint64_t featureFlags)
{
  auto* context = new Context(ucxConfig, featureFlags);
  return std::shared_ptr<Context>(context);
}
// Destroy the UCP context. The handle is logged after cleanup purely for
// correlation with the creation trace; it is not dereferenced.
Context::~Context()
{
  if (_handle != nullptr) ucp_cleanup(_handle);
  ucxx_trace("Context destroyed: %p, UCP handle: %p", this, _handle);
}
// Return the (cached) UCX configuration as a key/value map.
ConfigMap Context::getConfig() { return _config.get(); }
// Return the raw `ucp_context_h` handle.
ucp_context_h Context::getHandle() { return _handle; }
// Render `ucp_context_print_info()` output into a std::string.
// NOTE(review): the temporary FILE* is presumably consumed/closed by
// decodeTextFileDescriptor() — confirm, otherwise it leaks a descriptor.
std::string Context::getInfo()
{
  FILE* TextFileDescriptor = utils::createTextFileDescriptor();
  ucp_context_print_info(_handle, TextFileDescriptor);
  return utils::decodeTextFileDescriptor(TextFileDescriptor);
}
// UCP_FEATURE_* bitmask the context was created with.
uint64_t Context::getFeatureFlags() const { return _featureFlags; }
// Whether CUDA support was detected at construction (memory types + UCX_TLS).
bool Context::hasCudaSupport() const { return _cudaSupport; }
std::shared_ptr<Worker> Context::createWorker(const bool enableDelayedSubmission,
const bool enableFuture)
{
auto context = std::dynamic_pointer_cast<Context>(shared_from_this());
auto worker = ucxx::createWorker(context, enableDelayedSubmission, enableFuture);
return worker;
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/worker.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <functional>
#include <ios>
#include <memory>
#include <mutex>
#include <queue>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <unistd.h>
#include <ucxx/buffer.h>
#include <ucxx/internal/request_am.h>
#include <ucxx/request_am.h>
#include <ucxx/request_tag.h>
#include <ucxx/utils/callback_notifier.h>
#include <ucxx/utils/file_descriptor.h>
#include <ucxx/utils/ucx.h>
#include <ucxx/worker.h>
namespace ucxx {
/**
 * Construct a worker on `context` in multi-thread mode, optionally enabling
 * delayed request submission, and — when the context has UCP_FEATURE_AM —
 * registering the Active Messages receive handler and a default host
 * allocator.
 *
 * @throws std::runtime_error if `context` is null or uninitialized
 */
Worker::Worker(std::shared_ptr<Context> context,
               const bool enableDelayedSubmission,
               const bool enableFuture)
  : _enableFuture(enableFuture)
{
  if (context == nullptr || context->getHandle() == nullptr)
    throw std::runtime_error("Context not initialized");

  // UCS_THREAD_MODE_MULTI: the worker may be accessed from multiple threads.
  ucp_worker_params_t params = {.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE,
                                .thread_mode = UCS_THREAD_MODE_MULTI};
  utils::ucsErrorThrow(ucp_worker_create(context->getHandle(), &params, &_handle));

  _delayedSubmissionCollection =
    std::make_shared<DelayedSubmissionCollection>(enableDelayedSubmission);

  if (context->getFeatureFlags() & UCP_FEATURE_AM) {
    unsigned int AM_MSG_ID = 0;
    _amData = std::make_shared<internal::AmData>();
    // Let the AM receive path register its requests with this worker.
    _amData->_registerInflightRequest = [this](std::shared_ptr<Request> req) {
      this->registerInflightRequest(req);
    };
    registerAmAllocator(UCS_MEMORY_TYPE_HOST,
                        [](size_t length) { return std::make_shared<HostBuffer>(length); });

    // Install the AM receive callback with `_amData` as its user argument.
    ucp_am_handler_param_t am_handler_param = {.field_mask = UCP_AM_HANDLER_PARAM_FIELD_ID |
                                                             UCP_AM_HANDLER_PARAM_FIELD_CB |
                                                             UCP_AM_HANDLER_PARAM_FIELD_ARG,
                                               .id = AM_MSG_ID,
                                               .cb = RequestAm::recvCallback,
                                               .arg = _amData.get()};
    utils::ucsErrorThrow(ucp_worker_set_am_recv_handler(_handle, &am_handler_param));
  }

  ucxx_trace("Worker created: %p, UCP handle: %p, enableDelayedSubmission: %d, enableFuture: %d",
             this,
             _handle,
             enableDelayedSubmission,
             _enableFuture);

  setParent(std::dynamic_pointer_cast<Component>(context));
}
// Completion callback used by drainWorkerTagRecv(). The drained status is
// written into the request's own storage; `info` and `arg` are unused.
// NOTE(review): drainWorkerTagRecv() only polls UCS_PTR_STATUS(status) and
// never reads this value back — confirm this store is the intended protocol.
static void _drainCallback(void* request,
                           ucs_status_t status,
                           const ucp_tag_recv_info_t* info,
                           void* arg)
{
  *reinterpret_cast<ucs_status_t*>(request) = status;
}
/**
 * Receive and discard every tag message still queued on the worker (used
 * during teardown). No-op when the context lacks UCP_FEATURE_TAG.
 */
void Worker::drainWorkerTagRecv()
{
  auto context = std::dynamic_pointer_cast<Context>(_parent);
  if (!(context->getFeatureFlags() & UCP_FEATURE_TAG)) return;

  ucp_tag_message_h message;
  ucp_tag_recv_info_t info;

  // Probe with a zero tag and zero mask (matches any tag); remove=1 dequeues
  // the matched message so the loop terminates.
  while ((message = ucp_tag_probe_nb(_handle, 0, 0, 1, &info)) != NULL) {
    ucxx_debug("Draining tag receive messages, worker: %p, tag: 0x%lx, length: %lu",
               _handle,
               info.sender_tag,
               info.length);

    std::vector<char> buf(info.length);

    ucp_request_param_t param = {
      .op_attr_mask = UCP_OP_ATTR_FIELD_CALLBACK | UCP_OP_ATTR_FIELD_DATATYPE,
      .cb = {.recv = _drainCallback},
      .datatype = ucp_dt_make_contig(1)};

    ucs_status_ptr_t status =
      ucp_tag_msg_recv_nbx(_handle, buf.data(), info.length, message, &param);
    // Non-null means the receive is asynchronous; progress until it finishes.
    if (status != nullptr) {
      while (UCS_PTR_STATUS(status) == UCS_INPROGRESS)
        progress();
    }
  }
}
/**
 * Match an AM receive request for endpoint `ep`: if a message already arrived
 * (queued in `_recvPool`), hand back its request; otherwise create a new
 * request via `createAmRecvRequestFunction` and queue it in `_recvWait` for a
 * future arrival. Both pools are protected by `_amData->_mutex`.
 */
std::shared_ptr<RequestAm> Worker::getAmRecv(
  ucp_ep_h ep, std::function<std::shared_ptr<RequestAm>()> createAmRecvRequestFunction)
{
  std::lock_guard<std::mutex> lock(_amData->_mutex);

  auto& recvPool = _amData->_recvPool;
  auto& recvWait = _amData->_recvWait;

  auto reqs = recvPool.find(ep);
  if (reqs != recvPool.end() && !reqs->second.empty()) {
    // A message already arrived for this endpoint: consume it.
    auto req = reqs->second.front();
    reqs->second.pop();
    return req;
  } else {
    // Nothing arrived yet: park a fresh request on the wait queue.
    auto req = createAmRecvRequestFunction();
    auto [queue, _] = recvWait.try_emplace(ep, std::queue<std::shared_ptr<RequestAm>>());
    queue->second.push(req);
    return req;
  }
}
/**
 * Factory for `Worker`. Also wires the AM data back-reference to the worker,
 * which requires the `shared_ptr` and thus cannot happen in the constructor.
 */
std::shared_ptr<Worker> createWorker(std::shared_ptr<Context> context,
                                     const bool enableDelayedSubmission,
                                     const bool enableFuture)
{
  auto worker = std::shared_ptr<Worker>(new Worker(context, enableDelayedSubmission, enableFuture));

  // We can only get a `shared_ptr<Worker>` for the Active Messages callback after it's
  // been created, thus this cannot be in the constructor.
  if (worker->_amData != nullptr) {
    worker->_amData->_worker = worker;

    // Cache an owner string ("worker <handle>") for logging purposes.
    std::stringstream ownerStream;
    ownerStream << "worker " << worker->getHandle();
    worker->_amData->_ownerString = ownerStream.str();
  }

  return worker;
}
// Tear down the worker: cancel inflight requests (up to 3 attempts of 3s),
// stop the progress and notifier threads, drain pending tag messages, then
// destroy the UCP worker and close the epoll descriptor if one was created.
Worker::~Worker()
{
  size_t canceled = cancelInflightRequests(3000000000 /* 3s */, 3);
  ucxx_debug("Worker %p canceled %lu requests", _handle, canceled);

  stopProgressThreadNoWarn();
  if (_notifier) _notifier->stopRequestNotifierThread();

  drainWorkerTagRecv();

  ucp_worker_destroy(_handle);
  ucxx_trace("Worker destroyed: %p, UCP handle: %p", this, _handle);

  if (_epollFileDescriptor >= 0) close(_epollFileDescriptor);
}
// Raw `ucp_worker_h` handle.
ucp_worker_h Worker::getHandle() { return _handle; }
// Render `ucp_worker_print_info()` output into a std::string.
// NOTE(review): the temporary FILE* is presumably consumed/closed by
// decodeTextFileDescriptor() — confirm, otherwise it leaks a descriptor.
std::string Worker::getInfo()
{
  FILE* TextFileDescriptor = utils::createTextFileDescriptor();
  ucp_worker_print_info(this->_handle, TextFileDescriptor);
  return utils::decodeTextFileDescriptor(TextFileDescriptor);
}
// Whether requests are queued for the progress thread rather than submitted
// inline by the caller.
bool Worker::isDelayedRequestSubmissionEnabled() const
{
  return _delayedSubmissionCollection->isDelayedRequestSubmissionEnabled();
}
// Whether future support was requested at construction.
bool Worker::isFutureEnabled() const { return _enableFuture; }
/**
 * Set up blocking progress mode: obtain the worker's event file descriptor,
 * arm it, and register it with a freshly created epoll descriptor that
 * `progressWorkerEvent()` waits on. Idempotent.
 *
 * @throws std::ios_base::failure on epoll_create/epoll_ctl failure
 */
void Worker::initBlockingProgressMode()
{
  // In blocking progress mode, we create an epoll file
  // descriptor that we can wait on later.
  // We also introduce an additional eventfd to allow
  // canceling the wait.
  int err;

  // Return if blocking progress mode was already initialized
  if (_epollFileDescriptor >= 0) return;

  utils::ucsErrorThrow(ucp_worker_get_efd(_handle, &_workerFileDescriptor));

  arm();

  _epollFileDescriptor = epoll_create(1);
  if (_epollFileDescriptor == -1) throw std::ios_base::failure("epoll_create(1) returned -1");

  epoll_event workerEvent = {.events = EPOLLIN,
                             .data = {
                               .fd = _workerFileDescriptor,
                             }};

  err = epoll_ctl(_epollFileDescriptor, EPOLL_CTL_ADD, _workerFileDescriptor, &workerEvent);
  // BUGFIX: the original built the message as `"epoll_ctl() returned " + err`,
  // which is pointer arithmetic on a string literal (offsets into/past the
  // literal), not concatenation. Convert `err` to text explicitly.
  if (err != 0)
    throw std::ios_base::failure(std::string("epoll_ctl() returned ") + std::to_string(err));
}
bool Worker::arm()
{
ucs_status_t status = ucp_worker_arm(_handle);
if (status == UCS_ERR_BUSY) return false;
utils::ucsErrorThrow(status);
return true;
}
/**
 * One blocking-mode progress cycle: progress the worker; if nothing progressed
 * and the worker can be armed, wait (up to `epollTimeout` ms, -1 blocks
 * indefinitely) on the epoll descriptor for new events.
 *
 * @returns true if communication progressed, false otherwise
 */
bool Worker::progressWorkerEvent(const int epollTimeout)
{
  int ret;
  epoll_event ev;

  if (progress()) return true;

  // Not initialized for blocking mode, or events still pending: don't wait.
  if ((_epollFileDescriptor == -1) || !arm()) return false;

  // Retry the wait when interrupted by a signal.
  do {
    ret = epoll_wait(_epollFileDescriptor, &ev, 1, epollTimeout);
  } while ((ret == -1) && (errno == EINTR || errno == EAGAIN));

  return false;
}
// Wake any thread blocked waiting on the worker's event file descriptor.
void Worker::signal() { utils::ucsErrorThrow(ucp_worker_signal(_handle)); }
// Block until the worker has events, then progress them.
bool Worker::waitProgress()
{
  utils::ucsErrorThrow(ucp_worker_wait(_handle));
  return progress();
}
// Single non-blocking progress pass; true if any communication progressed.
bool Worker::progressOnce() { return ucp_worker_progress(_handle) != 0; }
/**
 * Progress the worker repeatedly until a pass makes no further progress.
 *
 * @returns true if any pass made progress
 */
bool Worker::progressPending()
{
  bool madeProgress = false;
  while (progressOnce())
    madeProgress = true;
  return madeProgress;
}
/**
 * Full progress cycle: progress pending operations, give requests scheduled
 * for cancelation a chance to complete, then cancel whatever remains and
 * progress the resulting cancelations.
 *
 * @returns true if any pass made progress
 */
bool Worker::progress()
{
  bool ret = progressPending();
  bool progressScheduledCancel = false;

  {
    std::lock_guard<std::mutex> lock(_inflightRequestsMutex);
    // Before canceling requests scheduled for cancelation, attempt to let them complete.
    progressScheduledCancel =
      _inflightRequestsToCancel != nullptr && _inflightRequestsToCancel->size() > 0;
  }
  if (progressScheduledCancel) ret |= progressPending();

  // Requests that were not completed now must be canceled.
  if (cancelInflightRequests(3000000000 /* 3s */, 3) > 0) ret |= progressPending();

  return ret;
}
/**
 * Queue `callback` (the request's actual UCX submission) for the progress
 * thread when delayed submission is enabled, otherwise run it inline.
 */
void Worker::registerDelayedSubmission(std::shared_ptr<Request> request,
                                       DelayedSubmissionCallbackType callback)
{
  if (_delayedSubmissionCollection->isDelayedRequestSubmissionEnabled()) {
    _delayedSubmissionCollection->registerRequest(request, callback);

    /* Waking the progress event is needed here because the UCX request is
     * not dispatched immediately. Thus we must signal the progress task so
     * it will ensure the request is dispatched.
     */
    signal();
  } else {
    callback();
  }
}
/**
 * Schedule `callback` to run on the progress thread before its next progress
 * pass (or run it immediately when called from the progress thread itself).
 */
void Worker::registerGenericPre(DelayedSubmissionCallbackType callback)
{
  if (std::this_thread::get_id() == getProgressThreadId()) {
    /**
     * If the method is called from within the progress thread (e.g., from the
     * listener callback), execute it immediately.
     */
    callback();
  } else {
    _delayedSubmissionCollection->registerGenericPre(callback);
    /* Waking the progress event is needed here because the UCX request is
     * not dispatched immediately. Thus we must signal the progress task so
     * it will ensure the request is dispatched.
     */
    signal();
  }
}
/**
 * Schedule `callback` to run on the progress thread after its next progress
 * pass (or run it immediately when called from the progress thread itself).
 */
void Worker::registerGenericPost(DelayedSubmissionCallbackType callback)
{
  if (std::this_thread::get_id() == getProgressThreadId()) {
    /**
     * If the method is called from within the progress thread (e.g., from the
     * listener callback), execute it immediately.
     */
    callback();
  } else {
    _delayedSubmissionCollection->registerGenericPost(callback);
    /* Waking the progress event is needed here because the UCX request is
     * not dispatched immediately. Thus we must signal the progress task so
     * it will ensure the request is dispatched.
     */
    signal();
  }
}
// Shared error path for the future-related stubs below: the base Worker ships
// no future backend, so these methods throw until an implementation with
// future support overrides them.
#define THROW_FUTURE_NOT_IMPLEMENTED()                                                   \
  do {                                                                                   \
    throw std::runtime_error(                                                            \
      "ucxx::Worker's future support not implemented, please ensure you use an "        \
      "implementation with future support and that enableFuture=true is set when creating " \
      "the Worker to use this method.");                                                 \
  } while (0)
// Future-support entry points: all throw in the base Worker (see macro above).
void Worker::populateFuturesPool() { THROW_FUTURE_NOT_IMPLEMENTED(); }
std::shared_ptr<Future> Worker::getFuture() { THROW_FUTURE_NOT_IMPLEMENTED(); }
RequestNotifierWaitState Worker::waitRequestNotifier(uint64_t periodNs)
{
  THROW_FUTURE_NOT_IMPLEMENTED();
}
void Worker::runRequestNotifier() { THROW_FUTURE_NOT_IMPLEMENTED(); }
void Worker::stopRequestNotifierThread() { THROW_FUTURE_NOT_IMPLEMENTED(); }
// Store a callback (and its argument) to be invoked when the progress thread
// starts; consumed by startProgressThread().
void Worker::setProgressThreadStartCallback(std::function<void(void*)> callback, void* callbackArg)
{
  _progressThreadStartCallback = callback;
  _progressThreadStartCallbackArg = callbackArg;
}
/**
 * Launch the background progress thread.
 *
 * @param pollingMode  true to busy-poll `progress()`; false to block on the
 *                     worker's epoll descriptor (initialized here on demand)
 * @param epollTimeout per-wait timeout in ms for blocking mode
 *
 * No-op (with a debug log) if the thread is already running.
 */
void Worker::startProgressThread(const bool pollingMode, const int epollTimeout)
{
  if (_progressThread) {
    ucxx_debug("Worker progress thread already running");
    return;
  }

  std::function<bool()> progressFunction;
  std::function<void()> signalWorkerFunction;
  if (pollingMode) {
    progressFunction = [this]() { return this->progress(); };
    // Polling mode never blocks, so no wakeup mechanism is needed.
    signalWorkerFunction = []() {};
  } else {
    initBlockingProgressMode();
    progressFunction = [this, epollTimeout]() { return this->progressWorkerEvent(epollTimeout); };
    signalWorkerFunction = [this]() { return this->signal(); };
  }

  _progressThread = std::make_shared<WorkerProgressThread>(pollingMode,
                                                           progressFunction,
                                                           signalWorkerFunction,
                                                           _progressThreadStartCallback,
                                                           _progressThreadStartCallbackArg,
                                                           _delayedSubmissionCollection);

  /**
   * Ensure the progress thread's ID is available allowing generic callbacks to run
   * successfully even after `_progressThread == nullptr`, which may occur before
   * `WorkerProgressThreads`'s destructor completes.
   */
  _progressThreadId = _progressThread->getId();
}
// Drop the progress thread; its destructor performs the actual shutdown.
void Worker::stopProgressThreadNoWarn() { _progressThread = nullptr; }
// Same as above, but logs when there was no thread to stop.
void Worker::stopProgressThread()
{
  if (!_progressThread)
    ucxx_debug("Worker progress thread not running or already stopped");
  else
    stopProgressThreadNoWarn();
}
// Whether the background progress thread is currently alive.
bool Worker::isProgressThreadRunning() { return _progressThread != nullptr; }
// Thread ID cached at startProgressThread() time (valid even during teardown).
std::thread::id Worker::getProgressThreadId() { return _progressThreadId; }
/**
 * Cancel every request previously scheduled for cancelation.
 *
 * The scheduled set is swapped out under the lock and then canceled either
 * directly (when running on the progress thread, or when no progress thread
 * exists) or by handing the cancelation to the progress thread via pre/post
 * generic callbacks, retrying up to `maxAttempts` times with `period`
 * nanoseconds per wait.
 *
 * @returns the number of requests canceled
 */
size_t Worker::cancelInflightRequests(uint64_t period, uint64_t maxAttempts)
{
  size_t canceled = 0;

  // Swap in an empty set so new cancelation requests can keep accumulating.
  auto inflightRequestsToCancel = std::make_shared<InflightRequests>();
  {
    std::lock_guard<std::mutex> lock(_inflightRequestsMutex);
    std::swap(_inflightRequestsToCancel, inflightRequestsToCancel);
  }

  if (std::this_thread::get_id() == getProgressThreadId()) {
    // Already on the progress thread: cancel directly and progress.
    canceled = inflightRequestsToCancel->cancelAll();
    progressPending();
  } else if (isProgressThreadRunning()) {
    bool cancelSuccess = false;
    for (uint64_t i = 0; i < maxAttempts && !cancelSuccess; ++i) {
      // Cancel on the progress thread, then confirm it ran a post-progress
      // callback, ensuring at least one full progress pass happened.
      utils::CallbackNotifier callbackNotifierPre{};
      registerGenericPre([&callbackNotifierPre, &canceled, &inflightRequestsToCancel]() {
        canceled = inflightRequestsToCancel->cancelAll();
        callbackNotifierPre.set();
      });
      if (!callbackNotifierPre.wait(period)) continue;

      utils::CallbackNotifier callbackNotifierPost{};
      registerGenericPost([&callbackNotifierPost]() { callbackNotifierPost.set(); });
      if (!callbackNotifierPost.wait(period)) continue;

      cancelSuccess = true;
    }
    if (!cancelSuccess)
      ucxx_error("All attempts to cancel inflight requests failed on worker: %p, UCP handle: %p",
                 this,
                 _handle);
  } else {
    // No progress thread: safe to cancel from the calling thread.
    canceled = inflightRequestsToCancel->cancelAll();
  }

  return canceled;
}
// Move the given requests into the worker's to-cancel set; the actual
// cancelation happens later in cancelInflightRequests().
void Worker::scheduleRequestCancel(std::shared_ptr<InflightRequests> inflightRequests)
{
  {
    std::lock_guard<std::mutex> lock(_inflightRequestsMutex);
    ucxx_debug("Scheduling cancelation of %lu requests", inflightRequests->size());
    _inflightRequestsToCancel->merge(inflightRequests->release());
  }
}
// Track `request` as inflight (unless it already completed) so it can be
// canceled on teardown; returns the same request for call chaining.
std::shared_ptr<Request> Worker::registerInflightRequest(std::shared_ptr<Request> request)
{
  if (!request->isCompleted()) {
    std::lock_guard<std::mutex> lock(_inflightRequestsMutex);
    _inflightRequests->insert(request);
  }

  return request;
}
// Stop tracking a request (typically called upon its completion).
void Worker::removeInflightRequest(const Request* const request)
{
  {
    std::lock_guard<std::mutex> lock(_inflightRequestsMutex);
    _inflightRequests->remove(request);
  }
}
/**
 * Check whether a message matching `tag` exactly is already available on the
 * worker, progressing it first (inline, or via the progress thread) so the
 * probe reflects current state. The probe does not dequeue the message.
 */
bool Worker::tagProbe(const ucp_tag_t tag)
{
  if (!isProgressThreadRunning()) {
    progress();
  } else {
    /**
     * To ensure the worker was progressed at least once, we must make sure a callback runs
     * pre-progressing, and another one runs post-progress. Running post-progress only may
     * indicate the progress thread has immediately finished executing and post-progress
     * ran without a further progress operation.
     */
    utils::CallbackNotifier callbackNotifierPre{};
    registerGenericPre([&callbackNotifierPre]() { callbackNotifierPre.set(); });
    callbackNotifierPre.wait();

    utils::CallbackNotifier callbackNotifierPost{};
    registerGenericPost([&callbackNotifierPost]() { callbackNotifierPost.set(); });
    callbackNotifierPost.wait();
  }

  ucp_tag_recv_info_t info;
  // Tag mask of -1 (all bits set) requires an exact tag match; remove=0 leaves
  // the message queued.
  ucp_tag_message_h tag_message = ucp_tag_probe_nb(_handle, tag, -1, 0, &info);

  return tag_message != NULL;
}
/**
 * Post a tag receive on this worker and register it as inflight.
 *
 * @param buffer           destination buffer (must outlive the request)
 * @param length           number of bytes to receive
 * @param tag              UCP tag to match
 * @param enableFuture     whether the request should carry a future
 * @param callbackFunction optional user callback run on completion
 * @param callbackData     opaque argument forwarded to the callback
 */
std::shared_ptr<Request> Worker::tagRecv(void* buffer,
                                         size_t length,
                                         ucp_tag_t tag,
                                         const bool enableFuture,
                                         RequestCallbackUserFunction callbackFunction,
                                         RequestCallbackUserData callbackData)
{
  auto worker = std::dynamic_pointer_cast<Worker>(shared_from_this());
  return registerInflightRequest(createRequestTag(
    worker, false, buffer, length, tag, enableFuture, callbackFunction, callbackData));
}
// Get this worker's UCP address, e.g., for out-of-band exchange with peers.
// Each factory below hands a shared self-reference to the created object
// (presumably retained as its parent, keeping the worker alive).
std::shared_ptr<Address> Worker::getAddress()
{
  auto worker = std::dynamic_pointer_cast<Worker>(shared_from_this());
  auto address = ucxx::createAddressFromWorker(worker);
  return address;
}
// Create an endpoint by connecting to a remote listener at ip/port.
std::shared_ptr<Endpoint> Worker::createEndpointFromHostname(std::string ipAddress,
                                                             uint16_t port,
                                                             bool endpointErrorHandling)
{
  auto worker = std::dynamic_pointer_cast<Worker>(shared_from_this());
  auto endpoint = ucxx::createEndpointFromHostname(worker, ipAddress, port, endpointErrorHandling);
  return endpoint;
}
// Create an endpoint from a remote worker's (out-of-band exchanged) address.
std::shared_ptr<Endpoint> Worker::createEndpointFromWorkerAddress(std::shared_ptr<Address> address,
                                                                  bool endpointErrorHandling)
{
  auto worker = std::dynamic_pointer_cast<Worker>(shared_from_this());
  auto endpoint = ucxx::createEndpointFromWorkerAddress(worker, address, endpointErrorHandling);
  return endpoint;
}
// Create a listener accepting connections on `port`, invoking `callback` with
// `callbackArgs` for each incoming connection request.
std::shared_ptr<Listener> Worker::createListener(uint16_t port,
                                                 ucp_listener_conn_callback_t callback,
                                                 void* callbackArgs)
{
  auto worker = std::dynamic_pointer_cast<Worker>(shared_from_this());
  auto listener = ucxx::createListener(worker, port, callback, callbackArgs);
  return listener;
}
/**
 * Register (or replace) the buffer allocator used for incoming Active Messages
 * of the given memory type.
 *
 * @throws std::runtime_error if Active Messages were not enabled at context
 *                            creation (UCP_FEATURE_AM missing)
 */
void Worker::registerAmAllocator(ucs_memory_type_t memoryType, AmAllocatorType allocator)
{
  if (_amData == nullptr)
    // BUGFIX: original message read "wasn not enabled".
    throw std::runtime_error("Active Messages was not enabled during context creation");
  _amData->_allocators.insert_or_assign(memoryType, allocator);
}
// Check whether a received-but-unclaimed active message exists for the given
// endpoint handle.
// NOTE(review): unlike registerAmAllocator() this does not guard against
// `_amData == nullptr` (AM disabled), nor does it take `_amData->_mutex` while
// reading `_recvPool` as getAmRecv() does — confirm callers guarantee both.
bool Worker::amProbe(const ucp_ep_h endpointHandle) const
{
  return _amData->_recvPool.find(endpointHandle) != _amData->_recvPool.end();
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/src/delayed_submission.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <memory>
#include <mutex>
#include <utility>
#include <ucp/api/ucp.h>
#include <ucxx/delayed_submission.h>
#include <ucxx/log.h>
namespace ucxx {
// Plain value holder describing a deferred transfer: direction (`send`),
// buffer, length, tag, and source memory type. No logic beyond member init.
DelayedSubmission::DelayedSubmission(const bool send,
                                     void* buffer,
                                     const size_t length,
                                     const ucp_tag_t tag,
                                     const ucs_memory_type_t memoryType)
  : _send(send), _buffer(buffer), _length(length), _tag(tag), _memoryType(memoryType)
{
}
// Collection of deferred request submissions; each item pairs a request with
// the callback that performs its actual UCX submission.
RequestDelayedSubmissionCollection::RequestDelayedSubmissionCollection(const std::string name,
                                                                       const bool enabled)
  : BaseDelayedSubmissionCollection<
      std::pair<std::shared_ptr<Request>, DelayedSubmissionCallbackType>>{name, enabled}
{
}
// Trace-log a newly scheduled request item.
void RequestDelayedSubmissionCollection::scheduleLog(
  std::pair<std::shared_ptr<Request>, DelayedSubmissionCallbackType> item)
{
  ucxx_trace_req("Registered %s: %p", _name.c_str(), item.first.get());
}
// Run the submission callback of a scheduled item, if one was provided.
void RequestDelayedSubmissionCollection::processItem(
  std::pair<std::shared_ptr<Request>, DelayedSubmissionCallbackType> item)
{
  auto& req = item.first;
  auto& callback = item.second;

  ucxx_trace_req("Submitting %s callbacks: %p", _name.c_str(), req.get());

  if (callback) callback();
}
// Collection of generic (request-less) callbacks; always enabled.
GenericDelayedSubmissionCollection::GenericDelayedSubmissionCollection(const std::string name)
  : BaseDelayedSubmissionCollection<DelayedSubmissionCallbackType>{name, true}
{
}
// Trace-log a newly scheduled generic callback.
void GenericDelayedSubmissionCollection::scheduleLog(DelayedSubmissionCallbackType item)
{
  ucxx_trace_req("Registered %s", _name.c_str());
}
// Invoke a scheduled generic callback, if one was provided.
void GenericDelayedSubmissionCollection::processItem(DelayedSubmissionCallbackType callback)
{
  ucxx_trace_req("Submitting %s callback", _name.c_str());

  if (callback) callback();
}
// Aggregates the request collection (conditionally enabled) plus the pre- and
// post-progress generic callback collections processed by the progress loop.
DelayedSubmissionCollection::DelayedSubmissionCollection(bool enableDelayedRequestSubmission)
  : _enableDelayedRequestSubmission(enableDelayedRequestSubmission),
    _requests(RequestDelayedSubmissionCollection{"request", enableDelayedRequestSubmission})
{
}
// Whether request submissions are deferred to the progress thread.
bool DelayedSubmissionCollection::isDelayedRequestSubmissionEnabled() const
{
  return _enableDelayedRequestSubmission;
}
// Run before a progress pass: pending request submissions, then pre-callbacks.
void DelayedSubmissionCollection::processPre()
{
  _requests.process();

  _genericPre.process();
}
// Run after a progress pass: post-callbacks only.
void DelayedSubmissionCollection::processPost() { _genericPost.process(); }
// Queue a request's submission callback for the next pre-progress pass.
void DelayedSubmissionCollection::registerRequest(std::shared_ptr<Request> request,
                                                  DelayedSubmissionCallbackType callback)
{
  _requests.schedule({request, callback});
}
// Queue a generic callback to run before the next progress pass.
void DelayedSubmissionCollection::registerGenericPre(DelayedSubmissionCallbackType callback)
{
  _genericPre.schedule(callback);
}
// Queue a generic callback to run after the next progress pass.
void DelayedSubmissionCollection::registerGenericPost(DelayedSubmissionCallbackType callback)
{
  _genericPost.schedule(callback);
}
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/src | rapidsai_public_repos/ucxx/cpp/src/utils/sockaddr.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arpa/inet.h>
#include <memory>
#include <netdb.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <ucxx/exception.h>
#include <ucxx/utils/sockaddr.h>
namespace ucxx {
namespace utils {
// Resolve `ip_address`:`port` into an addrinfo list owned by a unique_ptr
// that frees it with ::freeaddrinfo.
//
// @param ip_address  numeric address or hostname; nullptr yields the wildcard
//                    address (AI_PASSIVE is set).
// @param port        TCP/UDP port number (resolved numerically).
// @throws ucxx::Error if the port cannot be formatted or resolution fails.
std::unique_ptr<struct addrinfo, void (*)(struct addrinfo*)> get_addrinfo(const char* ip_address,
                                                                          uint16_t port)
{
  std::unique_ptr<struct addrinfo, void (*)(struct addrinfo*)> info(nullptr, ::freeaddrinfo);

  char ports[6];  // Up to 5 digits for a 16-bit port, plus NUL terminator.
  struct addrinfo* result = nullptr;
  struct addrinfo hints;

  // Don't restrict lookups
  ::memset(&hints, 0, sizeof(hints));
  // Except, port is numeric, address may be NULL meaning the
  // returned address is the wildcard.
  hints.ai_flags = AI_NUMERICSERV | AI_PASSIVE;

  // snprintf returns the number of characters that would have been written
  // (excluding the NUL); truncation occurred when that is >= sizeof(ports).
  // The original `> sizeof(ports)` check missed the == case and compared
  // signed to unsigned.
  const int written = ::snprintf(ports, sizeof(ports), "%u", port);
  if (written < 0 || written >= static_cast<int>(sizeof(ports)))
    throw ucxx::Error(std::string("Invalid port"));
  if (::getaddrinfo(ip_address, ports, &hints, &result))
    throw ucxx::Error(std::string("Invalid IP address or hostname"));
  info.reset(result);

  return info;
}
// Format the IP address and port of `sockaddr` into the caller-provided
// buffers.
//
// @param sockaddr      address to decode (AF_INET or AF_INET6).
// @param ip_str        output buffer receiving the textual IP address.
// @param port_str      output buffer receiving the decimal port number.
// @param max_str_size  capacity of each output buffer, in bytes.
//
// For unsupported address families both buffers receive the string
// "Invalid address family".
void sockaddr_get_ip_port_str(const struct sockaddr_storage* sockaddr,
                              char* ip_str,
                              char* port_str,
                              size_t max_str_size)
{
  switch (sockaddr->ss_family) {
    case AF_INET: {
      // BUGFIX: the original switch lacked `break`s, so an AF_INET address
      // fell through and was re-decoded as AF_INET6, clobbering the output.
      auto addr_in = reinterpret_cast<const struct sockaddr_in*>(sockaddr);
      inet_ntop(AF_INET, &addr_in->sin_addr, ip_str, max_str_size);
      snprintf(port_str, max_str_size, "%u", ntohs(addr_in->sin_port));
      break;
    }
    case AF_INET6: {
      auto addr_in6 = reinterpret_cast<const struct sockaddr_in6*>(sockaddr);
      inet_ntop(AF_INET6, &addr_in6->sin6_addr, ip_str, max_str_size);
      snprintf(port_str, max_str_size, "%u", ntohs(addr_in6->sin6_port));
      break;
    }
    default:
      // BUGFIX: the original assigned the *local* pointer parameters, which
      // had no effect for the caller; write the message into the buffers.
      snprintf(ip_str, max_str_size, "Invalid address family");
      snprintf(port_str, max_str_size, "Invalid address family");
      break;
  }
}
} // namespace utils
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/src | rapidsai_public_repos/ucxx/cpp/src/utils/ucx.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string>
#include <ucs/type/status.h>
#include <ucxx/exception.h>
#include <ucxx/utils/ucx.h>
namespace ucxx {
namespace utils {
// Translate a UCS status code into the matching ucxx exception and throw it.
//
// @param status       UCS status code to translate.
// @param userMessage  message for the thrown exception; when empty, the UCS
//                     textual description of `status` is used instead.
//
// Success statuses (`UCS_OK`, `UCS_INPROGRESS`) return without throwing.
void ucsErrorThrow(const ucs_status_t status, const std::string& userMessage)
{
  const std::string message = userMessage.empty() ? ucs_status_string(status) : userMessage;

  switch (status) {
    // Non-error statuses: nothing to do.
    case UCS_OK:
    case UCS_INPROGRESS: return;
    // Every UCS error maps to a dedicated ucxx exception type.
    case UCS_ERR_NO_MESSAGE: throw ucxx::NoMessageError(message);
    case UCS_ERR_NO_RESOURCE: throw ucxx::NoResourceError(message);
    case UCS_ERR_IO_ERROR: throw ucxx::IOError(message);
    case UCS_ERR_NO_MEMORY: throw ucxx::NoMemoryError(message);
    case UCS_ERR_INVALID_PARAM: throw ucxx::InvalidParamError(message);
    case UCS_ERR_UNREACHABLE: throw ucxx::UnreachableError(message);
    case UCS_ERR_INVALID_ADDR: throw ucxx::InvalidAddrError(message);
    case UCS_ERR_NOT_IMPLEMENTED: throw ucxx::NotImplementedError(message);
    case UCS_ERR_MESSAGE_TRUNCATED: throw ucxx::MessageTruncatedError(message);
    case UCS_ERR_NO_PROGRESS: throw ucxx::NoProgressError(message);
    case UCS_ERR_BUFFER_TOO_SMALL: throw ucxx::BufferTooSmallError(message);
    case UCS_ERR_NO_ELEM: throw ucxx::NoElemError(message);
    case UCS_ERR_SOME_CONNECTS_FAILED: throw ucxx::SomeConnectsFailedError(message);
    case UCS_ERR_NO_DEVICE: throw ucxx::NoDeviceError(message);
    case UCS_ERR_BUSY: throw ucxx::BusyError(message);
    case UCS_ERR_CANCELED: throw ucxx::CanceledError(message);
    case UCS_ERR_SHMEM_SEGMENT: throw ucxx::ShmemSegmentError(message);
    case UCS_ERR_ALREADY_EXISTS: throw ucxx::AlreadyExistsError(message);
    case UCS_ERR_OUT_OF_RANGE: throw ucxx::OutOfRangeError(message);
    case UCS_ERR_TIMED_OUT: throw ucxx::TimedOutError(message);
    case UCS_ERR_EXCEEDS_LIMIT: throw ucxx::ExceedsLimitError(message);
    case UCS_ERR_UNSUPPORTED: throw ucxx::UnsupportedError(message);
    case UCS_ERR_REJECTED: throw ucxx::RejectedError(message);
    case UCS_ERR_NOT_CONNECTED: throw ucxx::NotConnectedError(message);
    case UCS_ERR_CONNECTION_RESET: throw ucxx::ConnectionResetError(message);
    case UCS_ERR_FIRST_LINK_FAILURE: throw ucxx::FirstLinkFailureError(message);
    case UCS_ERR_LAST_LINK_FAILURE: throw ucxx::LastLinkFailureError(message);
    case UCS_ERR_FIRST_ENDPOINT_FAILURE: throw ucxx::FirstEndpointFailureError(message);
    case UCS_ERR_ENDPOINT_TIMEOUT: throw ucxx::EndpointTimeoutError(message);
    case UCS_ERR_LAST_ENDPOINT_FAILURE: throw ucxx::LastEndpointFailureError(message);
    default: throw ucxx::Error(message);
  }
}
} // namespace utils
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/src | rapidsai_public_repos/ucxx/cpp/src/utils/file_descriptor.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <cstdio>
#include <exception>
#include <ios>
#include <string>
#include <ucxx/utils/file_descriptor.h>
namespace ucxx {
namespace utils {
// Create an unnamed temporary file for capturing textual output.
//
// @returns an open `FILE*` owned by the caller.
// @throws std::ios_base::failure when the temporary file cannot be created.
FILE* createTextFileDescriptor()
{
  FILE* const descriptor = std::tmpfile();
  if (descriptor == nullptr) throw std::ios_base::failure("tmpfile() failed");
  return descriptor;
}
// Read the entire contents of `textFileDescriptor` into a string and close it.
//
// @param textFileDescriptor  open file to consume; closed on success
//                            (ownership is taken by this function).
// @returns the file's full contents.
// @throws std::ios_base::failure if seeking, sizing or reading fails.
std::string decodeTextFileDescriptor(FILE* textFileDescriptor)
{
  // Determine the file size by seeking to the end. The original code left
  // fseek/ftell unchecked: a failing ftell() returns -1L, which converted to
  // a huge size_t and could attempt an enormous allocation.
  if (fseek(textFileDescriptor, 0, SEEK_END) != 0)
    throw std::ios_base::failure("fseek() failed");
  const long fileSize = ftell(textFileDescriptor);
  if (fileSize < 0) throw std::ios_base::failure("ftell() failed");
  rewind(textFileDescriptor);

  const size_t size = static_cast<size_t>(fileSize);
  std::string textString(size, '\0');
  if (fread(&textString[0], sizeof(char), size, textFileDescriptor) != size)
    throw std::ios_base::failure("fread() failed");
  fclose(textFileDescriptor);

  return textString;
}
} // namespace utils
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/src | rapidsai_public_repos/ucxx/cpp/src/utils/callback_notifier.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <atomic>
#include <features.h>
#ifdef __GLIBC__
#include <gnu/libc-version.h>
#include <string>
#endif
#include <ucxx/log.h>
#include <ucxx/utils/callback_notifier.h>
namespace ucxx {
namespace utils {
#ifdef __GLIBC__
// Decide once, at static-initialization time, whether CallbackNotifier must
// fall back to a spinlock: glibc releases older than 2.25 are affected by a
// pthread condition-variable bug (see link below), so the condvar path is
// avoided there.
static const bool _useSpinlock = []() {
  auto const libcVersion = std::string{gnu_get_libc_version()};
  auto const dot = libcVersion.find(".");
  if (dot == std::string::npos) {
    // Unparseable version string: assume a fixed glibc and use the condvar.
    return false;
  } else {
    // See https://sourceware.org/bugzilla/show_bug.cgi?id=13165
    auto const glibcMajor = std::stoi(libcVersion.substr(0, dot).data());
    auto const glibcMinor = std::stoi(libcVersion.substr(dot + 1).data());
    auto const use = glibcMajor < 2 || (glibcMajor == 2 && glibcMinor < 25);
    ucxx_debug("glibc version %s detected, spinlock use is %d", libcVersion.c_str(), use);
    return use;
  }
}();
#else
// Non-glibc platforms always use the condition-variable implementation.
static constexpr bool _useSpinlock = false;
#endif
// Signal completion, waking any thread blocked in wait().
//
// On the spinlock path only the atomic flag is stored (release ordering pairs
// with the acquire load in wait()); otherwise the flag is set under the mutex
// and all condition-variable waiters are notified.
void CallbackNotifier::set()
{
  if (_useSpinlock) {
    _flag.store(true, std::memory_order_release);
  } else {
    {
      std::lock_guard lock(_mutex);
      // This can be relaxed because the mutex is providing
      // ordering.
      _flag.store(true, std::memory_order_relaxed);
    }
    _conditionVariable.notify_all();
  }
}
// Block until set() is called.
//
// @param period  maximum wait time in nanoseconds; 0 waits indefinitely.
//                NOTE(review): the spinlock path ignores `period` and spins
//                until the flag is set, always returning true.
// @returns true if the flag was observed set, false if the timed wait on the
//          condition variable expired first.
bool CallbackNotifier::wait(uint64_t period)
{
  if (_useSpinlock) {
    // Busy-wait; acquire pairs with the release store in set().
    while (!_flag.load(std::memory_order_acquire)) {}
  } else {
    std::unique_lock lock(_mutex);
    // Likewise here, the mutex provides ordering.
    if (period > 0) {
      return _conditionVariable.wait_for(
        lock, std::chrono::duration<uint64_t, std::nano>(period), [this]() {
          return _flag.load(std::memory_order_relaxed);
        });
    } else {
      _conditionVariable.wait(lock, [this]() { return _flag.load(std::memory_order_relaxed); });
    }
  }
  return true;
}
} // namespace utils
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/src | rapidsai_public_repos/ucxx/cpp/src/utils/python.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <dlfcn.h>
#include <ucxx/log.h>
#include <ucxx/utils/python.h>
namespace ucxx {
namespace utils {
// Cached result of the one-time attempt to load the optional Python support
// library.
static bool _ucxxPythonLoadChecked = false;
static void* _ucxxPythonLib = nullptr;

// Check whether UCXX Python support (`libucxx_python.so`) can be loaded.
//
// The first call attempts a lazy `dlopen`; the handle (or failure) is cached
// so later calls are cheap and never retry the load.
//
// @returns true when `libucxx_python.so` was successfully loaded.
bool isPythonAvailable()
{
  if (_ucxxPythonLoadChecked) return _ucxxPythonLib != nullptr;

  _ucxxPythonLoadChecked = true;
  _ucxxPythonLib = dlopen("libucxx_python.so", RTLD_LAZY);
  if (_ucxxPythonLib == nullptr) {
    ucxx_debug("dlopen('libucxx_python.so') failed");
  } else {
    ucxx_debug("dlopen('libucxx_python.so') loaded at %p", _ucxxPythonLib);
  }
  return _ucxxPythonLib != nullptr;
}
} // namespace utils
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp/src | rapidsai_public_repos/ucxx/cpp/src/internal/request_am.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <ucxx/buffer.h>
#include <ucxx/delayed_submission.h>
#include <ucxx/internal/request_am.h>
#include <ucxx/request_am.h>
namespace ucxx {
namespace internal {
// Bind an active-message receive operation to its target buffer.
//
// Registers a delayed submission on the request holding the buffer's pointer
// and size so the receive can later be dispatched directly into `buffer`.
RecvAmMessage::RecvAmMessage(internal::AmData* amData,
                             ucp_ep_h ep,
                             std::shared_ptr<RequestAm> request,
                             std::shared_ptr<Buffer> buffer)
  : _amData(amData), _ep(ep), _request(request), _buffer(buffer)
{
  _request->_delayedSubmission =
    std::make_shared<DelayedSubmission>(false, _buffer->data(), _buffer->getSize());
}
void RecvAmMessage::setUcpRequest(void* request) { _request->_request = request; }
// Completion callback for the active-message receive: hand the buffer to the
// request, run the request's own completion logic, then drop this message
// from the owner's receive map.
// NOTE(review): erasing the map entry likely releases the owning reference to
// this RecvAmMessage while `_amData->_mutex` is still held — confirm the
// lifetime/locking here is intentional.
void RecvAmMessage::callback(void* request, ucs_status_t status)
{
  _request->_buffer = _buffer;
  _request->callback(request, status);
  {
    std::lock_guard<std::mutex> lock(_amData->_mutex);
    _amData->_recvAmMessageMap.erase(_request.get());
  }
}
} // namespace internal
} // namespace ucxx
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/examples/CMakeLists.txt | # =================================================================================
# SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD 3-Clause License
# =================================================================================
find_package(Threads REQUIRED)
# ##################################################################################################
# * compiler function -----------------------------------------------------------------------------
# Anchor custom command for running all examples; each ConfigureBench() call
# below appends its executable's invocation to this command.
add_custom_command(
  OUTPUT UCXX_EXAMPLES
  COMMAND echo Running examples
  COMMAND mkdir -p results
  VERBATIM
  COMMENT "Running ucxx examples."
  USES_TERMINAL
)
# This function takes in an example name and example source and handles setting all of the
# associated properties and linking to build the example
#
# Arguments:
#   CMAKE_BENCH_NAME  name of the example executable target
#   ARGN              source file(s) for the executable
function(ConfigureBench CMAKE_BENCH_NAME)
  add_executable(${CMAKE_BENCH_NAME} ${ARGN})
  # Build into <build>/examples; installed binaries locate libucxx relative to
  # their own location via the RPATH.
  set_target_properties(
    ${CMAKE_BENCH_NAME}
    PROPERTIES RUNTIME_OUTPUT_DIRECTORY "$<BUILD_INTERFACE:${UCXX_BINARY_DIR}/examples>"
    INSTALL_RPATH "\$ORIGIN/../../../lib"
    CXX_STANDARD 17
    CXX_STANDARD_REQUIRED ON
  )
  target_link_libraries(
    ${CMAKE_BENCH_NAME} PRIVATE ucxx
    $<TARGET_NAME_IF_EXISTS:conda_env>
  )
  # Register this example with the aggregate UCXX_EXAMPLES command.
  add_custom_command(
    OUTPUT UCXX_EXAMPLES
    COMMAND ${CMAKE_BENCH_NAME}
    APPEND
    COMMENT "Adding ${CMAKE_BENCH_NAME}"
  )
  # Examples are an optional install component, excluded from default install.
  install(
    TARGETS ${CMAKE_BENCH_NAME}
    COMPONENT examples
    DESTINATION bin/examples/libucxx
    EXCLUDE_FROM_ALL
  )
endfunction()
# ##################################################################################################
# * basic example ---------------------------------------------------------------------------------
ConfigureBench(ucxx_example_basic basic.cpp)
# `make run_examples` (or the equivalent target) executes every registered
# example via the aggregate UCXX_EXAMPLES custom command.
add_custom_target(
  run_examples
  DEPENDS UCXX_EXAMPLES
  COMMENT "Custom command for running ucxx examples."
)
| 0 |
rapidsai_public_repos/ucxx/cpp | rapidsai_public_repos/ucxx/cpp/examples/basic.cpp | /**
* SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <cassert>
#include <chrono>
#include <iostream>
#include <memory>
#include <numeric>
#include <thread>
#include <unistd.h>
#include <vector>
#include <ucxx/api.h>
#include <ucxx/utils/sockaddr.h>
#include <ucxx/utils/ucx.h>
// Worker progress modes supported by this example; selected with `-m`.
enum class ProgressMode {
  Polling,
  Blocking,
  Wait,
  ThreadPolling,
  ThreadBlocking,
} progress_mode = ProgressMode::Polling;
// TCP port the server listens on; overridable with `-p`.
static uint16_t listener_port = 12345;
// Server-side state for this example: holds the listener and at most one
// endpoint created from an incoming connection request.
class ListenerContext {
 private:
  std::shared_ptr<ucxx::Worker> _worker{nullptr};      // worker owning the endpoint
  std::shared_ptr<ucxx::Endpoint> _endpoint{nullptr};  // active client endpoint, if any
  std::shared_ptr<ucxx::Listener> _listener{nullptr};  // server listener
 public:
  explicit ListenerContext(std::shared_ptr<ucxx::Worker> worker) : _worker{worker} {}
  ~ListenerContext() { releaseEndpoint(); }
  // Store the listener so the connection callback can accept/reject requests.
  void setListener(std::shared_ptr<ucxx::Listener> listener) { _listener = listener; }
  std::shared_ptr<ucxx::Listener> getListener() { return _listener; }
  std::shared_ptr<ucxx::Endpoint> getEndpoint() { return _endpoint; }
  // True while no client is connected (only one client is supported).
  bool isAvailable() const { return _endpoint == nullptr; }
  // Accept a pending connection request by creating the server-side endpoint.
  void createEndpointFromConnRequest(ucp_conn_request_h conn_request)
  {
    if (!isAvailable()) throw std::runtime_error("Listener context already has an endpoint");
    static bool endpoint_error_handling = true;
    _endpoint = _listener->createEndpointFromConnRequest(conn_request, endpoint_error_handling);
  }
  // Drop the current endpoint, making the server available again.
  void releaseEndpoint() { _endpoint.reset(); }
};
// Listener connection callback: accepts the first client connection request
// and rejects any subsequent ones (this example serves a single client).
static void listener_cb(ucp_conn_request_h conn_request, void* arg)
{
  char ip_str[INET6_ADDRSTRLEN];
  char port_str[INET6_ADDRSTRLEN];
  ucp_conn_request_attr_t attr{};
  ListenerContext* listener_ctx = reinterpret_cast<ListenerContext*>(arg);
  // Query and log the connecting client's address.
  attr.field_mask = UCP_CONN_REQUEST_ATTR_FIELD_CLIENT_ADDR;
  ucxx::utils::ucsErrorThrow(ucp_conn_request_query(conn_request, &attr));
  ucxx::utils::sockaddr_get_ip_port_str(&attr.client_address, ip_str, port_str, INET6_ADDRSTRLEN);
  std::cout << "Server received a connection request from client at address " << ip_str << ":"
            << port_str << std::endl;
  if (listener_ctx->isAvailable()) {
    listener_ctx->createEndpointFromConnRequest(conn_request);
  } else {
    // The server is already handling a connection request from a client,
    // reject this new one
    std::cout << "Rejecting a connection request from " << ip_str << ":" << port_str << "."
              << std::endl
              << "Only one client at a time is supported." << std::endl;
    ucxx::utils::ucsErrorThrow(
      ucp_listener_reject(listener_ctx->getListener()->getHandle(), conn_request));
  }
}
// Print command-line usage information for this example to stderr.
static void printUsage()
{
  std::cerr << "Usage: basic [parameters]" << std::endl;
  std::cerr << " basic client/server example" << std::endl;
  std::cerr << std::endl;
  std::cerr << "Parameters are:" << std::endl;
  std::cerr << " -m progress mode to use, valid values are: 'polling', 'blocking',"
            << std::endl;
  std::cerr << " 'thread-polling', 'thread-blocking' and 'wait' (default: 'blocking')"
            << std::endl;
  std::cerr << " -p <port> Port number to listen at" << std::endl;
  std::cerr << " -h Print this help" << std::endl;
  std::cerr << std::endl;
}
// Parse the example's command-line options, updating the globals
// `progress_mode` and `listener_port`.
//
// @param argc  argument count as received by main().
// @param argv  argument vector as received by main().
// @returns UCS_OK on success, or a UCS error status when an option is invalid
//          (usage is printed for `-h` and unknown options).
ucs_status_t parseCommand(int argc, char* const argv[])
{
  int c;
  while ((c = getopt(argc, argv, "m:p:h")) != -1) {
    switch (c) {
      case 'm':
        if (strcmp(optarg, "blocking") == 0) {
          progress_mode = ProgressMode::Blocking;
          break;
        } else if (strcmp(optarg, "polling") == 0) {
          progress_mode = ProgressMode::Polling;
          break;
        } else if (strcmp(optarg, "thread-blocking") == 0) {
          progress_mode = ProgressMode::ThreadBlocking;
          break;
        } else if (strcmp(optarg, "thread-polling") == 0) {
          progress_mode = ProgressMode::ThreadPolling;
          break;
        } else if (strcmp(optarg, "wait") == 0) {
          progress_mode = ProgressMode::Wait;
          break;
        } else {
          std::cerr << "Invalid progress mode: " << optarg << std::endl;
          return UCS_ERR_INVALID_PARAM;
        }
      case 'p': {
        // BUGFIX: parse into a signed int before assigning to the uint16_t
        // global; the original assigned atoi() straight into `listener_port`,
        // so negative or >65535 values wrapped into seemingly valid ports and
        // slipped past the `<= 0` check.
        const int port = atoi(optarg);
        if (port <= 0 || port > 65535) {
          std::cerr << "Wrong listener port: " << port << std::endl;
          return UCS_ERR_UNSUPPORTED;
        }
        listener_port = static_cast<uint16_t>(port);
        break;
      }
      case 'h':
      default: printUsage(); return UCS_ERR_UNSUPPORTED;
    }
  }
  return UCS_OK;
}
// Return a callable that progresses `worker` once, appropriate for
// `progressMode`. Thread-based modes return a no-op because a dedicated
// progress thread is already running.
std::function<void()> getProgressFunction(std::shared_ptr<ucxx::Worker> worker,
                                          ProgressMode progressMode)
{
  switch (progressMode) {
    case ProgressMode::Polling: return std::bind(std::mem_fn(&ucxx::Worker::progress), worker);
    case ProgressMode::Blocking:
      return std::bind(std::mem_fn(&ucxx::Worker::progressWorkerEvent), worker, -1);
    case ProgressMode::Wait: return std::bind(std::mem_fn(&ucxx::Worker::waitProgress), worker);
    default: return []() {};
  }
}
// Progress the worker until every request in `requests` has completed, then
// check each one for transfer errors (raising if any failed).
void waitRequests(ProgressMode progressMode,
                  std::shared_ptr<ucxx::Worker> worker,
                  const std::vector<std::shared_ptr<ucxx::Request>>& requests)
{
  const auto progress = getProgressFunction(worker, progressMode);

  for (const auto& request : requests) {
    while (!request->isCompleted()) {
      progress();
    }
    request->checkError();
  }
}
// Example entry point: runs both server (listener) and client (endpoint) in
// the same process, exchanges tag messages of varying sizes in mixed order,
// and verifies the received data matches what was sent.
int main(int argc, char** argv)
{
  if (parseCommand(argc, argv) != UCS_OK) return -1;
  // Setup: create UCP context, worker, listener and client endpoint.
  auto context = ucxx::createContext({}, ucxx::Context::defaultFeatureFlags);
  auto worker = context->createWorker();
  auto listener_ctx = std::make_unique<ListenerContext>(worker);
  auto listener = worker->createListener(listener_port, listener_cb, listener_ctx.get());
  listener_ctx->setListener(listener);
  // Client side: connect to the listener in this same process.
  auto endpoint = worker->createEndpointFromHostname("127.0.0.1", listener_port, true);
  // Initialize worker progress
  if (progress_mode == ProgressMode::Blocking)
    worker->initBlockingProgressMode();
  else if (progress_mode == ProgressMode::ThreadBlocking)
    worker->startProgressThread(false);
  else if (progress_mode == ProgressMode::ThreadPolling)
    worker->startProgressThread(true);
  auto progress = getProgressFunction(worker, progress_mode);
  // Block until client connects
  while (listener_ctx->isAvailable())
    progress();
  std::vector<std::shared_ptr<ucxx::Request>> requests;
  // Allocate send buffers
  std::vector<int> sendWireupBuffer{1, 2, 3};
  std::vector<std::vector<int>> sendBuffers{
    std::vector<int>(5), std::vector<int>(500), std::vector<int>(50000)};
  // Allocate receive buffers
  std::vector<int> recvWireupBuffer(sendWireupBuffer.size(), 0);
  std::vector<std::vector<int>> recvBuffers;
  for (const auto& v : sendBuffers)
    recvBuffers.push_back(std::vector<int>(v.size(), 0));
  // Schedule small wireup messages to let UCX identify capabilities between endpoints
  requests.push_back(listener_ctx->getEndpoint()->tagSend(
    sendWireupBuffer.data(), sendWireupBuffer.size() * sizeof(int), 0));
  requests.push_back(
    endpoint->tagRecv(recvWireupBuffer.data(), sendWireupBuffer.size() * sizeof(int), 0));
  ::waitRequests(progress_mode, worker, requests);
  requests.clear();
  // Schedule send and recv messages on different tags and different ordering
  requests.push_back(listener_ctx->getEndpoint()->tagSend(
    sendBuffers[0].data(), sendBuffers[0].size() * sizeof(int), 0));
  requests.push_back(listener_ctx->getEndpoint()->tagRecv(
    recvBuffers[1].data(), recvBuffers[1].size() * sizeof(int), 1));
  requests.push_back(listener_ctx->getEndpoint()->tagSend(
    sendBuffers[2].data(), sendBuffers[2].size() * sizeof(int), 2));
  requests.push_back(
    endpoint->tagRecv(recvBuffers[2].data(), recvBuffers[2].size() * sizeof(int), 2));
  requests.push_back(
    endpoint->tagSend(sendBuffers[1].data(), sendBuffers[1].size() * sizeof(int), 1));
  requests.push_back(
    endpoint->tagRecv(recvBuffers[0].data(), recvBuffers[0].size() * sizeof(int), 0));
  // Wait for requests to be set, i.e., transfers complete
  ::waitRequests(progress_mode, worker, requests);
  // Verify results
  for (size_t i = 0; i < sendWireupBuffer.size(); ++i)
    assert(recvWireupBuffer[i] == sendWireupBuffer[i]);
  for (size_t i = 0; i < sendBuffers.size(); ++i)
    for (size_t j = 0; j < sendBuffers[i].size(); ++j)
      assert(recvBuffers[i][j] == sendBuffers[i][j]);
  // Stop progress thread
  if (progress_mode == ProgressMode::ThreadBlocking || progress_mode == ProgressMode::ThreadPolling)
    worker->stopProgressThread();
  return 0;
}
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/docs/Makefile | # Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
# `Makefile` is listed so the catch-all rule below does not try to remake it.
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
| 0 |
rapidsai_public_repos/ucxx/docs | rapidsai_public_repos/ucxx/docs/source/optimizations.rst | Optimizations
=============
This is a list of built-in optimizations in UCXX. Most of them target Python usage, but may be useful in C++ applications depending on their design and needs.
Delayed Submission
------------------
Move transfer submissions (``ucp_{tag,stream}_{send,recv}_nb``) to the worker progress task.
Most times UCX applications will submit transfer requests immediately from the application thread, returning a ``ucs_status_ptr_t`` which is then checked for immediate completion or not, in the latter case a callback will execute once that completes during some ``ucp_worker_progress()`` call. When writing Python bindings for C/C++ code, it is often good practice to release the GIL instead of holding it while the C/C++ code runs, and thus allowing other threads to acquire the GIL while the low-level piece of code executes. For UCXX that would mean the GIL would be released and then reacquired by the C++ code almost instantaneously, or not released at all. To prevent that, UCXX allows submitting a transfer request intent, while delaying the ``ucp_{tag,stream}_{send,recv}_nb`` request for a future time, for example, during the worker progress task.
The UCX requests also require the UCX spinlock to be acquired. If there is a worker progress task running, this would effectively mean the application thread and the worker progress thread competing for the UCX spinlock simultaneously. Now, if one of the threads holds the UCX spinlock and also tries to acquire the GIL while the other thread holds the GIL and is attempting to acquire the UCX spinlock, that would lead to a deadlock. The solution for this problem in UCXX is to prevent the application thread from ever (or almost ever) acquiring the UCX spinlock, while preventing the worker progress thread from acquiring the GIL. This last problem is solved by using a `Notifier Thread`_.
Flowchart
~~~~~~~~~
To help understanding delayed submission execution, we have two flowcharts to illustrate the process. First we see how a transfer request is processed:
.. mermaid::
flowchart TD
subgraph Process Transfer Request
Enter([Enter])
SubmitRequest[Submit Request]
DelayedSubmissionEnabled{Delayed Submission Enabled?}
DispatchRequest[Dispatch Request]
RegisterDelayedSubmission[Register Delayed Submission]
Exit([Exit])
Enter-->SubmitRequest
SubmitRequest-->DelayedSubmissionEnabled
DelayedSubmissionEnabled-->|No| DispatchRequest
DelayedSubmissionEnabled-->|Yes| RegisterDelayedSubmission
RegisterDelayedSubmission-->Exit
DispatchRequest-->Exit
end
Second, we look at the flowchart for the UCX progress thread:
.. mermaid::
flowchart TD
subgraph UCX Progress Thread
Enter([Enter])
StopSignalRaised{Stop Signal Raised?}
DelayedSubmissionEnabled{Delayed Submission Enabled?}
DelayedSubmissionAvailable{Delayed Submission Available?}
DispatchRequest[Dispatch Request]
UCPWorkerProgress[ucp_worker_progress]
ExecuteCallbacks[Execute Callbacks]
Exit([Exit])
Enter-->StopSignalRaised
StopSignalRaised-->|Yes| Exit
      StopSignalRaised-->|No| DelayedSubmissionEnabled
      DelayedSubmissionEnabled-->|Yes| DelayedSubmissionAvailable
      DelayedSubmissionAvailable-->|Yes| DispatchRequest
      DelayedSubmissionAvailable-->|No| UCPWorkerProgress
      DelayedSubmissionEnabled-->|No| UCPWorkerProgress
DispatchRequest-->UCPWorkerProgress
UCPWorkerProgress-->ExecuteCallbacks
ExecuteCallbacks-->StopSignalRaised
end
Sequence Diagram
~~~~~~~~~~~~~~~~
It may also be helpful to understand the execution order via a couple of examples in the form of sequence diagrams. To begin with, a simple case might look as follows:
.. mermaid::
sequenceDiagram
participant A as Application Thread (Python)
participant W as Worker Thread
participant P as Submission Pool
A->>P: Register Submission S1 (Eager)
A->>P: Register Submission S2 (RNDV)
P-->>W: Read Pending Submissions (S1, S2)
Note over W: Dispatch S1 (Immediate Completion)
W->>A: Notify Completion S1
Note over W: Dispatch S2 (Enqueued for Progress)
Note over W: ucp_worker_progress (S2 completes)
W->>A: Notify Completion S2
A more complex sequence might look as the example below:
.. mermaid::
sequenceDiagram
participant A as Application Thread
participant W as Worker Thread
participant P as Submission Pool
A->>P: Register Submission S1 (RNDV)
A->>P: Register Submission S2 (Eager)
A->>P: Register Submission S3 (RNDV)
P-->>W: Read Pending Submissions (S1, S2, S3)
Note over W: Dispatch S1 (Enqueued for Progress)
Note over W: Dispatch S2 (Immediate Completion)
A->>P: Register Submission S4 (RNDV)
W->>A: Notify Completion S2
Note over W: Dispatch S3 (Enqueued for Progress)
Note over W: ucp_worker_progress (S3 completes)
W->>A: Notify Completion S3
P-->>W: Read Pending Submissions (S4)
Note over W: Dispatch S4 (Enqueued for Progress)
Note over W: ucp_worker_progress (S1 and S4 complete)
W->>A: Notify Completion S1
W->>A: Notify Completion S4
Please note that both examples above simply illustrate one possible sequence, as asynchronous behavior may occur in a different order depending on several variables, such as latency and rendezvous threshold.
Enable/Disable
~~~~~~~~~~~~~~
- C++: can be disabled via ``UCXXWorker`` constructor passing ``enableDelayedSubmission=false`` (default: ``true``);
- Python sync: can be disabled via ``UCXWorker`` constructor passing ``enable_delayed_submission=False`` (default: ``True``);
- Python async: can be disabled via environment variable ``UCXPY_ENABLE_DELAYED_SUBMISSION=0`` (default: ``1``);
Notifier Thread
---------------
The Notifier Thread is a thread launched from Python that must share the same ``asyncio`` event loop of the main application thread. The thread is used to notify a ``UCXRequest`` Python ``Future`` of its completion, avoiding the worker progress function from requiring GIL acquisition. Although being a separate thread from the main application thread, this will require the GIL exclusively while setting the ``Future`` result/exception and will block the main thread (unless the main thread is doing something that doesn't require the GIL either) during that short period, but will avoid additional ``asyncio`` tasks from flooding the application thread and executing more Python code than absolutely necessary.
The ``NotifierThread`` runs a loop in Python that shall have the following sequence:
1. Populate Python ``Future`` pool (no-op if pool has more objects available than pre-defined);
2. Block while waiting (implemented as a ``std::condition_variable``) for one (or more) ``UCXXRequest`` to complete and be notified by ``UCXXWorker``;
3. Run request notifier (implemented in C/C++ via CPython functions) as an ``asyncio`` coroutine -- required to ensure the event loop is notified of the ``Future`` completion;
Sample thread target function:
.. code-block:: python
async def _run_request_notifier(worker):
worker.run_request_notifier()
def _notifierThread(event_loop, worker):
# Set thread's event loop to the same used in main thread
asyncio.set_event_loop(event_loop)
while True:
# Populate ``Future`` pool -- no-op if holding more than threshold
worker.populate_python_futures_pool()
# Blocks until worker progress function notifies of one
# (or more) UCXXRequest completion
finished = worker.wait_request_notifier()
# If worker is shutting down, return
if finished:
return
# Create task to notify all enqueued waiting futures
# Note: a task must be created from a coroutine
task = asyncio.run_coroutine_threadsafe(
_run_request_notifier(worker), event_loop
)
# Wait until async task completes
try:
task.result()
except Exception as e:
print(f"task.result() error {type(e)}: {e}")
Flowchart
~~~~~~~~~
Arguably, this is a very complex process, involving 3 threads executing different parts of the entire workflow. The flowchart below may help in better understanding the description provided above.
.. mermaid::
flowchart TD
MTEnter([Enter])
MTCreateEventLoop[Create Python Event Loop]
MTCreateThreads[Create Threads]
MTApplicationFinished{Application Finished?}
MTDoStuff[...]
MTSubmitRequest[Submit Request]
MTRequestAsync[Handle Request Async]
MTCompletedImmediately{Request Completed Immediately?}
MTPythonFutureNotify["Future::notify()"]
MTExit([Exit])
ELRunRequestNotifier["[ASYNC] Worker::runRequestNotifier()"]
NTEnter([Enter])
NTRegisterEventLoop[Register Event Loop]
NTPopulateFuturesPool["Worker::populatePythonFuturesPool()"]
NTWaitRequestNotifier["Block: Worker::waitRequestNotifier()"]
NTApplicationFinished{Application Finished}
NTNotifierThreadExit([Exit])
NTRunRequestNotifier["Worker::runRequestNotifier()"]
PTEnter([Enter])
PTDoStuff[...]
PTRequestCallbackEnter([Enter])
PTPythonFutureNotify["Future::notify()"]
PTRequestCallbackExit([Exit])
subgraph PT [Main Python Thread]
MTEnter-->MTCreateEventLoop
MTCreateEventLoop-->MTCreateThreads
MTCreateThreads-->MTDoStuff
MTDoStuff-->MTSubmitRequest
MTSubmitRequest-->MTCompletedImmediately
MTCompletedImmediately-->|Yes| MTPythonFutureNotify
MTCompletedImmediately-->|No| MTRequestAsync
MTRequestAsync-->MTApplicationFinished
MTRequestAsync-.Handle async.->PTRequestCallbackEnter
MTPythonFutureNotify-.Wake.->NTWaitRequestNotifier
MTPythonFutureNotify-->MTApplicationFinished
MTApplicationFinished-->|Yes| MTExit
MTApplicationFinished-->|No| MTDoStuff
subgraph EventLoop
ELDoStuff[...]-->ELRunRequestNotifier
ELRunRequestNotifier-->ELDoStuff
end
MTCreateThreads-.EventLoop.->NTEnter
subgraph PNT [Python Notifier Thread]
NTEnter([Enter])-->NTRegisterEventLoop
NTRegisterEventLoop-->NTPopulateFuturesPool
NTPopulateFuturesPool-->NTWaitRequestNotifier
NTWaitRequestNotifier-->NTRunRequestNotifier
NTRunRequestNotifier-.Coroutine.->ELRunRequestNotifier
NTRunRequestNotifier-->NTApplicationFinished
NTApplicationFinished-->|Yes| NTNotifierThreadExit
NTApplicationFinished-->|No| NTPopulateFuturesPool
end
MTCreateThreads-..->PTEnter
subgraph UPT[UCX Progress Thread]
PTEnter-->PTDoStuff
PTDoStuff-->PTRequestCallbackEnter
subgraph RequestCallback
PTRequestCallbackEnter-->PTPythonFutureNotify
PTPythonFutureNotify-.Wake.->NTWaitRequestNotifier
PTPythonFutureNotify-->PTRequestCallbackExit
end
PTRequestCallbackExit-->PTDoStuff
end
end
Sequence Diagram
~~~~~~~~~~~~~~~~
It may also be helpful to understand the notifier thread with a couple of sequence diagrams. One such case is when a transfer completes immediately, this is the simplest case and exemplified by the diagram below:
.. mermaid::
sequenceDiagram
participant E as Event Loop
participant A as Application Thread (Python)
activate A
A->>E: Create
activate E
participant N as Notifier Thread (Python)
participant W as Worker Thread (C++)
A->>N: Register Event Loop
activate N
Note over A: Dispatch Request (Immediate Completion)
A->>N: Future::notify()
Note over N: Awake: waitRequestNotifier()
N->>E: Notify: runRequestNotifier()
deactivate N
deactivate E
deactivate A
A second case is when the transfer does not complete immediately and must be progressed and further notified from the worker thread, this process is shown in the following diagram:
.. mermaid::
sequenceDiagram
participant E as Event Loop
participant A as Application Thread (Python)
participant N as Notifier Thread (Python)
participant W as Worker Thread (C++)
activate A
activate W
A->>E: Create
activate E
A->>N: Register Event Loop
activate N
Note over A: Dispatch Request
Note over W: Progress
W->>N: Future::notify()
Note over N: Awake: waitRequestNotifier()
N->>E: Notify: runRequestNotifier()
deactivate N
deactivate E
deactivate A
deactivate W
Enable/Disable
~~~~~~~~~~~~~~
Given UCXX C++ layer is Python-agnostic, it must be possible to disable all the Python-specific code at compile-time. Notifying Python ``Future`` can be enabled at C++ compile time via the ``-DUCXX_ENABLE_PYTHON=1`` definition, which is default for ``setuptools`` builds. When ``-DUCXX_ENABLE_PYTHON=0``, all notification happens via coroutines that continuously check for completion of a ``UCXXRequest`` and yield for other async tasks, which tend to be CPU-intensive.
Multi-buffer Transfers
----------------------
Applications like Dask often must send/receive multiple buffers for a single read or write operation. Generally, it's necessary to follow the order below:
1. Send/receive number of frames;
2. Send/receive size and type (host or CUDA) of each frame;
3. Send/allocate-and-receive each frame.
This results in at least 3 send/receive operations, and potentially more when multiple buffers are transferred. To avoid submitting multiple async operations and then waiting on each one individually, UCXX introduces a new ``tag_send_multi``/``tag_recv_multi`` API to simplify that and reduce Python overhead.
On the sender side it works by assembling a ``Header`` object with a pre-defined size (currently ``100`` frames) combining the number of frames included, whether there is a next ``Header`` (in case the number of frames is larger than the pre-defined size), the buffer pointers, buffer types (host or CUDA) and buffer sizes. The ``Header`` is then sent as a single ``tag`` message, followed by all buffers in the order in which each buffer appears in the ``Header``.
The receiver side will always begin by waiting for a ``Header`` of that pre-defined size and parse it. If there's a next ``Header`` it will then wait for it until no more ``Header`` objects are expected. Then it will parse the ``Header``, and looping through each buffer described in the ``Header`` it will allocate memory for that buffer, followed by a ``tag_recv_nb`` operation to receive on that buffer. Note that unlike single-buffer transfers, the receiver side has no way of knowing buffer types/sizes in advance, so allocation can't be done in advance by the user and must be dealt with internally.
Supported Buffer Types
~~~~~~~~~~~~~~~~~~~~~~
Currently, only two types of buffers are supported: host and CUDA. Host buffers are defined in ``UCXXPyHostBuffer`` and are allocated via regular ``malloc`` and released via ``free``. CUDA buffers are defined in ``UCXXPyRMMBuffer``, and as the name suggests it depends on RMM, allocation occurs via ``rmm::device_buffer`` and release occurs when that object goes out-of-scope as implemented by ``rmm::device_buffer`` destructor.
Once ``get()`` is called by the user, the buffer is released and it's the user's responsibility to handle its release. The Cython ``UCXBufferRequest`` interface that converts ``UCXXPyHostBuffer``/``UCXXPyRMMBuffer`` into equivalent ``numpy.ndarray``/``rmm.DeviceBuffer`` ensures the resulting Python object will release the buffer once its reference count goes to zero.
Flowchart
~~~~~~~~~
To help understanding multi-buffer transfers, we have a few flowcharts to illustrate the process in three parts. We begin by looking at multi-receive and multi-send procedures:
.. mermaid::
flowchart TD
SendEnter([Enter])
CreateHeader[Create Header]
SendHeader[/Send Header/]
SendHasNextHeader{Has Next Header?}
SendFrame[/Send Frame/]
SendFramesCompleted{All Frames Sent?}
SendExit([Exit])
RecvEnter([Enter])
RecvHeader[/Receive Header/]
RecvHasNextHeader{Has Next Header?}
RecvAllocateBuffer[Allocate Buffer]
RecvFrame[/Receive Frame/]
RecvFramesCompleted{All Frames Received?}
RecvExit([Exit])
subgraph TMS[tagMultiSend]
SendEnter-->CreateHeader
CreateHeader-->SendHeader
SendHeader-->SendHasNextHeader
SendHasNextHeader-->|Yes| CreateHeader
SendHasNextHeader-->|No| SendFrame
SendFrame-->SendFramesCompleted
SendFramesCompleted-->|Yes| SendExit
SendFramesCompleted-->|No| SendFrame
end
subgraph TMR[tagMultiRecv]
RecvEnter-->RecvHeader
RecvHeader-->RecvHasNextHeader
RecvHasNextHeader-->|Yes| RecvHeader
RecvHasNextHeader-->|No| RecvAllocateBuffer
RecvAllocateBuffer-->RecvFrame
RecvFrame-->RecvFramesCompleted
RecvFramesCompleted-->|Yes| RecvExit
RecvFramesCompleted-->|No| RecvAllocateBuffer
end
The charts above describe how the actual transfers happen. However, we also need to understand the flow for sending and receiving headers describing the actual data being transferred. We see that in the chart below.
.. mermaid::
flowchart TD
CallbackEnter([Enter])
CallbackRequestsEmpty{Buffer Requests Empty?}
CallbackRecvHeader[/Receive Header/]
CallbackHasNextHeader{Has Next Header?}
CallbackRecvFrames[/Receive Frames/]
CallbackExit([Exit])
subgraph Header Receive Callback
CallbackEnter-->CallbackRequestsEmpty
CallbackRequestsEmpty-->|Yes| CallbackRecvHeader
CallbackRequestsEmpty-->|No| CallbackHasNextHeader
CallbackRecvHeader-->CallbackEnter
CallbackHasNextHeader{Has Next Header?}-->|Yes| CallbackRecvHeader
CallbackHasNextHeader-->|No| CallbackRecvFrames
CallbackRecvFrames-->CallbackExit
end
The final step is to look at the callbacks for sending and receiving frames, as shown below.
.. mermaid::
flowchart TD
MarkCompletedEnter([Enter])
MarkCompletedFramesCompleted{All Frames Completed?}
MarkCompletedSetPythonFuture[/Set Python Future/]
MarkCompletedDone([Done])
subgraph Frame Send/Receive Callback
MarkCompletedEnter-->MarkCompletedFramesCompleted
MarkCompletedFramesCompleted-->|Yes| MarkCompletedSetPythonFuture
MarkCompletedFramesCompleted-->|No| MarkCompletedDone
MarkCompletedSetPythonFuture-->MarkCompletedDone
end
Enable/Disable
~~~~~~~~~~~~~~
Since multi-buffer transfers are a new feature in UCXX and do not have an equivalent in either UCX or UCX-Py, they require a new API. The new API is composed of ``Endpoint.send_multi(list_of_buffers)`` and ``list_of_buffers = Endpoint.recv_multi()``.
| 0 |
rapidsai_public_repos/ucxx/docs | rapidsai_public_repos/ucxx/docs/source/optimizations.md | # Optimizations
This is a list of built-in optimizations in UCXX. Most of them target Python usage, but may be useful in C++ applications depending on their design and needs.
## Delayed Submission
Move transfer submissions (``ucp_{tag,stream}_{send,recv}_nb)``) to the worker progress task.
Most times UCX applications will submit transfer requests immediately from the application thread, returning a ``ucs_status_ptr_t`` which is then checked for immediate completion or not, in the latter case a callback will execute once that completes during some ``ucp_worker_progress()`` call. When writing Python bindings for C/C++ code, it is often good practice to release the GIL instead of holding it while the C/C++ code runs, and thus allowing other threads to acquire the GIL while the low-level piece of code executes. For UCXX that would mean the GIL would be released and then reacquired by the C++ code almost instantaneously, or not released at all. To prevent that, UCXX allows submitting a transfer request intent, while delaying the ``ucp_{tag,stream}_{send,recv}_nb`` request for a future time, for example, during the worker progress task.
The UCX requests also require the UCX spinlock to be acquired. If there is a worker progress task running, this would effectively mean the application thread and the worker progress thread competing for the UCX spinlock simultaneously. Now, if one of the threads holds the UCX spinlock and also tries to acquire the GIL while the other thread holds the GIL and is attempting to acquire the UCX spinlock, that would lead to a deadlock. The solution for this problem in UCXX is to prevent the application thread from ever (or almost ever) acquiring the UCX spinlock, while preventing the worker progress thread from acquiring the GIL. This last problem is solved by using a `Notifier Thread`.
### Flowchart
To help understanding delayed submission execution, we have two flowcharts to illustrate the process. First we see how a transfer request is processed:
```mermaid
flowchart TD
subgraph Process Transfer Request
Enter([Enter])
SubmitRequest[Submit Request]
DelayedSubmissionEnabled{Delayed Submission Enabled?}
DispatchRequest[Dispatch Request]
RegisterDelayedSubmission[Register Delayed Submission]
Exit([Exit])
Enter-->SubmitRequest
SubmitRequest-->DelayedSubmissionEnabled
DelayedSubmissionEnabled-->|No| DispatchRequest
DelayedSubmissionEnabled-->|Yes| RegisterDelayedSubmission
RegisterDelayedSubmission-->Exit
DispatchRequest-->Exit
end
```
Second, we look at the flowchart for the UCX progress thread:
```mermaid
flowchart TD
subgraph UCX Progress Thread
Enter([Enter])
StopSignalRaised{Stop Signal Raised?}
DelayedSubmissionEnabled{Delayed Submission Enabled?}
DelayedSubmissionAvailable{Delayed Submission Available?}
DispatchRequest[Dispatch Request]
UCPWorkerProgress[ucp_worker_progress]
ExecuteCallbacks[Execute Callbacks]
Exit([Exit])
Enter-->StopSignalRaised
StopSignalRaised-->|Yes| Exit
StopSignalRaised-->|No| DelayedSubmissionEnabled
DelayedSubmissionEnabled-->|Yes| DelayedSubmissionAvailable
DelayedSubmissionAvailable-->|Yes| DispatchRequest
DelayedSubmissionAvailable-->|No| UCPWorkerProgress
DelayedSubmissionEnabled-->|No| UCPWorkerProgress
DispatchRequest-->UCPWorkerProgress
UCPWorkerProgress-->ExecuteCallbacks
ExecuteCallbacks-->StopSignalRaised
end
```
### Sequence Diagram
It may also be helpful to understand the execution order via a couple of examples in the form of sequence diagrams. To begin with, a simple case might look as follows:
```mermaid
sequenceDiagram
participant A as Application Thread (Python)
participant W as Worker Thread
participant P as Submission Pool
A->>P: Register Submission S1 (Eager)
A->>P: Register Submission S2 (RNDV)
P-->>W: Read Pending Submissions (S1, S2)
Note over W: Dispatch S1 (Immediate Completion)
W->>A: Notify Completion S1
Note over W: Dispatch S2 (Enqueued for Progress)
Note over W: ucp_worker_progress (S2 completes)
W->>A: Notify Completion S2
```
A more complex sequence might look as the example below:
```mermaid
sequenceDiagram
participant A as Application Thread
participant W as Worker Thread
participant P as Submission Pool
A->>P: Register Submission S1 (RNDV)
A->>P: Register Submission S2 (Eager)
A->>P: Register Submission S3 (RNDV)
P-->>W: Read Pending Submissions (S1, S2, S3)
Note over W: Dispatch S1 (Enqueued for Progress)
Note over W: Dispatch S2 (Immediate Completion)
A->>P: Register Submission S4 (RNDV)
W->>A: Notify Completion S2
Note over W: Dispatch S3 (Enqueued for Progress)
Note over W: ucp_worker_progress (S3 completes)
W->>A: Notify Completion S3
P-->>W: Read Pending Submissions (S4)
Note over W: Dispatch S4 (Enqueued for Progress)
Note over W: ucp_worker_progress (S1 and S4 complete)
W->>A: Notify Completion S1
W->>A: Notify Completion S4
```
Please note that both examples above simply illustrate one possible sequence, as asynchronous behavior may occur in a different order depending on several variables, such as latency and rendezvous threshold.
### Enable/Disable
- C++: can be disabled via ``UCXXWorker`` constructor passing ``enableDelayedSubmission=false`` (default: ``true``);
- Python sync: can be disabled via ``UCXWorker`` constructor passing ``enable_delayed_submission=False`` (default: ``True``);
- Python async: can be disabled via environment variable ``UCXPY_ENABLE_DELAYED_SUBMISSION=0`` (default: ``1``);
## Notifier Thread
The Notifier Thread is a thread launched from Python that must share the same ``asyncio`` event loop of the main application thread. The thread is used to notify a ``UCXRequest`` Python ``Future`` of its completion, avoiding the worker progress function from requiring GIL acquisition. Although being a separate thread from the main application thread, this will require the GIL exclusively while setting the ``Future`` result/exception and will block the main thread (unless the main thread is doing something that doesn't require the GIL either) during that short period, but will avoid additional ``asyncio`` tasks from flooding the application thread and executing more Python code than absolutely necessary.
The ``NotifierThread`` runs a loop in Python that shall have the following sequence:
1. Populate Python ``Future`` pool (no-op if pool has more objects available than pre-defined);
2. Block while waiting (implemented as a ``std::condition_variable``) for one (or more) ``UCXXRequest`` to complete and be notified by ``UCXXWorker``;
3. Run request notifier (implemented in C/C++ via CPython functions) as an ``asyncio`` coroutine -- required to ensure the event loop is notified of the ``Future`` completion;
Sample thread target function:
```python
async def _run_request_notifier(worker):
worker.run_request_notifier()
def _notifierThread(event_loop, worker):
# Set thread's event loop to the same used in main thread
asyncio.set_event_loop(event_loop)
while True:
# Populate ``Future`` pool -- no-op if holding more than threshold
worker.populate_python_futures_pool()
# Blocks until worker progress function notifies of one
# (or more) UCXXRequest completion
finished = worker.wait_request_notifier()
# If worker is shutting down, return
if finished:
return
# Create task to notify all enqueued waiting futures
# Note: a task must be created from a coroutine
task = asyncio.run_coroutine_threadsafe(
_run_request_notifier(worker), event_loop
)
# Wait until async task completes
try:
task.result()
except Exception as e:
print(f"task.result() error {type(e)}: {e}")
```
### Flowchart
Arguably, this is a very complex process, involving 3 threads executing different parts of the entire workflow. The flowchart below may help in better understanding the description provided above.
```mermaid
flowchart TD
MTEnter([Enter])
MTCreateEventLoop[Create Python Event Loop]
MTCreateThreads[Create Threads]
MTApplicationFinished{Application Finished?}
MTDoStuff[...]
MTSubmitRequest[Submit Request]
MTRequestAsync[Handle Request Async]
MTCompletedImmediately{Request Completed Immediately?}
MTPythonFutureNotify["Future::notify()"]
MTExit([Exit])
ELRunRequestNotifier["[ASYNC] Worker::runRequestNotifier()"]
NTEnter([Enter])
NTRegisterEventLoop[Register Event Loop]
NTPopulateFuturesPool["Worker::populatePythonFuturesPool()"]
NTWaitRequestNotifier["Block: Worker::waitRequestNotifier()"]
NTApplicationFinished{Application Finished}
NTNotifierThreadExit([Exit])
NTRunRequestNotifier["Worker::runRequestNotifier()"]
PTEnter([Enter])
PTDoStuff[...]
PTRequestCallbackEnter([Enter])
PTPythonFutureNotify["Future::notify()"]
PTRequestCallbackExit([Exit])
subgraph PT [Main Python Thread]
MTEnter-->MTCreateEventLoop
MTCreateEventLoop-->MTCreateThreads
MTCreateThreads-->MTDoStuff
MTDoStuff-->MTSubmitRequest
MTSubmitRequest-->MTCompletedImmediately
MTCompletedImmediately-->|Yes| MTPythonFutureNotify
MTCompletedImmediately-->|No| MTRequestAsync
MTRequestAsync-->MTApplicationFinished
MTRequestAsync-.Handle async.->PTRequestCallbackEnter
MTPythonFutureNotify-.Wake.->NTWaitRequestNotifier
MTPythonFutureNotify-->MTApplicationFinished
MTApplicationFinished-->|Yes| MTExit
MTApplicationFinished-->|No| MTDoStuff
subgraph EventLoop
ELDoStuff[...]-->ELRunRequestNotifier
ELRunRequestNotifier-->ELDoStuff
end
MTCreateThreads-.EventLoop.->NTEnter
subgraph PNT [Python Notifier Thread]
NTEnter([Enter])-->NTRegisterEventLoop
NTRegisterEventLoop-->NTPopulateFuturesPool
NTPopulateFuturesPool-->NTWaitRequestNotifier
NTWaitRequestNotifier-->NTRunRequestNotifier
NTRunRequestNotifier-.Coroutine.->ELRunRequestNotifier
NTRunRequestNotifier-->NTApplicationFinished
NTApplicationFinished-->|Yes| NTNotifierThreadExit
NTApplicationFinished-->|No| NTPopulateFuturesPool
end
MTCreateThreads-..->PTEnter
subgraph UPT[UCX Progress Thread]
PTEnter-->PTDoStuff
PTDoStuff-->PTRequestCallbackEnter
subgraph RequestCallback
PTRequestCallbackEnter-->PTPythonFutureNotify
PTPythonFutureNotify-.Wake.->NTWaitRequestNotifier
PTPythonFutureNotify-->PTRequestCallbackExit
end
PTRequestCallbackExit-->PTDoStuff
end
end
```
### Sequence Diagram
It may also be helpful to understand the notifier thread with a couple of sequence diagrams. One such case is when a transfer completes immediately, this is the simplest case and exemplified by the diagram below:
```mermaid
sequenceDiagram
participant E as Event Loop
participant A as Application Thread (Python)
activate A
A->>E: Create
activate E
participant N as Notifier Thread (Python)
participant W as Worker Thread (C++)
A->>N: Register Event Loop
activate N
Note over A: Dispatch Request (Immediate Completion)
A->>N: Future::notify()
Note over N: Awake: waitRequestNotifier()
N->>E: Notify: runRequestNotifier()
deactivate N
deactivate E
deactivate A
```
A second case is when the transfer does not complete immediately and must be progressed and further notified from the worker thread, this process is shown in the following diagram:
```mermaid
sequenceDiagram
participant E as Event Loop
participant A as Application Thread (Python)
participant N as Notifier Thread (Python)
participant W as Worker Thread (C++)
activate A
activate W
A->>E: Create
activate E
A->>N: Register Event Loop
activate N
Note over A: Dispatch Request
Note over W: Progress
W->>N: Future::notify()
Note over N: Awake: waitRequestNotifier()
N->>E: Notify: runRequestNotifier()
deactivate N
deactivate E
deactivate A
deactivate W
```
### Enable/Disable
Given UCXX C++ layer is Python-agnostic, it must be possible to disable all the Python-specific code at compile-time. Notifying Python ``Future`` can be enabled at C++ compile time via the ``-DUCXX_ENABLE_PYTHON=1`` definition, which is default for ``setuptools`` builds. When ``-DUCXX_ENABLE_PYTHON=0``, all notification happens via coroutines that continuously check for completion of a ``UCXXRequest`` and yield for other async tasks, which tend to be CPU-intensive.
## Multi-buffer Transfers
Applications like Dask often must send/receive multiple buffers for a single read or write operation. Generally, it's necessary to follow the order below:
1. Send/receive number of frames;
2. Send/receive size and type (host or CUDA) of each frame;
3. Send/allocate-and-receive each frame.
This results in at least 3 send/receive operations, and potentially more when multiple buffers are transferred. To avoid submitting multiple async operations and then waiting on each one individually, UCXX introduces a new ``tag_send_multi``/``tag_recv_multi`` API to simplify that and reduce Python overhead.
On the sender side it works by assembling a ``Header`` object with a pre-defined size (currently ``100`` frames) combining the number of frames included, whether there is a next ``Header`` (in case the number of frames is larger than the pre-defined size), the buffer pointers, buffer types (host or CUDA) and buffer sizes. The ``Header`` is then sent as a single ``tag`` message, followed by all buffers in the order in which each buffer appears in the ``Header``.
The receiver side will always begin by waiting for a ``Header`` of that pre-defined size and parse it. If there's a next ``Header`` it will then wait for it until no more ``Header`` objects are expected. Then it will parse the ``Header``, and looping through each buffer described in the ``Header`` it will allocate memory for that buffer, followed by a ``tag_recv_nb`` operation to receive on that buffer. Note that unlike single-buffer transfers, the receiver side has no way of knowing buffer types/sizes in advance, so allocation can't be done in advance by the user and must be dealt with internally.
### Supported Buffer Types
Currently, only two types of buffers are supported: host and CUDA. Host buffers are defined in ``UCXXPyHostBuffer`` and are allocated via regular ``malloc`` and released via ``free``. CUDA buffers are defined in ``UCXXPyRMMBuffer``, and as the name suggests it depends on RMM, allocation occurs via ``rmm::device_buffer`` and release occurs when that object goes out-of-scope as implemented by ``rmm::device_buffer`` destructor.
Once ``get()`` is called by the user, the buffer is released and it's the user's responsibility to handle its release. The Cython ``UCXBufferRequest`` interface that converts ``UCXXPyHostBuffer``/``UCXXPyRMMBuffer`` into equivalent ``numpy.ndarray``/``rmm.DeviceBuffer`` ensures the resulting Python object will release the buffer once its reference count goes to zero.
### Flowchart
To help understanding multi-buffer transfers, we have a few flowcharts to illustrate the process in three parts. We begin by looking at multi-receive and multi-send procedures:
```mermaid
flowchart TD
SendEnter([Enter])
CreateHeader[Create Header]
SendHeader[/Send Header/]
SendHasNextHeader{Has Next Header?}
SendFrame[/Send Frame/]
SendFramesCompleted{All Frames Sent?}
SendExit([Exit])
RecvEnter([Enter])
RecvHeader[/Receive Header/]
RecvHasNextHeader{Has Next Header?}
RecvAllocateBuffer[Allocate Buffer]
RecvFrame[/Receive Frame/]
RecvFramesCompleted{All Frames Received?}
RecvExit([Exit])
subgraph TMS[tagMultiSend]
SendEnter-->CreateHeader
CreateHeader-->SendHeader
SendHeader-->SendHasNextHeader
SendHasNextHeader-->|Yes| CreateHeader
SendHasNextHeader-->|No| SendFrame
SendFrame-->SendFramesCompleted
SendFramesCompleted-->|Yes| SendExit
SendFramesCompleted-->|No| SendFrame
end
subgraph TMR[tagMultiRecv]
RecvEnter-->RecvHeader
RecvHeader-->RecvHasNextHeader
RecvHasNextHeader-->|Yes| RecvHeader
RecvHasNextHeader-->|No| RecvAllocateBuffer
RecvAllocateBuffer-->RecvFrame
RecvFrame-->RecvFramesCompleted
RecvFramesCompleted-->|Yes| RecvExit
RecvFramesCompleted-->|No| RecvAllocateBuffer
end
```
The charts above describe how the actual transfers happen. However, we also need to understand the flow for sending and receiving headers describing the actual data being transferred. We see that in the chart below.
```mermaid
flowchart TD
CallbackEnter([Enter])
CallbackRequestsEmpty{Buffer Requests Empty?}
CallbackRecvHeader[/Receive Header/]
CallbackHasNextHeader{Has Next Header?}
CallbackRecvFrames[/Receive Frames/]
CallbackExit([Exit])
subgraph Header Receive Callback
CallbackEnter-->CallbackRequestsEmpty
CallbackRequestsEmpty-->|Yes| CallbackRecvHeader
CallbackRequestsEmpty-->|No| CallbackHasNextHeader
CallbackRecvHeader-->CallbackEnter
CallbackHasNextHeader{Has Next Header?}-->|Yes| CallbackRecvHeader
CallbackHasNextHeader-->|No| CallbackRecvFrames
CallbackRecvFrames-->CallbackExit
end
```
The final step is to look at the callbacks for sending and receiving frames, as shown below.
```mermaid
flowchart TD
MarkCompletedEnter([Enter])
MarkCompletedFramesCompleted{All Frames Completed?}
MarkCompletedSetPythonFuture[/Set Python Future/]
MarkCompletedDone([Done])
subgraph Frame Send/Receive Callback
MarkCompletedEnter-->MarkCompletedFramesCompleted
MarkCompletedFramesCompleted-->|Yes| MarkCompletedSetPythonFuture
MarkCompletedFramesCompleted-->|No| MarkCompletedDone
MarkCompletedSetPythonFuture-->MarkCompletedDone
end
```
### Enable/Disable
Since multi-buffer transfers are a new feature in UCXX and do not have an equivalent in either UCX or UCX-Py, they require a new API. The new API is composed of ``Endpoint.send_multi(list_of_buffers)`` and ``list_of_buffers = Endpoint.recv_multi()``.
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/ci/test_python.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
set -euo pipefail
source "$(dirname "$0")/test_utils.sh"
rapids-logger "Create test conda environment"
. /opt/conda/etc/profile.d/conda.sh
rapids-dependency-file-generator \
--output conda \
--file_key test_python \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n test
conda activate test
rapids-print-env
print_system_stats
run_tests() {
# Run the core (blocking API) Python test suite with a 2-minute timeout.
# Build the command once and execute the same string that was logged, so the
# logged command and the executed command cannot drift apart (this matches
# the run_tests pattern used by ci/test_cpp.sh). The command contains no
# quoted arguments, so plain word splitting of ${CMD_LINE} is safe here.
CMD_LINE="timeout 2m pytest -vs python/ucxx/_lib/tests/"
log_command "${CMD_LINE}"
${CMD_LINE}
}
run_tests_async() {
# Run the async (asyncio API) Python test suite under one configuration.
#
# Arguments:
#   $1 PROGRESS_MODE              worker progress mode (e.g. "thread")
#   $2 ENABLE_DELAYED_SUBMISSION  1/0 toggle exported as UCXPY_ENABLE_DELAYED_SUBMISSION
#   $3 ENABLE_PYTHON_FUTURE       1/0 toggle exported as UCXPY_ENABLE_PYTHON_FUTURE
#   $4 SKIP                       non-zero to skip this (known-unstable) combination
PROGRESS_MODE=$1
ENABLE_DELAYED_SUBMISSION=$2
ENABLE_PYTHON_FUTURE=$3
SKIP=$4
# CMD_LINE is only used for logging; the env-var prefixes below must be part
# of the actual command invocation, so the command is repeated explicitly.
CMD_LINE="UCXPY_PROGRESS_MODE=${PROGRESS_MODE} UCXPY_ENABLE_DELAYED_SUBMISSION=${ENABLE_DELAYED_SUBMISSION} UCXPY_ENABLE_PYTHON_FUTURE=${ENABLE_PYTHON_FUTURE} timeout 20m pytest -vs python/ucxx/_lib_async/tests/ --durations=50"
if [ $SKIP -ne 0 ]; then
echo -e "\e[1;33mSkipping unstable test: ${CMD_LINE}\e[0m"
else
log_command "${CMD_LINE}"
UCXPY_PROGRESS_MODE=${PROGRESS_MODE} UCXPY_ENABLE_DELAYED_SUBMISSION=${ENABLE_DELAYED_SUBMISSION} UCXPY_ENABLE_PYTHON_FUTURE=${ENABLE_PYTHON_FUTURE} timeout 20m pytest -vs python/ucxx/_lib_async/tests/ --durations=50
fi
}
run_py_benchmark() {
# Run the ucxx.benchmarks.send_recv Python benchmark under one configuration.
#
# Arguments:
#   $1 BACKEND                    benchmark backend (e.g. "ucxx-core", "ucxx-async")
#   $2 PROGRESS_MODE              worker progress mode (e.g. "thread")
#   $3 ASYNCIO_WAIT               non-zero to pass --asyncio-wait to the benchmark
#   $4 ENABLE_DELAYED_SUBMISSION  1/0 toggle exported as UCXPY_ENABLE_DELAYED_SUBMISSION
#   $5 ENABLE_PYTHON_FUTURE       1/0 toggle exported as UCXPY_ENABLE_PYTHON_FUTURE
#   $6 N_BUFFERS                  number of buffers per transfer (multi-buffer API)
#   $7 SLOW                       non-zero to warn that this combination is slow
BACKEND=$1
PROGRESS_MODE=$2
ASYNCIO_WAIT=$3
ENABLE_DELAYED_SUBMISSION=$4
ENABLE_PYTHON_FUTURE=$5
N_BUFFERS=$6
SLOW=$7
# Translate the numeric flag into the actual command-line option (or nothing).
if [ $ASYNCIO_WAIT -ne 0 ]; then
ASYNCIO_WAIT="--asyncio-wait"
else
ASYNCIO_WAIT=""
fi
CMD_LINE="UCXPY_ENABLE_DELAYED_SUBMISSION=${ENABLE_DELAYED_SUBMISSION} UCXPY_ENABLE_PYTHON_FUTURE=${ENABLE_PYTHON_FUTURE} timeout 2m python -m ucxx.benchmarks.send_recv --backend ${BACKEND} -o cupy --reuse-alloc -n 8MiB --n-buffers $N_BUFFERS --progress-mode ${PROGRESS_MODE} ${ASYNCIO_WAIT}"
# Workaround for https://github.com/rapidsai/ucxx/issues/15
CMD_LINE="UCX_KEEPALIVE_INTERVAL=1ms ${CMD_LINE}"
log_command "${CMD_LINE}"
if [ $SLOW -ne 0 ]; then
echo -e "\e[1;33mSLOW BENCHMARK: it may seem like a deadlock but will eventually complete.\e[0m"
fi
# CMD_LINE above is only used for logging; env-var prefixes must be part of
# the invocation itself, so the command is repeated explicitly here.
UCX_KEEPALIVE_INTERVAL=1ms UCXPY_ENABLE_DELAYED_SUBMISSION=${ENABLE_DELAYED_SUBMISSION} UCXPY_ENABLE_PYTHON_FUTURE=${ENABLE_PYTHON_FUTURE} timeout 2m python -m ucxx.benchmarks.send_recv --backend ${BACKEND} -o cupy --reuse-alloc -n 8MiB --n-buffers $N_BUFFERS --progress-mode ${PROGRESS_MODE} ${ASYNCIO_WAIT}
}
run_distributed_ucxx_tests() {
# Run the distributed-ucxx (Dask Distributed comm backend) test suite.
#
# Arguments:
#   $1 PROGRESS_MODE              worker progress mode (e.g. "polling", "thread")
#   $2 ENABLE_DELAYED_SUBMISSION  1/0 toggle exported as UCXPY_ENABLE_DELAYED_SUBMISSION
#   $3 ENABLE_PYTHON_FUTURE       1/0 toggle exported as UCXPY_ENABLE_PYTHON_FUTURE
PROGRESS_MODE=$1
ENABLE_DELAYED_SUBMISSION=$2
ENABLE_PYTHON_FUTURE=$3
CMD_LINE="UCXPY_PROGRESS_MODE=${PROGRESS_MODE} UCXPY_ENABLE_DELAYED_SUBMISSION=${ENABLE_DELAYED_SUBMISSION} UCXPY_ENABLE_PYTHON_FUTURE=${ENABLE_PYTHON_FUTURE} timeout 10m pytest -vs python/distributed-ucxx/distributed_ucxx/tests/"
# Workaround for https://github.com/rapidsai/ucxx/issues/15
# CMD_LINE="UCX_KEEPALIVE_INTERVAL=1ms ${CMD_LINE}"
log_command "${CMD_LINE}"
UCXPY_PROGRESS_MODE=${PROGRESS_MODE} UCXPY_ENABLE_DELAYED_SUBMISSION=${ENABLE_DELAYED_SUBMISSION} UCXPY_ENABLE_PYTHON_FUTURE=${ENABLE_PYTHON_FUTURE} timeout 10m pytest -vs python/distributed-ucxx/distributed_ucxx/tests/
}
rapids-logger "Downloading artifacts from previous jobs"
# Conda channel containing the C++/Python packages built earlier in this pipeline.
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
rapids-mamba-retry install \
--channel "${CPP_CHANNEL}" \
libucxx ucxx distributed-ucxx
# TODO: Perhaps install from conda? We need distributed installed in developer
# mode to provide test utils, but that's probably not doable from conda packages.
rapids-logger "Install Distributed in developer mode"
git clone https://github.com/dask/distributed /tmp/distributed
pip install -e /tmp/distributed
print_ucx_config
rapids-logger "Run tests with conda package"
rapids-logger "Python Core Tests"
run_tests
rapids-logger "Python Async Tests"
# run_tests_async PROGRESS_MODE ENABLE_DELAYED_SUBMISSION ENABLE_PYTHON_FUTURE SKIP
run_tests_async thread 0 0 0
run_tests_async thread 1 1 0
rapids-logger "Python Benchmarks"
# run_py_benchmark BACKEND PROGRESS_MODE ASYNCIO_WAIT ENABLE_DELAYED_SUBMISSION ENABLE_PYTHON_FUTURE NBUFFERS SLOW
run_py_benchmark ucxx-core thread 0 0 0 1 0
run_py_benchmark ucxx-core thread 1 0 0 1 0
# Async-backend benchmarks with 1 and 8 buffers per transfer; only run
# outside CUDA 11.2 (reason not stated here — presumably a known issue).
for nbuf in 1 8; do
if [[ ! $RAPIDS_CUDA_VERSION =~ 11.2.* ]]; then
# run_py_benchmark BACKEND PROGRESS_MODE ASYNCIO_WAIT ENABLE_DELAYED_SUBMISSION ENABLE_PYTHON_FUTURE NBUFFERS SLOW
run_py_benchmark ucxx-async thread 0 0 0 ${nbuf} 0
run_py_benchmark ucxx-async thread 0 0 1 ${nbuf} 0
run_py_benchmark ucxx-async thread 0 1 0 ${nbuf} 0
run_py_benchmark ucxx-async thread 0 1 1 ${nbuf} 0
fi
done
rapids-logger "Distributed Tests"
# run_distributed_ucxx_tests PROGRESS_MODE ENABLE_DELAYED_SUBMISSION ENABLE_PYTHON_FUTURE
run_distributed_ucxx_tests polling 0 0
run_distributed_ucxx_tests thread 0 0
run_distributed_ucxx_tests thread 0 1
run_distributed_ucxx_tests thread 1 0
run_distributed_ucxx_tests thread 1 1
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/ci/test_cpp.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
set -euo pipefail
source "$(dirname "$0")/test_utils.sh"
rapids-logger "Create test conda environment"
. /opt/conda/etc/profile.d/conda.sh
# Generate the conda environment spec for the `test_cpp` file key and
# materialize it as the `test` environment.
rapids-dependency-file-generator \
  --output conda \
  --file_key test_cpp \
  --matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch)" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n test
conda activate test
rapids-print-env
print_system_stats
# Location of the installed test/benchmark/example binaries.
BINARY_PATH=${CONDA_PREFIX}/bin
# Base port; run_port_retry increments this on every attempt.
_SERVER_PORT=12345
run_tests() {
# Execute the C++ gtest suite, capped at ten minutes.
# UCX_TCP_CM_REUSEADDR=y lets back-to-back runs rebind the same TCP port.
GTEST_CMD="timeout 10m ${BINARY_PATH}/gtests/libucxx/UCXX_TEST"
log_command "${GTEST_CMD}"
UCX_TCP_CM_REUSEADDR=y ${GTEST_CMD}
}
run_benchmark() {
# Run the ucxx_perftest benchmark: launch a server in the background, then
# drive it with a client on the same port. Returns the client's exit status.
# Args: $1 server port, $2 progress mode (polling/blocking/thread-*/wait).
SERVER_PORT=$1
PROGRESS_MODE=$2
CMD_LINE_SERVER="timeout 1m ${BINARY_PATH}/benchmarks/libucxx/ucxx_perftest -s 8388608 -r -n 20 -m ${PROGRESS_MODE} -p ${SERVER_PORT}"
CMD_LINE_CLIENT="timeout 1m ${BINARY_PATH}/benchmarks/libucxx/ucxx_perftest -s 8388608 -r -n 20 -m ${PROGRESS_MODE} -p ${SERVER_PORT} 127.0.0.1"
log_command "${CMD_LINE_SERVER}"
UCX_TCP_CM_REUSEADDR=y ${CMD_LINE_SERVER} &
SERVER_PID=$!
# Give the server a moment to start listening before the client connects.
sleep 1
log_command "${CMD_LINE_CLIENT}"
${CMD_LINE_CLIENT}
CLIENT_STATUS=$?
# Reap the background server so a failed run doesn't leave it holding the
# port until its 1m timeout expires (which would defeat the caller's retry).
if [ ${CLIENT_STATUS} -ne 0 ]; then
  kill ${SERVER_PID} 2>/dev/null || true
fi
wait ${SERVER_PID} 2>/dev/null || true
return ${CLIENT_STATUS}
}
run_example() {
# Run the basic libucxx example binary, capped at one minute.
# Args: $1 port number, $2 progress mode.
# UCX_TCP_CM_REUSEADDR=y lets repeated runs rebind the same TCP port.
PROGRESS_MODE=$2
SERVER_PORT=$1
EXAMPLE_CMD="timeout 1m ${BINARY_PATH}/examples/libucxx/ucxx_example_basic -m ${PROGRESS_MODE} -p ${SERVER_PORT}"
log_command "${EXAMPLE_CMD}"
UCX_TCP_CM_REUSEADDR=y ${EXAMPLE_CMD}
}
run_port_retry() {
# Retry a benchmark or example run up to MAX_ATTEMPTS times, bumping the
# port on every attempt to dodge `Device is busy` from lingering listeners.
# Args: $1 max attempts, $2 run type ("benchmark" or "example"),
#       $3 progress mode. Exits the script on persistent failure.
MAX_ATTEMPTS=${1}
RUN_TYPE=${2}
PROGRESS_MODE=${3}
# Default to failure so the status check below is well-defined under
# `set -u` even if the loop body never executes (e.g. MAX_ATTEMPTS < 1).
LAST_STATUS=1
set +e
for attempt in $(seq 1 ${MAX_ATTEMPTS}); do
  echo "Attempt ${attempt}/${MAX_ATTEMPTS} to run ${RUN_TYPE}"
  _SERVER_PORT=$((_SERVER_PORT + 1)) # Use different ports every time to prevent `Device is busy`
  if [[ "${RUN_TYPE}" == "benchmark" ]]; then
    run_benchmark ${_SERVER_PORT} ${PROGRESS_MODE}
  elif [[ "${RUN_TYPE}" == "example" ]]; then
    run_example ${_SERVER_PORT} ${PROGRESS_MODE}
  else
    set -e
    echo "Unknown test type \"${RUN_TYPE}\""
    exit 1
  fi
  LAST_STATUS=$?
  if [ ${LAST_STATUS} -eq 0 ]; then
    break;
  fi
  sleep 1
done
set -e
if [ ${LAST_STATUS} -ne 0 ]; then
  echo "Failure running ${RUN_TYPE} after ${MAX_ATTEMPTS} attempts"
  exit $LAST_STATUS
fi
}
rapids-logger "Downloading artifacts from previous jobs"
# Install the conda packages built earlier in this CI pipeline.
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
rapids-mamba-retry install \
  --channel "${CPP_CHANNEL}" \
  libucxx libucxx-examples libucxx-tests
print_ucx_config
rapids-logger "Run tests with conda package"
rapids-logger "C++ Tests"
run_tests
rapids-logger "C++ Benchmarks"
# Exercise every supported progress mode for both the benchmark and example
# binaries, with port retries to dodge transient `Device is busy` errors.
# run_port_retry MAX_ATTEMPTS RUN_TYPE PROGRESS_MODE
run_port_retry 10 "benchmark" "polling"
run_port_retry 10 "benchmark" "blocking"
run_port_retry 10 "benchmark" "thread-polling"
run_port_retry 10 "benchmark" "thread-blocking"
run_port_retry 10 "benchmark" "wait"
rapids-logger "C++ Examples"
# run_port_retry MAX_ATTEMPTS RUN_TYPE PROGRESS_MODE
run_port_retry 10 "example" "polling"
run_port_retry 10 "example" "blocking"
run_port_retry 10 "example" "thread-polling"
run_port_retry 10 "example" "thread-blocking"
run_port_retry 10 "example" "wait"
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/ci/check_style.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
set -euo pipefail
rapids-logger "Create checks conda environment"
. /opt/conda/etc/profile.d/conda.sh
# Generate the conda environment spec for the `checks` file key and
# materialize it as the `checks` environment.
rapids-dependency-file-generator \
  --output conda \
  --file_key checks \
  --matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n checks
conda activate checks
# Run pre-commit checks
pre-commit run --all-files --show-diff-on-failure
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/ci/build_cpp.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
set -euo pipefail
source rapids-env-update
# Use Ninja for the CMake build inside the conda recipe.
export CMAKE_GENERATOR=Ninja
rapids-print-env
rapids-logger "Begin C++ and Python builds"
rapids-conda-retry mambabuild \
  conda/recipes/ucxx
# Publish the built packages so downstream test jobs can install them.
rapids-upload-conda-to-s3 cpp
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/ci/test_utils.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
log_command() {
# Print the command line about to run, in bold, for CI log readability.
# Args: $1 full command line as a single string.
CMD_LINE=$1
printf '\e[1mRunning: \n %s\e[0m\n' "${CMD_LINE}"
}
print_system_stats() {
# Log GPU and network-interface information about the CI host, to aid
# debugging of environment-dependent failures.
rapids-logger "Check GPU usage"
nvidia-smi
rapids-logger "Check NICs"
# Print the hostname entry (last line's first field) then the full hosts file.
awk 'END{print $1}' /etc/hosts
cat /etc/hosts
}
print_ucx_config() {
# Log the UCX version and build configuration currently installed.
rapids-logger "UCX Version and Build Configuration"
ucx_info -v
}
| 0 |
rapidsai_public_repos/ucxx/ci | rapidsai_public_repos/ucxx/ci/release/update-version.sh | #!/bin/bash
########################
# UCXX Version Updater #
########################
## Usage
# bash update-version.sh <new_version>
# Format is Major.Minor.Patch - no leading 'v' or trailing 'a'
# Example: 0.30.00
# Full Major.Minor.Patch version passed on the command line.
NEXT_FULL_TAG=$1
#Get <major>.<minor> for next version
NEXT_MAJOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[1]}')
NEXT_MINOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[2]}')
NEXT_SHORT_TAG=${NEXT_MAJOR}.${NEXT_MINOR}
# Get RAPIDS version associated w/ ucx-py version
NEXT_RAPIDS_VERSION="$(curl -sL https://version.gpuci.io/ucx-py/${NEXT_SHORT_TAG})"
# Need to distutils-normalize the versions for some use cases
NEXT_SHORT_TAG_PEP440=$(python -c "from setuptools.extern import packaging; print(packaging.version.Version('${NEXT_RAPIDS_VERSION}'))")
echo "Next tag is ${NEXT_SHORT_TAG_PEP440}"
echo "Preparing release: $NEXT_FULL_TAG"
# Inplace sed replace; workaround for Linux and Mac
function sed_runner() {
# In-place sed replace that works on both GNU (Linux) and BSD (macOS) sed:
# BSD sed requires a suffix argument to -i, so write a .bak backup and
# delete it afterwards. Expansions are quoted so expressions and paths
# containing spaces or glob characters pass through intact.
# Args: $1 sed expression, $2 target file.
sed -i.bak "$1" "$2" && rm -f "${2}.bak"
}
# C++ update
sed_runner 's/'"libucxx_version .*)"'/'"libucxx_version ${NEXT_FULL_TAG})"'/g' cpp/CMakeLists.txt
# Python updates
sed_runner 's/'"ucxx_version .*)"'/'"ucxx_version ${NEXT_FULL_TAG})"'/g' python/CMakeLists.txt
sed_runner "s/^__version__ = .*/__version__ = \"${NEXT_FULL_TAG}\"/g" python/ucxx/__init__.py
sed_runner "s/^version = .*/version = \"${NEXT_FULL_TAG}\"/g" python/pyproject.toml
sed_runner "s/^__version__ = .*/__version__ = \"${NEXT_FULL_TAG}\"/g" python/distributed-ucxx/distributed_ucxx/__init__.py
sed_runner "s/^version = .*/version = \"${NEXT_FULL_TAG}\"/g" python/distributed-ucxx/pyproject.toml
# bump RAPIDS libs
sed_runner "/- librmm =/ s/=.*/=${NEXT_RAPIDS_VERSION}/g" conda/recipes/ucxx/meta.yaml
sed_runner "/- rmm =/ s/=.*/=${NEXT_RAPIDS_VERSION}/g" conda/recipes/ucxx/meta.yaml
# RAPIDS packages whose pins must track the new RAPIDS release in the
# dependency/environment specs and in pyproject.toml.
DEPENDENCIES=(
  cudf
  dask-cuda
  dask-cudf
  librmm
  rmm
)
for DEP in "${DEPENDENCIES[@]}"; do
  for FILE in dependencies.yaml conda/environments/*.yaml; do
    sed_runner "/-.* ${DEP}==/ s/==.*/==${NEXT_SHORT_TAG_PEP440}\.*/g" ${FILE};
  done
  sed_runner "/\"${DEP}==/ s/==.*\"/==${NEXT_SHORT_TAG_PEP440}\.*\"/g" python/pyproject.toml;
done
# rapids-cmake version
sed_runner 's/'"branch-.*\/RAPIDS.cmake"'/'"branch-${NEXT_RAPIDS_VERSION}\/RAPIDS.cmake"'/g' fetch_rapids.cmake
# Point CI workflows at the matching shared-workflows branch.
for FILE in .github/workflows/*.yaml; do
  sed_runner "/shared-workflows/ s/@.*/@branch-${NEXT_RAPIDS_VERSION}/g" "${FILE}"
done
| 0 |
rapidsai_public_repos | rapidsai_public_repos/benchmark/README.md | # RAPIDS Benchmark
This repo contains tools for benchmarking RAPIDS projects, consisting currently of a plugin to [pytest](https://docs.pytest.org/en/latest) that allows it to run benchmarks to measure execution time and GPU memory usage.
## Contributing Guide
Review [CONTRIBUTING.md](CONTRIBUTING.md) for details about the benchmarking infrastructure relevant to maintaining it (implementation details, design decisions, etc.)
## Benchmarking use cases
### Developer Desktop use case
* Developers write benchmarks using either C++ and the GBench framework, or in Python using the pytest framework with a benchmarking plugin (`rapids-pytest-benchmark`)
* Developers analyze results using the reporting capability of GBench and pytest, or using ASV through the use of the `rapids-pytest-benchmark` `--benchmark-asv-*` options (for python) or a script that converts GBench JSON output for use with ASV (for C++).
### Continuous Benchmarking (CB) - _not fully supported, still WIP_
* Similar in concept to CI, CB runs the repo's benchmark suite (or a subset of it) on a PR to help catch regressions prior to merging
* CB will run the same benchmark code used for the Developer Desktop use case using the same tools (python use `pytest` + `rapids-pytest-benchmark`, C++ uses `GBench` + an output conversion script.)
* CB will update an ASV plot containing only points from the last nightly run and the last release for comparison, then data will be added for each commit within the PR. This will allow a dev to see the effects of their PR changes and give them the opportunity to fix a regression prior to merging.
* CB can be configured to optionally fail a PR if performance degraded beyond an allowable tolerance (configured by the devs)
### Nightly Benchmarking
* A scheduled nightly job will be setup up to run the same benchmarks using the same tools, like the desktop and CB cases above.
* The benchmarks will use the ASV output options (`--benchmark-asv-output-dir`) to generate updates to the nightly ASV database for each repo, which will then be used to render HTML for viewing.
## Writing and running python benchmarks
* Benchmarks for RAPIDS Python APIs can be written in python and run using `pytest` and the `rapids-pytest-benchmark` plugin
* `pytest` is the same tool used for running unit tests, and allows developers to easily transition back and forth between ensuring functional correctness with unit tests, and adequate performance using benchmarks
* `rapids-pytest-benchmark` is a plugin to `pytest` that extends another plugin named `pytest-benchmark` with GPU measurements and ASV output capabilities. `pytest-benchmark` is described [here](https://pytest-benchmark.readthedocs.io/en/latest)
* An example of a benchmark running session using `pytest` is below:
```
mymachine:/Projects/cugraph/benchmarks# pytest -v -m small --no-rmm-reinit -k pagerank
========================================================================================================= test session starts ==========================================================================================================
platform linux -- Python 3.6.10, pytest-5.4.3, py-1.8.1, pluggy-0.13.1 -- /opt/conda/envs/rapids/bin/python
cachedir: .pytest_cache
benchmark: 3.2.3 (defaults: timer=time.perf_counter disable_gc=False min_rounds=3 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=True warmup_iterations=1)
hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/Projects/cugraph/benchmarks/.hypothesis/examples')
rapids_pytest_benchmark: 0.0.9
rootdir: /Projects/cugraph/benchmarks, inifile: pytest.ini
plugins: arraydiff-0.3, benchmark-3.2.3, doctestplus-0.7.0, astropy-header-0.1.2, openfiles-0.5.0, remotedata-0.3.1, hypothesis-5.16.0, cov-2.9.0, timeout-1.3.4, rapids-pytest-benchmark-0.0.9
collected 289 items / 287 deselected / 2 selected
bench_algos.py::bench_pagerank[ds=../datasets/csv/directed/cit-Patents.csv,mm=False,pa=False] PASSED [ 50%]
bench_algos.py::bench_pagerank[ds=../datasets/csv/undirected/hollywood.csv,mm=False,pa=False] PASSED [100%]
---------------------------------------------------------------------------------------------------------- benchmark: 2 tests ---------------------------------------------------------------------------------------------------------
Name (time in ms, mem in bytes) Min Max Mean StdDev Outliers GPU mem Rounds GPU Rounds
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
bench_pagerank[ds=../datasets/csv/directed/cit-Patents.csv,mm=False,pa=False] 99.1144 (1.0) 100.3615 (1.0) 99.8562 (1.0) 0.3943 (1.0) 3;0 335,544,320 (2.91) 10 10
bench_pagerank[ds=../datasets/csv/undirected/hollywood.csv,mm=False,pa=False] 171.1847 (1.73) 172.5704 (1.72) 171.9952 (1.72) 0.5118 (1.30) 2;0 115,343,360 (1.0) 6 6
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Legend:
Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.
OPS: Operations Per Second, computed as 1 / Mean
================================================================================================== 2 passed, 287 deselected in 15.17s ==================================================================================================
```
The above example demonstrates just a few of the features available:
* `-m small` - this specifies that only benchmarks using the "small" marker should be run. [Markers](https://docs.pytest.org/en/latest/example/markers.html) allow developers to classify benchmarks and even parameters to benchmarks for easily running subsets of benchmarks interactively. In this case, benchmarks were written using [parameters](https://docs.pytest.org/en/latest/parametrize.html), and the parameters have markers. These benchmarks have a parameter to define which dataset they should read, and in this case, those marked with the "small" marker are the only ones used for the benchmark runs.
* `--no-rmm-reinit` - this is a custom option just for these benchmarks. `pytest` allows users to define their own options for special cases using the [`conftest.py` file](https://docs.pytest.org/en/stable/writing_plugins.html#conftest-py-plugins) and the [`pytest_addoption` API](https://docs.pytest.org/en/stable/writing_plugins.html?highlight=pytest_addoption#using-hooks-in-pytest-addoption)
* `-k pagerank` - the [`-k`](https://docs.pytest.org/en/latest/usage.html#specifying-tests-selecting-tests) pytest option allows a user to filter the tests (benchmarks) run to those that match a pattern, in this case, the benchmark names must contain the string "pagerank".
* `rapids-pytest-benchmark` specifically adds these features to `pytest-benchmark`:
* The `gpubenchmark` fixture. This is an extension of the `benchmark` fixture provided by `pytest-benchmark`. A developer simply replaces `benchmark` (described [here](https://pytest-benchmark.readthedocs.io/en/latest/usage.html)) with `gpubenchmark` to use the added features.
* The following CLI options:
```
--benchmark-gpu-device=GPU_DEVICENO
GPU device number to observe for GPU metrics.
--benchmark-gpu-max-rounds=BENCHMARK_GPU_MAX_ROUNDS
Maximum number of rounds to run the test/benchmark
during the GPU measurement phase. If not provided, will
run the same number of rounds performed for the runtime
measurement.
--benchmark-gpu-disable
Do not perform GPU measurements when using the
gpubenchmark fixture, only perform runtime measurements.
--benchmark-asv-output-dir=ASV_DB_DIR
ASV "database" directory to update with benchmark
results.
--benchmark-asv-metadata=ASV_DB_METADATA
Metadata to be included in the ASV report. For example:
"machineName=my_machine2000, gpuType=FastGPU3,
arch=x86_64". If not provided, best-guess values will be
derived from the environment. Valid metadata is:
"machineName", "cudaVer", "osType", "pythonVer",
"commitRepo", "commitBranch", "commitHash",
"commitTime", "gpuType", "cpuType", "arch", "ram",
"gpuRam"
```
* The report pytest-benchmark prints to the console has also been updated to include the GPU memory usage and the number of GPU benchmark rounds run when a developer uses the `gpubenchmark` fixture, as shown above in the example (`GPU mem` and `GPU Rounds`).
* A common pattern with both unit tests and (now) benchmarks is to define a standard set of initial options in a [`pytest.ini`](https://docs.pytest.org/en/latest/customize.html#configuration-file-formats), something similar to the following:
```
[pytest]
addopts =
--benchmark-warmup=on
--benchmark-warmup-iterations=1
--benchmark-min-rounds=3
--benchmark-columns="min, max, mean, stddev, outliers, rounds"
markers =
ETL: benchmarks for ETL steps
small: small datasets
directed: directed datasets
undirected: undirected datasets
python_classes =
Bench*
Test*
python_files =
bench_*
test_*
python_functions =
bench_*
test_*
```
The above example adds a specific set of options that a particular project may always want, registers the markers used by the benchmarks (markers should be [registered to prevent a warning](https://docs.pytest.org/en/latest/example/markers.html#registering-markers)), then defines the pattern pytest should match for class names, file names, and function names. Here it's common to have pytest discover both benchmarks (defined here to have a `bench` prefix) and tests (`test` prefix) to allow users to run both in a single run.
Details about writing benchmarks using `pytest-benchmark` (which are 100% applicable to `rapids-pytest-benchmark` if the `gpubenchmark` fixture was used instead) can be found [here](https://pytest-benchmark.readthedocs.io/en/latest/usage.html), and a simple example of a benchmark using the `rapids-pytest-benchmark` features is shown below.
`bench_demo.py`
```
import time
import pytest
@pytest.mark.parametrize("paramA", [0, 2, 5, 9])
def bench_demo(gpubenchmark, paramA):
# Note: this does not use the GPU at all, so mem usage should be 0
gpubenchmark(time.sleep, (paramA * 0.1))
```
This file is in the same directory as other benchmarks, so the run can be limited to only the benchmark here using `-k`:
```
(rapids) root@f078ef9f2198:/Projects/cugraph/benchmarks# pytest -k demo --benchmark-gpu-max-rounds=1
========================================================= test session starts ==========================================================
platform linux -- Python 3.6.10, pytest-5.4.3, py-1.8.1, pluggy-0.13.1
benchmark: 3.2.3 (defaults: timer=time.perf_counter disable_gc=False min_rounds=3 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=True warmup_iterations=1)
rapids_pytest_benchmark: 0.0.9
rootdir: /Projects/cugraph/benchmarks, inifile: pytest.ini
plugins: arraydiff-0.3, benchmark-3.2.3, doctestplus-0.7.0, astropy-header-0.1.2, openfiles-0.5.0, remotedata-0.3.1, hypothesis-5.16.0, cov-2.9.0, timeout-1.3.4, rapids-pytest-benchmark-0.0.9
collected 293 items / 289 deselected / 4 selected
bench_demo.py .... [100%]
------------------------------------------------------------------------------------- benchmark: 4 tests -----------------------------------------------------------------------------------------------
Name (time in ns, mem in bytes) Min Max Mean StdDev Outliers GPU mem Rounds GPU Rounds
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
bench_demo[0] 782.3110 (1.0) 2,190.8432 (1.0) 789.0240 (1.0) 12.3101 (1.0) 453;1739 0 (1.0) 126561 1
bench_demo[2] 200,284,559.2797 (>1000.0) 200,347,900.3906 (>1000.0) 200,329,241.1566 (>1000.0) 26,022.0129 (>1000.0) 1;0 0 (1.0) 5 1
bench_demo[5] 500,606,104.7316 (>1000.0) 500,676,967.2036 (>1000.0) 500,636,843.3436 (>1000.0) 36,351.5426 (>1000.0) 1;0 0 (1.0) 3 1
bench_demo[9] 901,069,939.1365 (>1000.0) 901,218,764.4839 (>1000.0) 901,159,526.1594 (>1000.0) 78,917.8600 (>1000.0) 1;0 0 (1.0) 3 1
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Legend:
Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.
OPS: Operations Per Second, computed as 1 / Mean
================================================== 4 passed, 289 deselected in 17.73s ==================================================
```
Below are some important points about this run:
* Since the `-v` option is not used, the compact, abbreviated output is generated using a single `.` for each run (4 in this case)
```
bench_demo.py ....
```
* Notice the time units are in nanoseconds. This was used since the fastest runs were too fast to display in ms or even us (benchmarking the sleep of 0 seconds)
* The `pytest-benchmark` defaults are shown in the output, in particular, `min_rounds=3` and `max_time=1.0` are of interest.
* `min_rounds` is the minimum number of times the code being benchmarked will be run in order to compute meaningful stats (min, max, mean, std.dev., etc.). Since this is a minimum, pytest-benchmark will often run (many) more rounds than the minimum.
* `max_time` is used to help determine how many rounds can be run by providing a maximum time, in seconds, for each test/benchmark to run as many rounds as possible in that duration.
* The `--benchmark-gpu-max-rounds=1` option had to be specified. By default, `rapids-pytest-benchmark` will run as many rounds for the separate GPU measurements as were performed by `pytest-benchmark` for the time measurements. Unfortunately, obtaining GPU measurements is very expensive, and much slower than just looking at a timer before and after a call. Because the first parameter was 0, which was a benchmark on the call `time.sleep(0)`, `pytest-benchmark` was able to get 126561 rounds in during the 1.0 second `max_time` duration. Performing 126561 iterations of GPU measurements takes a very long time, so the `--benchmark-gpu-max-rounds=1` option was given to limit the GPU measurements to just 1 round, which is shown in the report. Limiting GPU rounds to a small number is usually acceptable because 1) any warmup rounds that influence GPU measurements were done during the time measurement rounds (which all run to completion before GPU measurements are done), 2) GPU measurements (for memory) are not subject to jitter like time measurements are, so in other words, running the same code will allocate the same number of bytes each time no matter how many times it's run. The reason someone might want to do >1 round at all for GPU measurements is the current GPU measuring code uses a polling technique, which could miss spikes in memory usage (and this becomes much more common the faster the algorithms being run are), and running multiple times helps catch spikes that may have been missed in a prior run.
* Notes:
* A future version of `rapids-pytest-benchmark` will use RMM's logging feature to record memory alloc/free transactions for an accurate memory usage measurement that isn't susceptible to missing spikes.
* A common option to add to `pytest.ini` is `--benchmark-gpu-max-rounds=3`. Since this is a maximum, the number of rounds could be even lower if the algo being benchmarked is slow, and 3 provides a reasonable number of rounds to catch spikes for faster algos.
* As the args to the benchmarked function get larger, we can see the `min_rounds` coming into play more. For a benchmark of `time.sleep(.5)` and `time.sleep(.9)`, which should only allow for 2 and 1 rounds respectively for a `max_time` of 1.0, the `min_rounds` forced 3 runs for better averaging.
### Adding Custom Metric capturing
`rapids-pytest-benchmark` also supports the addition of arbitrary metrics to your benchmarks. You can write a metric capturing function and use the `addMetric()` attribute from the `gpubenchmark` fixture to add any arbitrary measurement that you want.
Example code:
```
def bench_bfs(gpubenchmark, anyGraphWithAdjListComputed):
# This is where we'd call NetworkX.BFS and get its result for comparison
networkXResult = 3
def checkAccuracy(bfsResult):
"""
This function will be called by the benchmarking framework and will be
passed the result of the benchmarked function (in this case,
cugraph.bfs).
Compare that result to NetworkX.BFS()
"""
s=0
for d in bfsResult['distance'].values_host:
s+=d
r = float(s/len(bfsResult))
result= abs(((r - networkXResult) / networkXResult) * 100)
return result
gpubenchmark.addMetric(checkAccuracy, "accuracy", "percent")
gpubenchmark(cugraph.bfs, anyGraphWithAdjListComputed, 0)
```
In this example, cuGraph's BFS algorithm is being benchmarked. In addition to logging the default measurements, it will also log an accuracy metric. The `checkAccuracy()` function is defined which will calculate and return the accuracy value. The `addMetric()` attribute is sent the `checkAccuracy()` callable, a string representing the name of the measurement, and another string representing the unit of measurement.
## Writing and running C++ benchmarks using gbench
TBD
## Using asvdb from python and the command line
[`asvdb`](https://github.com/rapidsai/asvdb) is a library and command-line utility for reading and writing benchmark results from/to an ASV "database" as described [here](https://asv.readthedocs.io/en/stable/dev.html?highlight=%24results_dir#benchmark-suite-layout-and-file-formats).
* `asvdb` is a key component in the benchmark infrastructure suite in that it is the destination for benchmark results measured by the developer's benchmark code, and the source of data for the benchmarking report tools (in this case just ASV).
* Several examples for both reading and writing a database using the CLI and the API are available [here](https://github.com/rapidsai/asvdb/blob/main/README.md)
## Benchmarking old commits
* It's highly likely that a nightly benchmark run will not be run for a merge commit where the actual regression was introduced. At the moment, the nightly benchmark runs will run on _the last merge commit of the day_, and while the code may contain the regression, the commit that was benchmarked may not be the commit to examine when looking for it (it may be in another merge commit that happened earlier in the day, between the current benchmark run and the run from the day before).
* Below is a pseudo-script written as part of benchmarking a series of old commits used to find a regression. This process illustrates some (hopefully uncommon) scenarios that actually happened, which can greatly complicate the process. The script captures a procedure run in a RAPIDS `devel` docker container:
```
# uninstall rmm cudf cugraph
# If installed via a local from-source build, use pip and manually remove C++ libs, else use conda
pip uninstall -y rmm cudf dask-cudf cugraph
rm -rf /opt/conda/envs/rapids/include/libcudf
find /opt/conda -type f -name "librmm*" -exec rm -f {} \;
find /opt/conda -type f -name "libcudf*" -exec rm -f {} \;
find /opt/conda -type f -name "libcugraph*" -exec rm -f {} \;
#conda remove -y librmm rmm libcudf cudf dask-cudf libcugraph cugraph
# confirm packages uninstalled with conda list, uninstall again if still there (pip uninstall sometimes needs to be run >once for some reason)
conda list rmm; conda list cudf; conda list cugraph
# install numba=0.48 since older cudf versions being used here need it
conda install -y numba=0.48
# (optional) clone rmm, cudf, cugraph in a separate location if you don't want to modify your working copies (recommended to ensure we're starting with a clean set of sources with no artifacts)
git clone https://github.com/rapidsai/rmm
git clone https://github.com/rapidsai/cudf
git clone https://github.com/rapidsai/cugraph
# copy benchmarks dir from current cugraph for use later in older cugraph
cp -r cugraph/benchmarks /tmp
########################################
# set RMM to old version: 63ebb53bf21a58b98b4596f7b49a46d1d821b05d
#cd <rmm repo>
git reset --hard 63ebb53bf21a58b98b4596f7b49a46d1d821b05d
# install submodules
git submodule update --init --remote --recursive
# confirm the right version (Apr 7)
git log -n1
# build and install RMM
./build.sh
########################################
# set cudf to pre-regression version: 12bd707224680a759e4b274f9ce4013216bf3c1f
#cd <cudf repo>
git reset --hard 12bd707224680a759e4b274f9ce4013216bf3c1f
# install submodules
git submodule update --init --remote --recursive
# confirm the right version (Apr 15)
git log -n1
# build and install cudf
./build.sh
########################################
# set cugraph to version old enough to support old cudf version: 95b80b40b25b733f846da49f821951e3026e9588
#cd <cugraph repo>
git reset --hard 95b80b40b25b733f846da49f821951e3026e9588
# cugraph has no git submodules
# confirm the right version (Apr 16)
git log -n1
# build and install cugraph
./build.sh
########################################
# install benchmark tools and datasets
conda install -c rlratzel -y rapids-pytest-benchmark
# get datasets
#cd <cugraph repo>
cd datasets
mkdir csv
cd csv
wget https://data.rapids.ai/cugraph/benchmark/benchmark_csv_data.tgz
tar -zxf benchmark_csv_data.tgz && rm benchmark_csv_data.tgz
# copy benchmarks to cugraph
#cd <cugraph repo>
cp -r /tmp/benchmarks .
# verify cudf in PYTHONPATH is correct version (look for commit hash in version)
python -c "import cudf; print(cudf.__version__)"
# run benchmarks
cd benchmarks
pytest -v -m small --benchmark-autosave --no-rmm-reinit -k "not force_atlas2 and not betweenness_centrality"
# confirm that these results are "fast" - on my machine, BFS mean time was ~30ms
########################################
# uninstall cudf
pip uninstall -y cudf dask-cudf
rm -rf /opt/conda/envs/rapids/include/libcudf
find /opt/conda -type f -name "libcudf*" -exec rm -f {} \;
#conda remove -y libcudf cudf dask-cudf
# set cudf to version of regression: 4009501328166b109a73a0a9077df513186ffc2a
#cd <cudf repo>
git reset --hard 4009501328166b109a73a0a9077df513186ffc2a
# confirm the right version (Apr 15 - Merge pull request #4883 from rgsl888prabhu/4862_getitem_setitem_in_series)
git log -n1
# CLEAN and build and install cudf
./build.sh clean
./build.sh
# verify cudf in PYTHONPATH is correct version (look for commit hash in version)
python -c "import cudf; print(cudf.__version__)"
# run benchmarks
#cd <cugraph repo>/benchmarks
pytest -v -m small --benchmark-autosave --no-rmm-reinit -k "not force_atlas2 and not betweenness_centrality" --benchmark-compare --benchmark-group-by=fullname
# confirm that these results are "slow" - on my machine, BFS mean time was ~75ms, GPU mem used was ~3.5x more
#-------------------------------------------------------------------------------------- benchmark 'bench_algos.py::bench_bfs[ds=../datasets/csv/directed/cit-Patents.csv]': 2 tests ---------------------------------------------------------------------------------------
#Name (time in ms, mem in bytes) Min Max Mean StdDev Median IQR Outliers OPS GPU mem Rounds Iterations
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#bench_bfs[ds=../datasets/csv/directed/cit-Patents.csv] (0001_95b80b4) 27.3090 (1.0) 39.1467 (1.0) 29.5639 (1.0) 2.9815 (1.0) 28.4831 (1.0) 0.8261 (1.0) 5;6 33.8250 (1.0) 117,440,512 (1.0) 34 1
#bench_bfs[ds=../datasets/csv/directed/cit-Patents.csv] (NOW) 70.0455 (2.56) 83.7894 (2.14) 75.5794 (2.56) 3.7335 (1.25) 76.3104 (2.68) 5.2627 (6.37) 5;0 13.2311 (0.39) 432,013,312 (3.68) 15 1
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
```
| 0 |
rapidsai_public_repos | rapidsai_public_repos/benchmark/CHANGELOG.md | # benchmark 0.0.0 (DD Mon YYYY)
## New Features
- PR #62 - Utilize `rmm` log analysis
- PR #45 - Add capability to parse `requirements` metadata field
## Improvements
- ...
## Bug Fixes
- ...
| 0 |
rapidsai_public_repos | rapidsai_public_repos/benchmark/MAINTAINERS.md | # Maintainer's guide
In no particular order, this covers implementation details and common maintenance tasks for the benchmarking tools. To request more information about a topic, please update this document with text that includes a `FIXME` label and a description of the topic/information to be added, and a PR will be submitted with the new information.
## Extending rapids-pytest-benchmark
- This is covered in detail in the [`rapids-pytest-benchmark` README](rapids_pytest_benchmark/README.md).
## Working CB design doc
- There is a diagram proposing potential CB implementation details [here](https://docs.google.com/drawings/d/1LBxqMlJM0DObfSjnK-CK8c-MxAdiFGQOXl9tId3Daaw).
- If S3 is used for storage, a separate diagram proposing potential implementation details is [here](https://docs.google.com/drawings/d/1Cd1QDry1THKmzHpHI8jQETsSVdpncG22xqy72yUWIR0)
## asvdb design decisions
- [`asvdb`](https://github.com/rapidsai/asvdb) is a Python module (and CLI tool) that encapsulates the implementation details of how ASV stores results, allowing other tools in a benchmarking workflow to take advantage of ASV reporting without having to implement their own ASV-compatible data generation.
- One approach looked into for this was to pull out the individual classes in `asv` responsible for reading and writing the ASV "database" and use those directly.
- This was initially rejected because:
- It would either require a copy-and-paste fork of specific classes which are not optimized for this use case, or an added dependency on the entire `asv` package.
- Since `asvdb` started out as a simple abstraction for a single operation (write results to disk for ASV to read), the added work of pulling in classes from another package didn't seem necessary for some simple JSON dumps.
- **However**, now that `asvdb` has grown in scope, using classes from `asv` might be worth revisiting:
- The classes to use would be:
- `asv/config.py` - the ASV configuration (`asv.conf.json`)
- `asv/benchmarks.py` - meta-data about the benchmarks being run (`results/benchmarks.json`)
- `asv/machine.py` - meta-data about the benchmark machine (`<machine name>/machine.json`)
- `asv/results.py` - all results for a single machine and commit hash
- Using the classes from the `asv` project could also facilitate including `asvdb` as a utility in the `asv` project itself, which could greatly simplify things (community maintainers, free upgrades when ASV updates internals, etc.)

## Proposal for including automated notebook benchmarks
- Some teams still prefer to use notebooks for E2E benchmarks. Notebooks are nice because they're easily shared with the community, marketing, and customers, and help to highlight the performance advantages of RAPIDS in ways other benchmarks don't (they display images, real results, highlight our APIs, show plots, etc.)
- We can add notebooks to our benchmark runs as "just another source of benchmark data" by doing the following:
- Create a new magic specifically for benchmarking
- This magic would essentially do what our `gpubenchmark` fixture and other `rapids-pytest-benchmark` features do for python-based benchmarks (gather time and GPU metrics, push results to an ASV database)
- Have the magic used in the `nbtest.sh` script
- At the moment, the script ignores all magics since many are not compatible with a scripted run, but the `gpubenchmark` magic could be
- From there, `asvdb`, ASV, and any other consumer of benchmarks results wouldn't know or care that the result didn't come from python or gbench.
- This was demo'd here: https://nvmeet.webex.com/webappng/sites/nvmeet/recording/playback/ba0ab73c4a364959b0be6de41a40d289
## ops-utils repo tools
- For convenience, the benchmark jobs use tools in the [`ops-utils` repo](https://github.com/rapidsai/ops-utils/tree/main/benchmark), in particular the `updateJenkinsReport.py` script for creating the nightly overview report.
- Another useful script in this repo is one which can return the exact nightly conda package that was used with a particular commit, called [getNearestCondaPackages.py](https://github.com/rapidsai/ops-utils/blob/main/benchmark/getNearestCondaPackages.py)
## Jenkins jobs overview
- Currently defined [here](http://10.33.227.188/job/wip/job/benchmark-pipeline)
- Currently consists of:
1) A job to backup the current ASV results dirs ("databases"). The job keeps the last 20 database updates backed up (which should represent 20 days worth if the benchmark pipeline ran once-per-day everyday). After 20, the job removes the oldest one before adding the newest one. This job is defined [here](http://10.33.227.188/job/wip/job/backup-benchmark-results)
2) The individual benchmark jobs for the different repos. These currently run in parallel on a specific machine. Note that it's important to run on the same machine each time if possible, since different configurations can potentially invalidate benchmark results! Sometimes even different non-GPU aspects of a machine can come into play if code being benchmarked includes some CPU-based computations. In particular, some benchmarks need to compare results to baseline implementations that are CPU-based, so benchmarks like these definitely need to run in a CPU-controlled environment. The most up-to-date benchmark job is cuGraph's, which is defined [here](http://10.33.227.188/job/wip/job/cugraph-e2e-benchmarks)
* As part of the cuGraph job, a custom HTML report is written to provide an easy way to see if a benchmark run failed (since the ASV report itself won't show that), as well as an easy way to go to either the ASV front end (if the jobs ran without errors to see the latest results), or to the individual error logs (if the jobs failed). cuGraph's custom report is [here](http://10.33.227.188:88/asv/cugraph-e2e)
3) The publish job, which publishes the new results into an ASV report using the `asv publish` command on each repo's ASV database. This job is defined [here](http://10.33.227.188/job/wip/job/publish-benchmark-results)
| 0 |
rapidsai_public_repos | rapidsai_public_repos/benchmark/CONTRIBUTING.md | # Contributing to benchmark
If you are interested in contributing to benchmark, your contributions will fall
into three categories:
1. You want to report a bug, feature request, or documentation issue
- File an [issue](https://github.com/rapidsai/benchmark/issues/new/choose)
describing what you encountered or what you want to see changed.
- The RAPIDS team will evaluate the issues and triage them, scheduling
them for a release. If you believe the issue needs priority attention
comment on the issue to notify the team.
2. You want to propose a new Feature and implement it
- Post about your intended feature, and we shall discuss the design and
implementation.
- Once we agree that the plan looks good, go ahead and implement it, using
the [code contributions](#code-contributions) guide below.
3. You want to implement a feature or bug-fix for an outstanding issue
- Follow the [code contributions](#code-contributions) guide below.
- If you need more context on a particular issue, please ask and we shall
provide.
## Maintainer's document
As part of understanding the code in preparation for contributing, a
[maintainer's document](MAINTAINERS.md) is provided which covers the
implementation details and design considerations. Please familiarize yourself
with this before spending time on a contribution.
## Code contributions
### Your first issue
1. Read the project's [README.md](https://github.com/rapidsai/benchmark/blob/main/README.md)
to learn how to setup the development environment
2. Find an issue to work on. The best way is to look for the [good first issue](https://github.com/rapidsai/benchmark/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
or [help wanted](https://github.com/rapidsai/benchmark/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) labels
3. Comment on the issue saying you are going to work on it
4. Code! Make sure to update unit tests!
5. When done, [create your pull request](https://github.com/rapidsai/benchmark/compare)
6. Verify that CI passes all [status checks](https://help.github.com/articles/about-status-checks/). Fix if needed
7. Wait for other developers to review your code and update code as needed
8. Once reviewed and approved, a RAPIDS developer will merge your pull request
Remember, if you are unsure about anything, don't hesitate to comment on issues
and ask for clarifications!
### Seasoned developers
Once you have gotten your feet wet and are more comfortable with the code, you
can look at the prioritized issues of our next release in our [project boards](https://github.com/rapidsai/benchmark/projects).
> **Pro Tip:** Always look at the release board with the highest number for
issues to work on. This is where RAPIDS developers also focus their efforts.
Look at the unassigned issues, and find an issue you are comfortable with
contributing to. Start with _Step 3_ from above, commenting on the issue to let
others know you are working on it. If you have any questions related to the
implementation of the issue, ask them in the issue instead of the PR.
## Attribution
Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md
| 0 |
rapidsai_public_repos | rapidsai_public_repos/benchmark/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos/benchmark | rapidsai_public_repos/benchmark/rapids_pytest_benchmark/README.md | # rapids-pytest-benchmark
`rapids-pytest-benchmark` is a plugin to [`pytest`](https://docs.pytest.org/en/latest/contents.html) that extends the functionality of the [`pytest-benchmark`](https://pytest-benchmark.readthedocs.io/en/latest) plugin by taking advantage of the [hooks exposed by `pytest-benchmark`](https://pytest-benchmark.readthedocs.io/en/latest/hooks.html) where applicable, and the plugin classes directly for all other modifications.
Unfortunately, at the time of this writing, none of the hooks provided by `pytest-benchmark` were helpful. However, the longer-term plan is to upstream new `pytest-benchmark` hooks that allow all the `rapids-pytest-benchmark` functionality to be provided by hooks. This is greatly preferred as a proper extension technique: importing and subclassing another plugin's classes directly is much more fragile, since an update to the "parent" plugin could change class APIs and cause breakages. _Note: the conda recipe specifies exactly `pytest-benchmark=3.2.3` to ensure compatibility with the internal class API_
## Installation
`rapids-pytest-benchmark` is available on [conda](https://anaconda.org/rapidsai/rapids-pytest-benchmark) and can be installed with the following command:
```sh
conda install -c rapidsai rapids-pytest-benchmark
```
## How to use `rapids-pytest-benchmark`
- Install it and confirm that the `--benchmark-gpu-*` and `--benchmark-asv-*` options are shown in `pytest --help`
- Add the `gpubenchmark` fixture to your tests/benchmarks, just as one would do with the `benchmark` fixture described [here](https://pytest-benchmark.readthedocs.io/en/latest/usage.html)
- See the help description for the `--benchmark-gpu-*` and `--benchmark-asv-*` options
- Further details are provided [here](../README.md)
## Implementation details: how `rapids-pytest-benchmark` is pulled into a benchmark run
- `rapids-pytest-benchmark` is a standard `pytest` plugin. See the [`setup.py`](setup.py) file for details on how the `entry_points` specification is used to install the plugin in a way that `pytest` will automatically load it and make it available to users.
- Once loaded, `pytest` looks for various hooks it can call at different points during a `pytest` session in order to allow plugins to extend capabilities. The list of hooks a `pytest` plugin can call are described [here](https://docs.pytest.org/en/latest/reference.html#hook-reference)
- The hooks `rapids-pytest-benchmark` uses are all defined in the [plugin.py](rapids_pytest_benchmark/plugin.py) file.
- Another key contribution a `pytest` plugin can make is to add new [fixtures](https://docs.pytest.org/en/latest/fixture.html#fixture) a test/benchmark author can use. `rapids-pytest-benchmark` adds the `gpubenchmark` fixture in [plugin.py](rapids_pytest_benchmark/plugin.py):
```
@pytest.fixture(scope="function")
def gpubenchmark(request, benchmark):
...
```
- `rapids-pytest-benchmark` is only used if a benchmark uses the `gpubenchmark` fixture.
- The `gpubenchmark` fixture takes advantage of "chaining", i.e. fixtures calling other fixtures, as described [here](https://docs.pytest.org/en/stable/reference.html#fixtures). `gpubenchmark` calls the `pytest-benchmark` **`benchmark`** fixture to create a standard `pytest-benchmark` instance for all time-based metrics.
- `gpubenchmark` then dynamically wraps the `benchmark` instance in an instance of a `GPUBenchmarkFixture`. The `__getattr__()` method defined in the `GPUBenchmarkFixture` class is written to pass all method calls not overridden by `GPUBenchmarkFixture` on to the standard `benchmark` instance. This is different than a standard subclassing technique since it allows for an _already instantiated_ instance of a parent class to be overridden by an instance of a subclass. In fact, the instance that wraps the "parent" instance doesn't even need to be a subclass of it. `GPUBenchmarkFixture` is a subclass just for completeness, and if there's any `isinstance()` calls that expect a standard `Benchmark` instance.
| 0 |
rapidsai_public_repos/benchmark | rapidsai_public_repos/benchmark/rapids_pytest_benchmark/buildconda.sh | #!/bin/bash
set -e
CHANNELS="-c file:///opt/conda/envs/rapids/conda-bld -c rlratzel -c conda-forge"
UPLOAD_FILE=`conda build ${CHANNELS} ./conda --output`
UPLOAD_FILES=$(echo ${UPLOAD_FILE}|sed -e 's/\-py[0-9][0-9]/\-py36/')
UPLOAD_FILES="${UPLOAD_FILES} $(echo ${UPLOAD_FILE}|sed -e 's/\-py[0-9][0-9]/\-py37/')"
conda build ${CHANNELS} --variants="{python: [3.6, 3.7]}" ./conda
if [ "$1" = "--publish" ]; then
anaconda upload ${UPLOAD_FILES}
fi
| 0 |
rapidsai_public_repos/benchmark | rapidsai_public_repos/benchmark/rapids_pytest_benchmark/setup.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup

import rapids_pytest_benchmark

# Package metadata is gathered in one place so the setup() call below stays
# easy to scan.
_SETUP_ARGS = dict(
    name="rapids-pytest-benchmark",
    # Single-source the version from the package itself.
    version=rapids_pytest_benchmark.__version__,
    packages=["rapids_pytest_benchmark"],
    install_requires=["pytest-benchmark", "asvdb", "pynvml", "rmm"],
    # The "pytest11" entry point is how pytest discovers and auto-loads plugins.
    entry_points={"pytest11": ["rapids_benchmark = rapids_pytest_benchmark.plugin"]},
    # Custom PyPI classifier identifying this package as a pytest plugin.
    classifiers=["Framework :: Pytest"],
)

setup(**_SETUP_ARGS)
| 0 |
rapidsai_public_repos/benchmark/rapids_pytest_benchmark | rapidsai_public_repos/benchmark/rapids_pytest_benchmark/rapids_pytest_benchmark/reporting.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import operator

from pytest_benchmark import table as pytest_benchmark_table
from pytest_benchmark import utils as pytest_benchmark_utils

# Re-use pytest-benchmark's float format strings so the GPU columns line up
# with the standard timing columns in the output table.
NUMBER_FMT = pytest_benchmark_table.NUMBER_FMT
ALIGNED_NUMBER_FMT = pytest_benchmark_table.ALIGNED_NUMBER_FMT
# Integer (comma-grouped) equivalents for the byte-count GPU memory columns.
# The Python 2.6 guard mirrors the guards used in pytest-benchmark itself.
INT_NUMBER_FMT = "{0:,d}" if sys.version_info[:2] > (2, 6) else "{0:d}"
ALIGNED_INT_NUMBER_FMT = "{0:>{1},d}{2:<{3}}" if sys.version_info[:2] > (2, 6) else "{0:>{1}d}{2:<{3}}"
class GPUTableResults(pytest_benchmark_table.TableResults):
    """
    Subclass of pytest-benchmark's ``TableResults`` whose ``display()`` also
    renders the GPU columns added by rapids-pytest-benchmark
    (``gpu_mem``, ``gpu_leaked_mem``, ``gpu_rounds``).

    Attributes used below (``sort``, ``columns``, ``name_format``,
    ``scale_unit``, ``histogram``, ``logger``) are inherited unchanged from
    the parent class.
    """

    def display(self, tr, groups, progress_reporter=pytest_benchmark_utils.report_progress):
        """
        Write a formatted results table to the terminal for each group.

        Parameters
        ----------
        tr : pytest terminal reporter used for all writes.
        groups : iterable of (group, benchmarks) pairs, where ``benchmarks``
            is a list of per-benchmark stat dicts.
        progress_reporter : callable yielding (status-line, item) pairs while
            updating an in-place progress message.
        """
        tr.write_line("")
        tr.rewrite("Computing stats ...", black=True, bold=True)
        for line, (group, benchmarks) in progress_reporter(groups, tr, "Computing stats ... group {pos}/{total}"):
            benchmarks = sorted(benchmarks, key=operator.itemgetter(self.sort))
            for bench in benchmarks:
                bench["name"] = self.name_format(bench)

            # Best/worst value seen for each metric; used both for column
            # widths and for the green/red highlighting below.
            worst = {}
            best = {}
            solo = len(benchmarks) == 1
            for line, prop in progress_reporter(("min", "max", "mean", "median", "iqr", "stddev",
                                                 "gpu_mem", "gpu_leaked_mem", "ops"),
                                                tr, "{line}: {value}", line=line):
                # During a compare, current or previous results may not have gpu keys.
                # NOTE(review): ``bench`` here is the *last* benchmark from the
                # loop above, so only that entry is checked for the key; the
                # generator expressions below also guard with ``if prop in
                # bench`` -- confirm this is intentional.
                if prop not in bench:
                    continue
                if prop == "ops":
                    # Throughput: higher is better, so best/worst are swapped
                    # relative to the timing metrics.
                    worst[prop] = min(bench[prop] for _, bench in progress_reporter(
                        benchmarks, tr, "{line} ({pos}/{total})", line=line) if prop in bench)
                    best[prop] = max(bench[prop] for _, bench in progress_reporter(
                        benchmarks, tr, "{line} ({pos}/{total})", line=line) if prop in bench)
                else:
                    worst[prop] = max(bench[prop] for _, bench in progress_reporter(
                        benchmarks, tr, "{line} ({pos}/{total})", line=line) if prop in bench)
                    best[prop] = min(bench[prop] for _, bench in progress_reporter(
                        benchmarks, tr, "{line} ({pos}/{total})", line=line) if prop in bench)
            # Count-style columns only need a "worst" (widest) value for sizing.
            for line, prop in progress_reporter(("outliers", "rounds", "iterations", "gpu_rounds"),
                                                tr, "{line}: {value}", line=line):
                if prop not in bench:
                    continue
                worst[prop] = max(benchmark[prop] for _, benchmark in progress_reporter(
                    benchmarks, tr, "{line} ({pos}/{total})", line=line))

            # Pick human-friendly units (e.g. ms vs us) for times and ops.
            unit, adjustment = self.scale_unit(unit='seconds', benchmarks=benchmarks, best=best, worst=worst,
                                               sort=self.sort)
            ops_unit, ops_adjustment = self.scale_unit(unit='operations', benchmarks=benchmarks, best=best, worst=worst,
                                                       sort=self.sort)
            labels = {
                "name": "Name (time in {0}s, mem in bytes)".format(unit),
                "min": "Min",
                "max": "Max",
                "mean": "Mean",
                "stddev": "StdDev",
                "gpu_mem": "GPU mem",
                "gpu_leaked_mem": "GPU Leaked mem",
                "rounds": "Rounds",
                "gpu_rounds": "GPU Rounds",
                "iterations": "Iterations",
                "iqr": "IQR",
                "median": "Median",
                "outliers": "Outliers",
                "ops": "OPS ({0}ops/s)".format(ops_unit) if ops_unit else "OPS",
            }
            widths = {
                "name": 3 + max(len(labels["name"]), max(len(benchmark["name"]) for benchmark in benchmarks)),
                "rounds": 2 + max(len(labels["rounds"]), len(str(worst["rounds"]))),
                "iterations": 2 + max(len(labels["iterations"]), len(str(worst["iterations"]))),
                "outliers": 2 + max(len(labels["outliers"]), len(str(worst["outliers"]))),
                "ops": 2 + max(len(labels["ops"]), len(NUMBER_FMT.format(best["ops"] * ops_adjustment))),
            }
            # gpu_rounds may not be present if user passed --benchmark-gpu-disable
            if "gpu_rounds" in worst:
                widths["gpu_rounds"] = 2 + max(len(labels["gpu_rounds"]), len(str(worst["gpu_rounds"])))
            for prop in "min", "max", "mean", "stddev", "median", "iqr":
                widths[prop] = 2 + max(len(labels[prop]), max(
                    len(NUMBER_FMT.format(bench[prop] * adjustment))
                    for bench in benchmarks if prop in bench
                ))
            for prop in ["gpu_mem", "gpu_leaked_mem"]:
                # Only size the column if at least one benchmark reported it.
                if [b for b in benchmarks if prop in b]:
                    widths[prop] = 2 + max(len(labels[prop]), max(
                        len(INT_NUMBER_FMT.format(bench[prop]))
                        for bench in benchmarks if prop in bench
                    ))

            # Room for the "(1.23)" baseline-scale annotation; omitted when a
            # group has a single benchmark (nothing to compare against).
            rpadding = 0 if solo else 10
            labels_line = labels["name"].ljust(widths["name"]) + "".join(
                labels[prop].rjust(widths[prop]) + (
                    " " * rpadding
                    #if prop not in ["outliers", "rounds", "iterations"]
                    if prop not in ["outliers", "iterations"]
                    else ""
                )
                for prop in self.columns if (prop in labels) and (prop in widths)
            )
            tr.rewrite("")
            tr.write_line(
                " benchmark{name}: {count} tests ".format(
                    count=len(benchmarks),
                    name="" if group is None else " {0!r}".format(group),
                ).center(len(labels_line), "-"),
                yellow=True,
            )
            tr.write_line(labels_line)
            tr.write_line("-" * len(labels_line), yellow=True)
            for bench in benchmarks:
                has_error = bench.get("has_error")
                tr.write(bench["name"].ljust(widths["name"]), red=has_error, invert=has_error)
                for prop in self.columns:
                    # Skip columns this benchmark has no data (or no width) for.
                    # (The original guard tested ``prop in bench`` twice; the
                    # duplicate term has been removed.)
                    if not ((prop in bench) and (prop in widths) and (prop in worst)):
                        continue
                    if prop in ("min", "max", "mean", "stddev", "median", "iqr"):
                        # Timing columns: unit-scaled floats.
                        tr.write(
                            ALIGNED_NUMBER_FMT.format(
                                bench[prop] * adjustment,
                                widths[prop],
                                pytest_benchmark_table.compute_baseline_scale(best[prop], bench[prop], rpadding),
                                rpadding
                            ),
                            green=not solo and bench[prop] == best.get(prop),
                            red=not solo and bench[prop] == worst.get(prop),
                            bold=True,
                        )
                    elif prop in ("gpu_mem", "gpu_leaked_mem"):
                        # GPU memory columns: raw integer byte counts (no unit scaling).
                        tr.write(
                            ALIGNED_INT_NUMBER_FMT.format(
                                bench[prop],
                                widths[prop],
                                pytest_benchmark_table.compute_baseline_scale(best[prop], bench[prop], rpadding),
                                rpadding
                            ),
                            green=not solo and bench[prop] == best.get(prop),
                            red=not solo and bench[prop] == worst.get(prop),
                            bold=True,
                        )
                    elif prop == "ops":
                        tr.write(
                            ALIGNED_NUMBER_FMT.format(
                                bench[prop] * ops_adjustment,
                                widths[prop],
                                pytest_benchmark_table.compute_baseline_scale(best[prop], bench[prop], rpadding),
                                rpadding
                            ),
                            green=not solo and bench[prop] == best.get(prop),
                            red=not solo and bench[prop] == worst.get(prop),
                            bold=True,
                        )
                    else:
                        # Count columns (rounds, iterations, outliers, ...).
                        tr.write("{0:>{1}}".format(bench[prop], widths[prop]))
                tr.write("\n")
            tr.write_line("-" * len(labels_line), yellow=True)
            tr.write_line("")
            if self.histogram:
                # This import requires additional dependencies. Import it
                # here so reporting that does not use the histogram feature
                # need not install dependencies that will not be used.
                from pytest_benchmark import histogram as pytest_benchmark_histogram
                if len(benchmarks) > 75:
                    # BUGFIX: the message previously said "50" while the slice
                    # below keeps 75 entries.
                    self.logger.warn("Group {0!r} has too many benchmarks. Only plotting 75 benchmarks.".format(group))
                    benchmarks = benchmarks[:75]
                output_file = pytest_benchmark_histogram.make_histogram(self.histogram, group, benchmarks, unit, adjustment)
                self.logger.info("Generated histogram: {0}".format(output_file), bold=True)
        tr.write_line("Legend:")
        tr.write_line("  Outliers: 1 Standard Deviation from Mean; "
                      "1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.")
        tr.write_line("  OPS: Operations Per Second, computed as 1 / Mean")
| 0 |
rapidsai_public_repos/benchmark/rapids_pytest_benchmark | rapidsai_public_repos/benchmark/rapids_pytest_benchmark/rapids_pytest_benchmark/rmm_resource_analyzer.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import csv
import rmm
import tempfile
class RMMResourceAnalyzer:
    """
    Controls enabling/disabling RMM resource logging and parses the resulting
    CSV logs to compute peak and leaked GPU memory.
    """
    def __init__(self):
        # -1 indicates GPU utilization is not currently measured.
        self.max_gpu_util = -1
        # Peak GPU memory usage (bytes) seen while parsing the logs.
        self.max_gpu_mem_usage = 0
        # Bytes allocated but never freed by the end of the logs.
        self.leaked_memory = 0
        self._log_file_prefix = os.path.join(tempfile.gettempdir(),
                                             "rapids_pytest_benchmarks_log")

    def enable_logging(self):
        """
        Enable RMM logging. RMM creates a CSV output file derived from
        provided file name that looks like: log_file_prefix + ".devX", where
        X is the GPU number.
        """
        rmm.enable_logging(log_file_name=self._log_file_prefix)

    def disable_logging(self):
        """
        Disable RMM logging, parse the log files written so far, then delete
        them.
        """
        log_output_files = rmm.get_log_filenames()
        rmm.mr._flush_logs()
        rmm.disable_logging()
        # FIXME: potential improvement here would be to only parse the log
        # files for the gpu ID that's passed in via --benchmark-gpu-device
        self._parse_results(log_output_files)
        for log_path in log_output_files.values():
            os.remove(log_path)

    def _parse_results(self, log_files):
        """
        Parse CSV results (columns: Thread,Time,Action,Pointer,Size,Stream)
        from the given {gpu_id: path} mapping and update max_gpu_mem_usage
        and leaked_memory.
        """
        in_use = 0
        for log_path in log_files.values():
            with open(log_path, mode="r") as csv_file:
                for row in csv.DictReader(csv_file):
                    action = row["Action"]
                    nbytes = int(row["Size"])
                    if action == "allocate":
                        in_use += nbytes
                        self.max_gpu_mem_usage = max(self.max_gpu_mem_usage,
                                                     in_use)
                    elif action == "free":
                        in_use -= nbytes
        # Whatever remains allocated across all logs is considered leaked.
        self.leaked_memory = in_use
| 0 |
rapidsai_public_repos/benchmark/rapids_pytest_benchmark | rapidsai_public_repos/benchmark/rapids_pytest_benchmark/rapids_pytest_benchmark/__init__.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.0.15"
def setFixtureParamNames(request, orderedParamNameList):
    """
    Given a request fixture and a list of param names ordered based on the order
    the params are specified for a parameterized fixture, this will set the
    names so reporting tools can label the parameterized benchmark runs
    accordingly. This is only needed for parameterized fixtures, since
    parameterized benchmarks already assign param names to param values.

    Order matters. For example, if the fixture's params are set like this:
       params = product([True, False], [True, False])
    and the param names are set like this:
       orderedParamNameList = ["managed_memory", "pool_allocator"]
    then the reports show the options that were set for the benchmark like this:
       my_benchmark[managed_memory=True, pool_allocator=True]
       my_benchmark[managed_memory=True, pool_allocator=False]
       my_benchmark[managed_memory=False, pool_allocator=True]
       my_benchmark[managed_memory=False, pool_allocator=False]

    orderedParamNameList can have more params specified than are used. For
    example, if a fixture only has 2 params, only the first 2 names in
    orderedParamNameList are used.

    NOTE: the fixture param names set by this function are currently only used
    for ASV reporting.

    Raises
    ------
    IndexError
        If orderedParamNameList has fewer names than the fixture has params.
    """
    # This function can also be called on a single test param, which may result
    # in request.param *not* being a sequence of param values.
    # BUGFIX: multi-param fixtures built with itertools.product() (as in the
    # example above) yield *tuples*, which the original list-only type check
    # treated as a single param; accept tuples as well.
    if isinstance(request.param, (list, tuple)):
        numParams = len(request.param)
    else:
        numParams = 1

    if len(orderedParamNameList) < numParams:
        raise IndexError("setFixtureParamNames: the number of parameter names "
                         "is less than the number of parameters.")

    # Store the names on the test item keywords so reporting code (e.g. the
    # ASV writer) can look them up by fixture name later.
    request.keywords.setdefault(
        "fixture_param_names",
        dict())[request.fixturename] = orderedParamNameList[:numParams]
| 0 |
rapidsai_public_repos/benchmark/rapids_pytest_benchmark | rapidsai_public_repos/benchmark/rapids_pytest_benchmark/rapids_pytest_benchmark/plugin.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import time
import platform
import ctypes
import argparse
import subprocess
import json
import pytest
from pytest_benchmark import stats as pytest_benchmark_stats
from pytest_benchmark import utils as pytest_benchmark_utils
from pytest_benchmark import fixture as pytest_benchmark_fixture
from pytest_benchmark import session as pytest_benchmark_session
from pytest_benchmark import compat as pytest_benchmark_compat
import asvdb.utils as asvdbUtils
from asvdb import ASVDb, BenchmarkInfo, BenchmarkResult
from pynvml import smi
import psutil
from . import __version__
from .rmm_resource_analyzer import RMMResourceAnalyzer
from .reporting import GPUTableResults
# FIXME: find a better place to do this and/or a better way
# Register the GPU metric columns with pytest-benchmark at import time,
# presumably so its option handling accepts them as report columns --
# confirm against the pytest-benchmark version in use.
pytest_benchmark_utils.ALLOWED_COLUMNS.append("gpu_mem")
pytest_benchmark_utils.ALLOWED_COLUMNS.append("gpu_leaked_mem")
pytest_benchmark_utils.ALLOWED_COLUMNS.append("gpu_util")
pytest_benchmark_utils.ALLOWED_COLUMNS.append("gpu_rounds")
def pytest_addoption(parser):
    """
    pytest hook: register the rapids_pytest_benchmark command-line options in
    the existing pytest-benchmark "benchmark" option group.
    """
    group = parser.getgroup("benchmark")

    # FIXME: add check for valid dir, similar to "parse_save()" in
    # pytest-benchmark
    # FIXME: when multi-GPU supported, update the help to mention that the user
    # can specify this option multiple times to observe multiple GPUs? This is
    # why action=append.
    group.addoption(
        "--benchmark-gpu-device",
        metavar="GPU_DEVICENO", default=[0], type=_parseSaveGPUDeviceNum,
        action="append", help="GPU device number to include in benchmark metadata."
    )
    group.addoption(
        "--benchmark-gpu-max-rounds", default=1, type=_parseGpuMaxRounds,
        help="Maximum number of rounds to run the test/benchmark during the "
        "GPU measurement phase. If not provided, will run the same number of "
        "rounds performed for the runtime measurement."
    )
    group.addoption(
        "--benchmark-gpu-disable", action="store_true", default=False,
        help="Do not perform GPU measurements when using the gpubenchmark "
        "fixture, only perform other enabled measurements."
    )
    group.addoption(
        "--benchmark-custom-metrics-disable", action="store_true", default=False,
        help="Do not perform custom metrics measurements when using the "
        "gpubenchmark fixture, only perform other enabled measurements."
    )
    group.addoption(
        "--benchmark-asv-output-dir",
        metavar="ASV_DB_DIR", default=None,
        help='ASV "database" directory to update with benchmark results.'
    )
    group.addoption(
        "--benchmark-asv-metadata",
        metavar="ASV_DB_METADATA",
        default={}, type=_parseSaveMetadata,
        help='Metadata to be included in the ASV report in JSON format. For example: '
        '{"machineName":"my_machine2000", "gpuType":"FastGPU3", "arch":"x86_64"}. If not '
        'provided, best-guess values will be derived from the environment. '
        'Valid metadata is: "machineName", "cudaVer", "osType", "pythonVer", '
        '"commitRepo", "commitBranch", "commitHash", "commitTime", "gpuType", '
        '"cpuType", "arch", "ram", "gpuRam", "requirements"'
    )
def _parseGpuMaxRounds(stringOpt):
"""
Ensures opt passed is a number > 0
"""
if stringOpt != "1":
raise argparse.ArgumentTypeError("Only 1 round is supported until GPU utilization is implemented")
if not stringOpt:
raise argparse.ArgumentTypeError("Cannot be empty")
if stringOpt.isdecimal():
num = int(stringOpt)
if num == 0:
raise argparse.ArgumentTypeError("Must be non-zero")
else:
raise argparse.ArgumentTypeError("Must be an int > 0")
return num
def _parseSaveGPUDeviceNum(stringOpt):
"""
Given a string like "0,1, 2" return [0, 1, 2]
"""
if not stringOpt:
raise argparse.ArgumentTypeError("Cannot be empty")
retList = []
for i in stringOpt.split(","):
try:
num = int(i.strip())
except ValueError:
raise argparse.ArgumentTypeError(f"must specify an int, got {i}")
# FIXME: also check that this is a valid GPU device
if num not in retList:
retList.append(num)
return retList
def _parseSaveMetadata(stringOpt):
"""
Convert JSON input to Python dictionary
"""
if not stringOpt:
raise argparse.ArgumentTypeError("Cannot be empty")
validVars = ["machineName", "cudaVer", "osType", "pythonVer",
"commitRepo", "commitBranch", "commitHash", "commitTime",
"gpuType", "cpuType", "arch", "ram", "gpuRam", "requirements"]
retDict = json.loads(stringOpt)
for key in retDict.keys():
if key not in validVars:
raise argparse.ArgumentTypeError(f'invalid metadata var: "{key}"')
return retDict
class GPUBenchmarkResults:
    """Container for the GPU metrics captured during a single measured run."""
    def __init__(self, gpuMem, gpuUtil, gpuLeakedMem):
        # Peak GPU memory used during the run (bytes).
        self.gpuMem = gpuMem
        # Peak GPU utilization (-1 when utilization was not measured).
        self.gpuUtil = gpuUtil
        # Bytes still allocated after the run completed.
        self.gpuLeakedMem = gpuLeakedMem
class GPUMetadata(pytest_benchmark_stats.Metadata):
    """
    pytest-benchmark Metadata subclass whose stats object (GPUStats) can also
    record GPU metrics and custom metrics.
    """
    def __init__(self, fixture, iterations, options, fixtureParamNames=None):
        super().__init__(fixture, iterations, options)
        # Use an overridden Stats object that also handles GPU metrics
        self.stats = GPUStats()
        # fixture_param_names is used for reporting, see pytest_sessionfinish()
        self.fixture_param_names = fixtureParamNames

    def updateGPUMetrics(self, gpuBenchmarkResults):
        # Forward one round's GPUBenchmarkResults to the stats object.
        self.stats.updateGPUMetrics(gpuBenchmarkResults)

    def updateCustomMetric(self, result, name, unitString):
        # Forward a named custom metric value and its unit to the stats object.
        self.stats.updateCustomMetric(result, name, unitString)
class GPUStats(pytest_benchmark_stats.Stats):
    """
    pytest-benchmark Stats subclass that adds GPU memory/round fields and
    storage for arbitrary named custom metrics.
    """
    # Extends pytest-benchmark's field list with the GPU columns so they are
    # carried through stats serialization/reporting.
    fields = (
        "min", "max", "mean", "stddev", "rounds", "gpu_rounds", "median", "gpu_mem", "gpu_util", "gpu_leaked_mem" , "iqr", "q1", "q3", "iqr_outliers", "stddev_outliers",
        "outliers", "ld15iqr", "hd15iqr", "ops", "total"
    )

    def __init__(self):
        super().__init__()
        # One (gpuMem, gpuUtil, gpuLeakedMem) tuple per GPU measurement round.
        self.gpuData = []
        # Custom metrics are by:
        #   key   : name of the metric
        #   value : tuple of (value, unit_type)
        self.__customMetrics = {}

    def updateGPUMetrics(self, gpuBenchmarkResults):
        # Record one round's measurements from a GPUBenchmarkResults object.
        self.gpuData.append((gpuBenchmarkResults.gpuMem,
                             gpuBenchmarkResults.gpuUtil,
                             gpuBenchmarkResults.gpuLeakedMem))

    def updateCustomMetric(self, result, name, unitString):
        self.__customMetrics[name] = (result, unitString)

    def getCustomMetricNames(self):
        return list(self.__customMetrics.keys())

    def getCustomMetric(self, name):
        # Returns the (value, unit_string) tuple; raises KeyError if unknown.
        return self.__customMetrics[name]

    # FIXME: this may not need to be here
    def as_dict(self):
        return super().as_dict()

    # The properties below are cached: computed once on first access, so they
    # should only be read after all GPU rounds have been recorded.
    @pytest_benchmark_utils.cached_property
    def gpu_rounds(self):
        return len(self.gpuData)

    @pytest_benchmark_utils.cached_property
    def gpu_mem(self):
        # Peak GPU memory across all recorded rounds.
        return max([i[0] for i in self.gpuData])

    @pytest_benchmark_utils.cached_property
    def gpu_util(self):
        # Peak GPU utilization across rounds (-1 when never measured).
        return max([i[1] for i in self.gpuData])

    @pytest_benchmark_utils.cached_property
    def gpu_leaked_mem(self):
        # Largest leaked-memory figure across rounds.
        return max([i[2] for i in self.gpuData])
class GPUBenchmarkFixture(pytest_benchmark_fixture.BenchmarkFixture):
    """
    Wraps an already-instantiated pytest-benchmark BenchmarkFixture, adding
    separate GPU measurement runs and user-defined custom metrics while
    delegating everything else to the wrapped instance via __getattr__.
    """
    def __init__(self, benchmarkFixtureInstance, fixtureParamNames=None,
                 gpuMaxRounds=None, gpuDisable=False,
                 customMetricsDisable=False):
        self.__benchmarkFixtureInstance = benchmarkFixtureInstance
        self.fixture_param_names = fixtureParamNames
        self.gpuMaxRounds = gpuMaxRounds
        self.gpuDisable = gpuDisable
        self.customMetricsDisable = customMetricsDisable
        self.__timeOnlyRunner = None
        # name -> (metric_callable, unit_string); populated via addMetric()
        self.__customMetricsDict = {}

    def __getattr__(self, attr):
        """
        Any method or member that is not defined in this class will fall
        through and be accessed on self.__benchmarkFixtureInstance. This
        allows this class to override anything on the previously-instantiated
        self.__benchmarkFixtureInstance without changing the code that
        instantiated it in pytest-benchmark.
        """
        return getattr(self.__benchmarkFixtureInstance, attr)

    def _make_gpu_runner(self, function_to_benchmark, args, kwargs):
        """
        Create a callable that will run the function_to_benchmark with the
        provided args and kwargs, and wrap it in calls to perform GPU
        measurements. The resulting callable will return a GPUBenchmarkResults
        obj containing the measurements.
        """
        def runner():
            rmm_analyzer = RMMResourceAnalyzer()
            rmm_analyzer.enable_logging()
            try:
                startTime = time.time()
                function_to_benchmark(*args, **kwargs)
                duration = time.time() - startTime
                # Guarantee a minimum time has passed to ensure GPU metrics
                # have been taken
                if duration < 0.1:
                    time.sleep(0.1)
            finally:
                rmm_analyzer.disable_logging()
            return GPUBenchmarkResults(gpuMem=rmm_analyzer.max_gpu_mem_usage,
                                       gpuUtil=rmm_analyzer.max_gpu_util,
                                       gpuLeakedMem=rmm_analyzer.leaked_memory)
        return runner

    def _run_gpu_measurements(self, function_to_benchmark, args, kwargs):
        """
        Run as part of _raw() or _raw_pedantic() to perform GPU measurements.
        This only runs if benchmarks and gpu benchmarks are enabled.
        """
        if self.enabled and not(self.gpuDisable):
            gpuRunner = self._make_gpu_runner(function_to_benchmark, args, kwargs)
            # This loop can be used to re-implement GPU utilization in the future
            #
            # # Get the number of rounds performed from the runtime measurement
            # rounds = self.stats.stats.rounds
            # assert rounds > 0  # FIXME: do we need this?
            # if self.gpuMaxRounds is not None:
            #     rounds = min(rounds, self.gpuMaxRounds)
            # for _ in pytest_benchmark_compat.XRANGE(rounds):
            #     self.stats.updateGPUMetrics(gpuRunner())
            self.stats.updateGPUMetrics(gpuRunner())

        # Set the "mode" (regular or pedantic) here rather than override another
        # method. This is needed since cleanup callbacks registered prior to the
        # class override dont see the new value and will print a warning saying
        # a benchmark was run without using a benchmark fixture. The warning is
        # printed based on if mode was ever set or not.
        self.__benchmarkFixtureInstance._mode = self._mode

    def _run_custom_measurements(self, function_result):
        # Run custom metrics if they are enabled; each callable receives the
        # benchmarked function's return value.
        if self.enabled and not(self.customMetricsDisable):
            for (metric_name, (metric_callable, metric_unit_string)) in \
                self.__customMetricsDict.items():
                self.stats.updateCustomMetric(
                    metric_callable(function_result),
                    metric_name, metric_unit_string)

    def _raw(self, function_to_benchmark, *args, **kwargs):
        """
        Run the time measurement as defined in pytest-benchmark, then run GPU
        metrics separately. Running separately ensures GPU monitoring does not
        affect runtime perf.
        """
        function_result = super()._raw(function_to_benchmark, *args, **kwargs)
        self._run_gpu_measurements(function_to_benchmark, args, kwargs)
        self._run_custom_measurements(function_result)
        return function_result

    def _raw_pedantic(self, target, args=(), kwargs=None, setup=None, rounds=1,
                      warmup_rounds=0, iterations=1):
        """
        Run the pedantic time measurement as defined in pytest-benchmark, then
        run GPU metrics separately. Running separately ensures GPU monitoring
        does not affect runtime perf.
        """
        result = super()._raw_pedantic(target, args, kwargs, setup, rounds,
                                       warmup_rounds, iterations)
        # BUGFIX: this previously referenced undefined names
        # ("function_to_benchmark"/"function_result"), raising NameError for
        # every pedantic run. Use this method's actual parameters/result.
        if setup is not None:
            # Mirror pytest-benchmark's pedantic() semantics: a setup callable
            # may return the (args, kwargs) the target should be called with.
            setup_result = setup()
            if setup_result:
                args, kwargs = setup_result
        self._run_gpu_measurements(target, args, kwargs or {})
        self._run_custom_measurements(result)
        return result

    def _make_stats(self, iterations):
        """
        Overridden method to create a stats object that can be used as-is by
        pytest-benchmark but also accepts GPU metrics.
        """
        if self.gpuDisable:
            return super()._make_stats(iterations)
        bench_stats = GPUMetadata(self,
                                  iterations=iterations,
                                  options={
                                      "disable_gc": self._disable_gc,
                                      "timer": self._timer,
                                      "min_rounds": self._min_rounds,
                                      "max_time": self._max_time,
                                      "min_time": self._min_time,
                                      "warmup": self._warmup,
                                      "gpu_max_rounds": self.gpuMaxRounds,
                                  },
                                  fixtureParamNames=self.fixture_param_names)
        self._add_stats(bench_stats)
        self.stats = bench_stats
        return bench_stats

    def addMetric(self, metric_callable, metric_name, metric_unit_string):
        """
        Adds a custom metric to the set of metrics gathered as part of this
        benchmark run. When the benchmark is run, the metric_callable will also
        be run and the return value will be stored under the name metric_name.
        """
        self.__customMetricsDict[metric_name] = (metric_callable,
                                                 metric_unit_string)
class GPUBenchmarkSession(pytest_benchmark_session.BenchmarkSession):
    """
    Wraps an already-instantiated pytest-benchmark BenchmarkSession, adding
    the GPU metric columns to the report and delegating everything else to
    the wrapped session via __getattr__.
    """
    compared_mapping = None
    groups = None

    def __init__(self, benchmarkSession):
        self.__benchmarkSessionInstance = benchmarkSession
        self.compared_mapping = benchmarkSession.compared_mapping
        self.groups = benchmarkSession.groups

        # Add the GPU columns to the original list in the appropriate order
        # FIXME: this always adds gpu_* columns, even if the user specified a
        # list of columns that didn't include those. This is because the
        # default list of columns is hardcoded in the pytest-benchmark option
        # parsing and cannot be overridden without changing pytest-benchmark (I
        # think?)
        # NOTE: self.columns here resolves through __getattr__ to the wrapped
        # session's columns (this instance has no "columns" attribute yet).
        origColumns = self.columns
        self.columns = []
        for c in ["min", "max", "mean", "stddev", "median", "iqr", "outliers", "ops", "gpu_mem", "gpu_leaked_mem", "rounds", "gpu_rounds", "iterations"]:
            # Always add gpu_mem & gpu_leaked_mem (for now), and only add gpu_rounds if rounds was requested.
            if (c in origColumns) or \
               (c == "gpu_mem") or \
               (c == "gpu_leaked_mem") or \
               ((c == "gpu_rounds") and ("rounds" in origColumns)):
                self.columns.append(c)

    def __getattr__(self, attr):
        # Fall through to the wrapped BenchmarkSession for anything not
        # defined on this class.
        return getattr(self.__benchmarkSessionInstance, attr)

    def display(self, tr):
        # Render the results table (including GPU columns) via GPUTableResults
        # instead of pytest-benchmark's standard TableResults.
        if not self.groups:
            return
        tr.ensure_newline()
        results_table = GPUTableResults(
            columns=self.columns,
            sort=self.sort,
            histogram=self.histogram,
            name_format=self.name_format,
            logger=self.logger,
            scale_unit=partial(self.config.hook.pytest_benchmark_scale_unit, config=self.config),
        )
        results_table.display(tr, self.groups)
        self.check_regressions()
        self.display_cprofile(tr)
@pytest.fixture(scope="function")
def gpubenchmark(request, benchmark):
    """
    Function-scoped fixture: wraps the standard pytest-benchmark "benchmark"
    fixture in a GPUBenchmarkFixture configured from the command-line options.
    """
    # FIXME: if ASV output is enabled, enforce that fixture_param_names are set.
    # FIXME: if no params, do not enforce fixture_param_names check
    gpuMaxRounds = request.config.getoption("benchmark_gpu_max_rounds")
    gpuDisable = request.config.getoption("benchmark_gpu_disable")
    customMetricsDisable = request.config.getoption("benchmark_custom_metrics_disable")
    return GPUBenchmarkFixture(
        benchmark,
        fixtureParamNames=request.node.keywords.get("fixture_param_names"),
        gpuMaxRounds=gpuMaxRounds,
        gpuDisable=gpuDisable,
        customMetricsDisable=customMetricsDisable)
################################################################################
def pytest_sessionstart(session):
    """
    pytest hook: replace the pytest-benchmark session object with a
    GPUBenchmarkSession wrapper, keeping a reference to the original on
    _benchmarksession_orig.
    """
    session.config._benchmarksession_orig = session.config._benchmarksession
    session.config._gpubenchmarksession = \
        GPUBenchmarkSession(session.config._benchmarksession)
    session.config._benchmarksession = session.config._gpubenchmarksession
def _getOSName():
try :
binout = subprocess.check_output(
["bash", "-c",
"source /etc/os-release && echo -n ${ID}-${VERSION_ID}"])
return binout.decode()
except subprocess.CalledProcessError:
return None
def _getCudaVersion():
"""
Get the CUDA version from the CUDA DLL/.so if possible, otherwise return
None. (NOTE: is this better than screen scraping nvidia-smi?)
"""
try :
lib = ctypes.CDLL("libcudart.so")
function = getattr(lib,"cudaRuntimeGetVersion")
result = ctypes.c_int()
resultPtr = ctypes.pointer(result)
function(resultPtr)
# The version is returned as (1000 major + 10 minor). For example, CUDA
# 9.2 would be represented by 9020
major = int(result.value / 1000)
minor = int((result.value - (major * 1000)) / 10)
return f"{major}.{minor}"
# FIXME: do not use a catch-all handler
except:
return None
def _ensureListLike(item):
"""
Return the item if it is a list or tuple, otherwise add it to a list and
return that.
"""
return item if (isinstance(item, list) or isinstance(item, tuple)) \
else [item]
def _getHierBenchNameFromFullname(benchFullname):
"""
Turn a bench name that potentially looks like this:
'foodir/bench_algos.py::BenchStuff::bench_bfs[1-2-False-True]'
into this:
'foodir.bench_algos.BenchStuff.bench_bfs'
"""
benchFullname = benchFullname.partition("[")[0] # strip any params
(modName, _, benchName) = benchFullname.partition("::")
if modName.endswith(".py"):
modName = modName.partition(".")[0]
modName = modName.replace("/", ".")
benchName = benchName.replace("::", ".")
return "%s.%s" % (modName, benchName)
def pytest_sessionfinish(session, exitstatus):
    """
    pytest hook: if --benchmark-asv-output-dir was given and benchmarks were
    collected, write all results plus machine/commit metadata to the ASV
    database.
    """
    gpuBenchSess = session.config._gpubenchmarksession
    config = session.config
    asvOutputDir = config.getoption("benchmark_asv_output_dir")
    asvMetadata = config.getoption("benchmark_asv_metadata")
    gpuDeviceNums = config.getoption("benchmark_gpu_device")

    if asvOutputDir and gpuBenchSess.benchmarks:

        # FIXME: do not lookup commit metadata if already specified on the
        # command line.
        (commitHash, commitTime) = asvdbUtils.getCommitInfo()
        (commitRepo, commitBranch) = asvdbUtils.getRepoInfo()

        # FIXME: do not make pynvml calls if all the metadata provided by pynvml
        # was specified on the command line.
        smi.nvmlInit()
        # only supporting 1 GPU
        # FIXME: see if it's possible to auto detect gpu device number instead of
        # manually passing a value
        gpuDeviceHandle = smi.nvmlDeviceGetHandleByIndex(gpuDeviceNums[0])

        # Each metadata item can be overridden on the command line; otherwise
        # derive a best-guess value from the environment.
        uname = platform.uname()
        machineName = asvMetadata.get("machineName", uname.machine)
        cpuType = asvMetadata.get("cpuType", uname.processor)
        arch = asvMetadata.get("arch", uname.machine)
        pythonVer = asvMetadata.get("pythonVer",
            ".".join(platform.python_version_tuple()[:-1]))
        cudaVer = asvMetadata.get("cudaVer", _getCudaVersion() or "unknown")
        # BUGFIX: platform.linux_distribution() was removed in Python 3.8 and,
        # being the eagerly-evaluated dict.get() default, raised
        # AttributeError even when "osType" metadata was provided. Fall back
        # to platform.platform() instead.
        osType = asvMetadata.get("osType",
                                 _getOSName() or platform.platform())
        gpuType = asvMetadata.get("gpuType",
            smi.nvmlDeviceGetName(gpuDeviceHandle).decode())
        ram = asvMetadata.get("ram", "%d" % psutil.virtual_memory().total)
        gpuRam = asvMetadata.get("gpuRam",
            "%d" % smi.nvmlDeviceGetMemoryInfo(gpuDeviceHandle).total)

        commitHash = asvMetadata.get("commitHash", commitHash)
        commitTime = asvMetadata.get("commitTime", commitTime)
        commitRepo = asvMetadata.get("commitRepo", commitRepo)
        commitBranch = asvMetadata.get("commitBranch", commitBranch)
        requirements = asvMetadata.get("requirements", "{}")

        # Maps stat name -> funcName suffix / unit used in the ASV results.
        suffixDict = dict(gpu_util="gpuutil",
                          gpu_mem="gpumem",
                          gpu_leaked_mem="gpu_leaked_mem",
                          mean="time",
                          )
        unitsDict = dict(gpu_util="percent",
                         gpu_mem="bytes",
                         gpu_leaked_mem="bytes",
                         mean="seconds",
                         )

        db = ASVDb(asvOutputDir, commitRepo, [commitBranch])

        bInfo = BenchmarkInfo(machineName=machineName,
                              cudaVer=cudaVer,
                              osType=osType,
                              pythonVer=pythonVer,
                              commitHash=commitHash,
                              commitTime=commitTime,
                              branch=commitBranch,
                              gpuType=gpuType,
                              cpuType=cpuType,
                              arch=arch,
                              ram=ram,
                              gpuRam=gpuRam,
                              requirements=requirements)

        for bench in gpuBenchSess.benchmarks:
            benchName = _getHierBenchNameFromFullname(bench.fullname)
            # build the final params dict by extracting them from the
            # bench.params dictionary. Not all benchmarks are parameterized
            params = {}
            bench_params = bench.params.items() if bench.params is not None else []
            for (paramName, paramVal) in bench_params:
                # If the params are coming from a fixture, handle them
                # differently since they will (should be) stored in a special
                # variable accessible with the name of the fixture.
                #
                # NOTE: "fixture_param_names" must be manually set by the
                # benchmark author/user using the "request" fixture! (see below)
                #
                # @pytest.fixture(params=[1,2,3])
                # def someFixture(request):
                #     request.keywords["fixture_param_names"] = ["the_param_name"]
                if hasattr(bench, "fixture_param_names") and \
                   (bench.fixture_param_names is not None) and \
                   (paramName in bench.fixture_param_names):
                    fixtureName = paramName
                    paramNames = _ensureListLike(bench.fixture_param_names[fixtureName])
                    paramValues = _ensureListLike(paramVal)
                    for (pname, pval) in zip(paramNames, paramValues):
                        params[pname] = pval

                # otherwise, a benchmark/test will have params added to the
                # bench.params dict as a standard key:value (paramName:paramVal)
                else:
                    params[paramName] = paramVal

            resultList = []
            for statType in ["mean", "gpu_mem", "gpu_leaked_mem", "gpu_util"]:
                bn = "%s_%s" % (benchName, suffixDict[statType])
                val = getattr(bench.stats, statType, None)
                if val is not None:
                    bResult = BenchmarkResult(funcName=bn,
                                              argNameValuePairs=list(params.items()),
                                              result=val)
                    bResult.unit = unitsDict[statType]
                    resultList.append(bResult)

            # If there were any custom metrics, add each of those as well as an
            # individual result to the same bInfo instance.
            if isinstance(bench.stats, GPUStats):
                for customMetricName in bench.stats.getCustomMetricNames():
                    (result, unitString) = bench.stats.getCustomMetric(customMetricName)
                    bn = "%s_%s" % (benchName, customMetricName)
                    bResult = BenchmarkResult(funcName=bn,
                                              argNameValuePairs=list(params.items()),
                                              result=result)
                    bResult.unit = unitString
                    resultList.append(bResult)

            db.addResults(bInfo, resultList)
def pytest_report_header(config):
    """pytest hook: add the rapids_pytest_benchmark version to the header."""
    return f"rapids_pytest_benchmark: {__version__}"
def DISABLED_pytest_benchmark_scale_unit(config, unit, benchmarks, best, worst, sort):
    """
    Scale GPU memory and utilization measurements accordingly
    """
    # No-op placeholder; the DISABLED_ prefix presumably keeps this from being
    # picked up as the pytest_benchmark_scale_unit hook -- confirm before
    # renaming/enabling.
    return
| 0 |
rapidsai_public_repos/benchmark/rapids_pytest_benchmark/rapids_pytest_benchmark | rapidsai_public_repos/benchmark/rapids_pytest_benchmark/rapids_pytest_benchmark/tests/test_rmm_analyzer.py | import cudf
from ..rmm_resource_analyzer import RMMResourceAnalyzer
def test_rmm_analyzer():
    """
    End-to-end check that RMMResourceAnalyzer reports peak GPU memory usage.
    Requires a GPU with cudf/rmm installed.
    """
    inst = RMMResourceAnalyzer()
    inst.enable_logging()
    s = cudf.Series([1])
    del s
    inst.disable_logging()
    # NOTE(review): assumes cudf.Series([1]) results in a single 8-byte peak
    # allocation (one int64 element) -- verify against the cudf/rmm versions
    # in use.
    assert inst.max_gpu_mem_usage == 8
| 0 |
rapidsai_public_repos/benchmark/rapids_pytest_benchmark | rapidsai_public_repos/benchmark/rapids_pytest_benchmark/conda/meta.yaml | {% set version = load_setup_py_data().get('version') %}
package:
name: rapids-pytest-benchmark
version: {{ version }}
source:
path: ..
build:
script: {{ PYTHON }} -m pip install . --no-deps
noarch: python
requirements:
host:
- python
run:
- asvdb>=0.3.0
- psutil
- pynvml
- pytest-benchmark>=3.2.3
- python
- rmm>=0.19.0a
test:
imports:
- rapids_pytest_benchmark
about:
home: https://github.com/rapidsai/benchmark
license: Apache 2.0
| 0 |
rapidsai_public_repos/benchmark | rapidsai_public_repos/benchmark/parser/GBenchToASV.py | import os
import sys
import json
import subprocess
import argparse
import re
import platform
import psutil
from asvdb import ASVDb, BenchmarkInfo, BenchmarkResult
from pynvml import smi
# USAGE:
#
# -d : JSON Result Directory
# -n : Repository Name
# -t : Target Directory for ASV JSON
# -b : Branch Name
def build_argparse():
    """
    Build the command-line parser for converting gbench JSON results to an
    ASV database.
    """
    parser = argparse.ArgumentParser(add_help=True)
    for flag, helpText in (("-d", "JSON Result Directory"),
                           ("-n", "Repository Name"),
                           ("-t", "Target Directory for JSON"),
                           ("-b", "Branch Name")):
        parser.add_argument(flag, nargs=1, help=helpText)
    # Optional requirements metadata; defaults to an empty JSON object.
    parser.add_argument('-r', nargs=1,
                        help='Requirements metadata in JSON format',
                        default=['{}'])
    return parser
def getCommandOutput(cmd):
    """
    Run cmd through the shell and return its stripped stdout; raise
    RuntimeError (including stdout/stderr) on a non-zero exit status.
    """
    completed = subprocess.run(cmd,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout = completed.stdout.decode().strip()
    if completed.returncode != 0:
        stderr = completed.stderr.decode().strip()
        raise RuntimeError("Problem running '%s' (STDOUT: '%s' STDERR: '%s')"
                           % (cmd, stdout, stderr))
    return stdout
def getSysInfo(requirements):
    """
    Collect machine, commit, and GPU metadata into an asvdb BenchmarkInfo.
    Assumes smi.nvmlInit() has already been called (main() does this) and
    that the current working directory is inside the benchmarked git repo.
    """
    # Use Node Label from Jenkins if possible
    label = os.environ.get('ASV_LABEL')
    uname = platform.uname()
    if label == None:
        label = uname.machine

    commitHash = getCommandOutput("git rev-parse HEAD")
    # "%%ct" survives the %-format below as "%ct" (committer date, epoch secs)
    commitTime = getCommandOutput("git log -n1 --pretty=%%ct %s" % commitHash)
    commitTime = str(int(commitTime)*1000) # ASV wants commit to be in milliseconds

    # Only GPU device 0 is supported here.
    gpuDeviceNums = [0]
    gpuDeviceHandle = smi.nvmlDeviceGetHandleByIndex(gpuDeviceNums[0])

    bInfo = BenchmarkInfo(
        machineName=label,
        cudaVer=getCommandOutput("nvcc --version | grep release | awk '{print $5}' | tr -d ,"),
        osType="%s %s" % (uname.system, uname.release),
        pythonVer=platform.python_version(),
        commitHash=commitHash,
        commitTime=commitTime,
        gpuType=smi.nvmlDeviceGetName(gpuDeviceHandle).decode(),
        cpuType=uname.processor,
        arch=uname.machine,
        ram="%d" % psutil.virtual_memory().total,
        requirements=requirements
    )

    return bInfo
def genBenchmarkResults(fileList, repoName):
    """
    Parse the google-benchmark JSON result files in fileList and return a
    list of asvdb BenchmarkResult objects named "<repoName>.<benchmark>".
    Benchmarks with a "bytes_per_second" field get an additional
    "<name>_throughput" result.
    """
    # Matches each "/"-separated component of a gbench name
    # (benchmark name followed by its parameter values).
    pattern = re.compile(r"([^\/]+)")
    benchResults = []
    for file in fileList:
        with open(file, 'r') as in_file:
            tests = json.load(in_file)["benchmarks"]
            # First pass: record the max number of parameters seen for each
            # benchmark name, so every result gets a uniform param list.
            num_params_dict = {}
            for each in tests:
                name_and_params = pattern.findall(each["name"])
                name = repoName + "." + name_and_params[0]
                name = name.replace("<","[").replace(">","]").replace("::", "_")
                test_params = name_and_params[1:]
                if name not in num_params_dict:
                    num_params_dict[name] = len(test_params)
                elif len(test_params) > num_params_dict[name]:
                    num_params_dict[name] = len(test_params)

            for each in tests:
                # Get Benchmark Name and Test Parameters
                name_and_params = pattern.findall(each["name"])
                name = repoName + "." + name_and_params[0]
                name = name.replace("<","[").replace(">","]").replace("::", "_")
                test_params = name_and_params[1:]
                # Pad missing params with "None" so all results for this
                # benchmark share the same param names.
                param_values = []
                for idx in range(num_params_dict[name]):
                    if idx < len(test_params):
                        param_values.append((f"param{idx}", test_params[idx]))
                    else:
                        param_values.append((f"param{idx}", "None"))

                # Get result
                if "real_time" in each:
                    bench_result = each["real_time"]
                elif "rms" in each:
                    bench_result = each["rms"]
                else:
                    # BUGFIX: bench_result was previously left unbound
                    # (NameError) for entries with neither key; skip them.
                    continue
                if "time_unit" in each:
                    time_unit = each["time_unit"]
                else:
                    time_unit = "seconds"

                # Check if benchmark logged throughput
                if "bytes_per_second" in each:
                    bResult = BenchmarkResult(
                        funcName=name+"_throughput",
                        argNameValuePairs=param_values,
                        result=each["bytes_per_second"],
                        unit="bps"
                    )
                    benchResults.append(bResult)
                bResult = BenchmarkResult(
                    funcName=name,
                    argNameValuePairs=param_values,
                    result=bench_result,
                    unit=time_unit
                )
                benchResults.append(bResult)
    return benchResults
def main(args):
    """CLI entry point: convert gbench JSON results in a directory to ASV.

    Parameters
    ----------
    args : list of str
        Command-line arguments, typically ``sys.argv[1:]``.
    """
    ns = build_argparse().parse_args(args)
    testResultDir = ns.d[0]
    repoName = ns.n[0]
    outputDir = ns.t[0]
    branchName = ns.b[0]
    requirements = json.loads(ns.r[0])
    # Repo URL comes from the last line of "git remote -v" (its 2nd field)
    repoUrl = getCommandOutput("git remote -v").split("\n")[-1].split()[1]
    db = ASVDb(outputDir, repoUrl, [branchName])
    # BUGFIX: the original removed entries from gbenchFileList while
    # iterating over it, which silently skips elements (adjacent non-JSON
    # files were kept). Build a filtered list of full paths instead.
    gbenchFileList = [
        f"{testResultDir}/{f}"
        for f in os.listdir(testResultDir)
        if ".json" in f
    ]
    smi.nvmlInit()
    system_info = getSysInfo(requirements)
    resultList = genBenchmarkResults(gbenchFileList, repoName)
    db.addResults(system_info, resultList)
# Script entry point: forward CLI args (minus the program name) to main()
if __name__ == '__main__':
    main(sys.argv[1:])
| 0 |
rapidsai_public_repos | rapidsai_public_repos/plotly-dash-rapids-census-demo/entrypoint.sh | #activating the conda environment
source activate rapids
cd /rapids/plotly_census_demo/plotly_demo
# Select which dashboard to launch: pass "dask_app" as the first argument
# for the multi-GPU (dask) version; anything else runs the single-GPU app.
# BUGFIX: "$@" expands to ALL arguments and makes `[ ... ]` fail with a
# syntax error when more than one argument is passed; compare "$1" instead.
if [ "$1" = "dask_app" ]; then
    python dask_app.py
else
    python app.py
fi
| 0 |
rapidsai_public_repos | rapidsai_public_repos/plotly-dash-rapids-census-demo/README.md | # Plotly-Dash + RAPIDS | Census 2020 Visualization
<a href="https://colab.research.google.com/github/rapidsai/plotly-dash-rapids-census-demo/blob/main/plotly_demo/colab_plotly_rapids_app.ipynb" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg"></a>

## Charts
1. Map chart shows the total population points for chosen view and selected area
2. Top counties bar show the top 15 counties for chosen view and selected area
3. Bottom counties bar show the bottom 15 counties for chosen view and selected area
4. Race Distribution shows distribution of individual races across blocks for chosen view and selected area
Cross-filtering is enabled to link all the four charts using box-select tool
## Data-Selection Views
The demo consists of six views and all views are calculated at a block level
- Total Population view shows total Census 2020 population.
- Migrating In view shows net inward decennial migration.
- Stationary view shows population that were stationary.
- Migrating Out view shows net outward decennial migration.
- Net Migration view shows total decennial migration. Points are colored into three categories - migrating in, stationary, migrating out
- Population with Race shows total Census 2020 population colored into seven race categories - White alone, African American alone, American Indian alone, Asian alone, Native Hawaiian alone, Other Race alone, Two or More races.
## Installation and Run Steps
## Base Layer Setup
The visualization uses a Mapbox base layer that requires an access token. Create one for free [here on mapbox](https://www.mapbox.com/help/define-access-token/). Go to the demo root directory's `plotly_demo` folder and create a token file named `.mapbox_token`. Copy your token contents into the file.
**NOTE:** Installation may fail without the token.
## Data
There are 2 main datasets:
- [Total Population Dataset](https://data.rapids.ai/viz-data/total_population_dataset.parquet) ; Consists of Census 2020 total population with decennial migration from Census 2010 at a block level.
- [Net Migration Dataset](https://data.rapids.ai/viz-data/net_migration_dataset.parquet) ; Net migration from Census 2010 at a block level.
For more information on how the Census 2020 and 2010 Migration data was prepared to show individual points, refer to the `/data_prep_total_population` folder.
### Conda Env
Verify the following arguments in the `environment.yml` match your system(easy way to check `nvidia-smi`):
cudatoolkit: Version used is `11.5`
```bash
# setup conda environment
conda env create --name plotly_env --file environment.yml
source activate plotly_env
# run and access single GPU version
cd plotly_demo
python app.py
# run and access multi GPU version, run `python dask_app.py --help for args info`
# if --cuda_visible_devices argument is not passed, all the available GPUs are used
cd plotly_demo
python dask_app.py --cuda_visible_devices=0,1
```
### Docker
Verify the following arguments in the Dockerfile match your system:
1. CUDA_VERSION: Supported versions are `11.0+`
2. LINUX_VERSION: Supported OS values are `ubuntu16.04, ubuntu18.04, centos7`
The most up to date OS and CUDA versions supported can be found here: [RAPIDS requirements](https://rapids.ai/start.html#req)
```bash
# build
docker build -t plotly_demo .
# run and access single GPU version via: http://localhost:8050 / http://ip_address:8050 / http://0.0.0.0:8050
docker run --gpus all --name single_gpu -p 8050:8050 plotly_demo
# run and access multi GPU version via: http://localhost:8050 / http://ip_address:8050 / http://0.0.0.0:8050
# Use `--gpus all` to use all the available GPUs
docker run --gpus '"device=0,1"' --name multi_gpu -p 8050:8050 plotly_demo dask_app
```
## Requirements
### CUDA/GPU requirements
- CUDA 11.0+
- NVIDIA driver 450.80.02+
- Pascal architecture or better (Compute Capability >=6.0)
> Recommended Memory: NVIDIA GPU with at least 32GB of memory(or 2 GPUs with equivalent GPU memory when running dask version), and at least 32GB of system memory.
### OS requirements
See the [Rapids System Requirements section](https://rapids.ai/start.html#requirements) for information on compatible OS.
## Dependencies
- python=3.9
- cudatoolkit=11.5
- rapids=22.08
- dash=2.5.1
- jupyterlab=3.4.3
- dash-html-components=2.0.0
- dash-core-components=2.0.0
- dash-daq=0.5.0
- dash_bootstrap_components=1.2.0
## FAQ and Known Issues
**What hardware do I need to run this locally?** To run you need an NVIDIA GPU with at least 32GB of memory(or 2 GPUs with equivalent GPU memory when running dask version), at least 32GB of system memory.
**How did you compute migration?** Migration was computed by comparing the block level population for census 2010 and 2020
**How did you compare population having block level boundary changes?** [Relationship Files](https://www.census.gov/geographies/reference-files/time-series/geo/relationship-files.html#t10t20) provides the 2010 Census Tabulation Block to 2020 Census Tabulation Block Relationship Files. Block relationships may be one-to-one, many-to-one, one-to-many, or many-to-many. Population count was computed in proportion to take into account the division and collation of blocks across 2010 and 2020.
**How did you determine race?** Race for stationary and inward migration individuals was randomly assigned within a block but they add up accurately at the block level. However, due to how data is anonymized, race for outward migration population could not be calculated.
**How did you get individual point locations?** The population density points are randomly placed within a census block and associated to match distribution counts at a census block level.
**How are the population and distributions filtered?** Use the box select tool icon for the map or click and drag for the bar charts.
**Why is the population data from 2010 and 2020?** Only census data is recorded on a block level, which provides the highest resolution population distributions available. For more details on census boundaries refer to the [TIGERweb app](https://tigerweb.geo.census.gov/tigerwebmain/TIGERweb_apps.html).
**The dashboard stops responding or the chart data disappeared!** This is likely caused by an Out of Memory Error, and the application must be restarted.
**How do I request a feature or report a bug?** Create an [Issue](https://github.com/rapidsai/plotly-dash-rapids-census-demo/issues) and we will get to it asap.
## Acknowledgments and Data Sources
- 2020 Population Census and 2010 Population Census to compute Migration Dataset, used with permission from IPUMS NHGIS, University of Minnesota, [www.nhgis.org](https://www.nhgis.org/) ( not for redistribution ).
- Base map layer provided by [Mapbox](https://www.mapbox.com/).
- Dashboard developed with [Plotly Dash](https://plotly.com/dash/).
- Geospatial point rendering developed with [Datashader](https://datashader.org/).
- GPU toggle accelerated with [RAPIDS cudf](https://rapids.ai/) and [cupy](https://cupy.chainer.org/), CPU toggle with [pandas](https://pandas.pydata.org/).
- For source code and data workflow, visit our [GitHub](https://github.com/rapidsai/plotly-dash-rapids-census-demo/tree/census-2020).
| 0 |
rapidsai_public_repos | rapidsai_public_repos/plotly-dash-rapids-census-demo/.dockerignore | ./data/*
dask-worker-space
.vscode | 0 |
rapidsai_public_repos | rapidsai_public_repos/plotly-dash-rapids-census-demo/environment.yml | channels:
- rapidsai
- conda-forge
- nvidia
dependencies:
- python=3.10
- cudatoolkit=11.8
- cudf=23.06
- dask-cudf=23.06
- dask-cuda=23.06
- dash
- jupyterlab
- jupyter-dash
- jupyterlab-dash
- dash-html-components
- dash-core-components
- dash-daq
- dash-bootstrap-components
- datashader>=0.15
- pyproj
- bokeh
| 0 |
rapidsai_public_repos | rapidsai_public_repos/plotly-dash-rapids-census-demo/Dockerfile | ARG RAPIDS_VERSION=22.12
ARG CUDA_VERSION=11.5
ARG LINUX_VERSION=ubuntu20.04
ARG PYTHON_VERSION=3.9
FROM nvcr.io/nvidia/rapidsai/rapidsai-core:${RAPIDS_VERSION}-cuda${CUDA_VERSION}-base-${LINUX_VERSION}-py${PYTHON_VERSION}
WORKDIR /rapids/
RUN mkdir plotly_census_demo
WORKDIR /rapids/plotly_census_demo
RUN mkdir data
WORKDIR /rapids/plotly_census_demo/data
RUN curl https://data.rapids.ai/viz-data/total_population_dataset.parquet -o total_population_dataset.parquet
WORKDIR /rapids/plotly_census_demo
COPY . .
RUN source activate rapids && conda remove --force cuxfilter && mamba env update --file environment_for_docker.yml
ENTRYPOINT ["bash","./entrypoint.sh"] | 0 |
rapidsai_public_repos | rapidsai_public_repos/plotly-dash-rapids-census-demo/environment_for_docker.yml | channels:
- conda-forge
dependencies:
- dash=2.5.1
- dash-html-components=2.0.0
- dash-core-components=2.0.0
- dash-daq=0.5.0
- dash-bootstrap-components=1.2.0
- datashader=0.14
| 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/plotly_demo/README.md | # Plotly-Dash + RAPIDS | Census 2020 Visualization
There are two versions for the same application, with all the views(described below) in both, single GPU and multi-GPU versions respectively.
Recommended GPU memory:
1. Single GPU version: 32GB+
2. Multi-GPU version: 2+ GPUs of 16GB+ each
```bash
# run and access single GPU version
cd plotly_demo
python app.py
# run and access multi GPU version
cd plotly_demo
python dask_app.py
```
## Snapshot Examples
### 1) Total Population View

### 2) Migrating In View

### 3) Stationary View

### 4) Migrating Out View

### 5) Net Migration View

#### Migration population to color mapping -
<b>Inward Migration</b>: Purple-Blue</br>
<b>Stationary</b>: Greens</br>
<b>Outward Migration</b>: Red Purples</br>
### 6) Population with Race view

#### Race to color mapping -
<b>White</b>: aqua</br>
<b>African American</b>: lime</br>
<b>American Indian</b>: yellow</br>
<b>Asian</b>: orange</br>
<b>Native Hawaiian</b>: blue</br>
<b>Other Race alone</b>: fuchsia</br>
<b>Two or More</b>: saddlebrown</br>
| 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/plotly_demo/app.py | import os
import time
import cudf
import dash_bootstrap_components as dbc
import dash_daq as daq
import numpy as np
import pandas as pd
from dash import Dash, ctx, dcc, html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from distributed import Client
from utils import *
import tarfile
# ### Dashboards start here
text_color = "#cfd8dc" # Material blue-grey 100
DATA_PATH = "../data"
DATA_PATH_STATE = f"{DATA_PATH}/state-wise-population"
DATA_PATH_TOTAL = f"{DATA_PATH}/total_population_dataset.parquet"
# Download the required states data
census_data_url = "https://data.rapids.ai/viz-data/total_population_dataset.parquet"
check_dataset(census_data_url, DATA_PATH_TOTAL)
census_state_data_url = "https://data.rapids.ai/viz-data/state-wise-population.tar.xz"
if not os.path.exists(DATA_PATH_STATE):
check_dataset(census_state_data_url, f"{DATA_PATH_STATE}.tar.xz")
print("Extracting state-wise-population.tar.xz ...")
with tarfile.open(f"{DATA_PATH_STATE}.tar.xz", "r:xz") as tar:
tar.extractall(DATA_PATH)
print("Done.")
state_files = os.listdir(DATA_PATH_STATE)
state_names = [os.path.splitext(f)[0] for f in state_files]
# add USA(combined dataset) to the list of states
state_names.append("USA")
(
data_center_3857,
data_3857,
data_4326,
data_center_4326,
selected_map_backup,
selected_race_backup,
selected_county_top_backup,
selected_county_bt_backup,
view_name_backup,
c_df,
gpu_enabled_backup,
dragmode_backup,
currently_loaded_state,
) = ([], [], [], [], None, None, None, None, None, None, None, "pan", None)
app = Dash(__name__)
application = app.server
app.layout = html.Div(
children=[
html.Div(
children=[
html.H1(
children=[
"Census 2020 Net Migration Visualization",
html.A(
html.Img(
src="assets/rapids-logo.png",
style={
"float": "right",
"height": "45px",
"marginRight": "1%",
"marginTop": "-7px",
},
),
href="https://rapids.ai/",
),
html.A(
html.Img(
src="assets/dash-logo.png",
style={"float": "right", "height": "30px"},
),
href="https://dash.plot.ly/",
),
],
style={"textAlign": "left"},
),
]
),
html.Div(
children=[
html.Div(
children=[
html.Div(
children=[
html.H4(
[
"Population Count and Query Time",
],
className="container_title",
),
dcc.Loading(
dcc.Graph(
id="indicator-graph",
figure=blank_fig(row_heights[3]),
config={"displayModeBar": False},
),
color="#b0bec5",
style={"height": f"{row_heights[3]}px"},
),
],
style={"height": f"{row_heights[0]}px"},
className="five columns pretty_container",
id="indicator-div",
),
html.Div(
children=[
html.Div(
children=[
html.Button(
"Clear All Selections",
id="clear-all",
className="reset-button",
),
]
),
html.H4(
[
"Options",
],
className="container_title",
),
html.Table(
[
html.Tr(
[
html.Td(
html.Div("GPU Acceleration"),
className="config-label",
),
html.Td(
html.Div(
[
daq.DarkThemeProvider(
daq.BooleanSwitch(
on=True,
color="#00cc96",
id="gpu-toggle",
)
),
dbc.Tooltip(
"Caution: Using CPU compute for more than 50 million points is not recommended.",
target="gpu-toggle",
placement="bottom",
autohide=True,
style={
"textAlign": "left",
"fontSize": "15px",
"color": "white",
"width": "350px",
"padding": "15px",
"borderRadius": "5px",
"backgroundColor": "#2a2a2e",
},
),
]
)
),
####### State Selection Dropdown ######
html.Td(
html.Div("Select State"),
style={"fontSize": "20px"},
),
html.Td(
dcc.Dropdown(
id="state-dropdown",
options=[
{"label": i, "value": i}
for i in state_names
],
value="USA",
),
style={
"width": "25%",
"height": "15px",
},
),
###### VIEWS ARE HERE ###########
html.Td(
html.Div("Data-Selection"),
style={"fontSize": "20px"},
), # className="config-label"
html.Td(
dcc.Dropdown(
id="view-dropdown",
options=[
{
"label": "Total Population",
"value": "total",
},
{
"label": "Migrating In",
"value": "in",
},
{
"label": "Stationary",
"value": "stationary",
},
{
"label": "Migrating Out",
"value": "out",
},
{
"label": "Net Migration",
"value": "net",
},
{
"label": "Population with Race",
"value": "race",
},
],
value="in",
searchable=False,
clearable=False,
),
style={
"width": "25%",
"height": "15px",
},
),
]
),
],
style={"width": "100%", "marginTop": "30px"},
),
# Hidden div inside the app that stores the intermediate value
html.Div(
id="datapoints-state-value",
style={"display": "none"},
),
],
style={"height": f"{row_heights[0]}px"},
className="seven columns pretty_container",
id="config-div",
),
]
),
##################### Map starts ###################################
html.Div(
children=[
html.Button(
"Clear Selection", id="reset-map", className="reset-button"
),
html.H4(
[
"Population Distribution of Individuals",
],
className="container_title",
),
dcc.Graph(
id="map-graph",
config={"displayModeBar": False},
figure=blank_fig(row_heights[1]),
),
# Hidden div inside the app that stores the intermediate value
html.Div(
id="intermediate-state-value", style={"display": "none"}
),
],
className="twelve columns pretty_container",
id="map-div",
style={"height": "50%"},
),
################# Bars start #########################
# Race start
html.Div(
children=[
html.Div(
children=[
html.Button(
"Clear Selection",
id="clear-race",
className="reset-button",
),
html.H4(
[
"Race Distribution",
],
className="container_title",
),
dcc.Graph(
id="race-histogram",
config={"displayModeBar": False},
figure=blank_fig(row_heights[2]),
),
],
className="one-third column pretty_container",
id="race-div",
), # County top starts
html.Div(
children=[
html.Button(
"Clear Selection",
id="clear-county-top",
className="reset-button",
),
html.H4(
[
"County-wise Top 15",
],
className="container_title",
),
dcc.Graph(
id="county-histogram-top",
config={"displayModeBar": False},
figure=blank_fig(row_heights[2]),
animate=False,
),
],
className=" one-third column pretty_container",
id="county-div-top",
),
# County bottom starts
html.Div(
children=[
html.Button(
"Clear Selection",
id="clear-county-bottom",
className="reset-button",
),
html.H4(
[
"County-wise Bottom 15",
],
className="container_title",
),
dcc.Graph(
id="county-histogram-bottom",
config={"displayModeBar": False},
figure=blank_fig(row_heights[2]),
animate=False,
),
],
className="one-third column pretty_container",
),
],
className="twelve columns",
)
############## End of Bars #####################
]
),
html.Div(
[
html.H4("Acknowledgements and Data Sources", style={"marginTop": "0"}),
dcc.Markdown(
"""\
- 2020 Population Census and 2010 Population Census to compute Migration Dataset, used with permission from IPUMS NHGIS, University of Minnesota, [www.nhgis.org](https://www.nhgis.org/) ( not for redistribution ).
- Base map layer provided by [Mapbox](https://www.mapbox.com/).
- Dashboard developed with [Plotly Dash](https://plotly.com/dash/).
- Geospatial point rendering developed with [Datashader](https://datashader.org/).
- GPU toggle accelerated with [RAPIDS cudf and dask_cudf](https://rapids.ai/) and [cupy](https://cupy.chainer.org/), CPU toggle with [pandas](https://pandas.pydata.org/).
- For source code and data workflow, visit our [GitHub](https://github.com/rapidsai/plotly-dash-rapids-census-demo/tree/master).
"""
),
],
style={
"width": "98%",
"marginRight": "0",
"padding": "10px",
},
className="twelve columns pretty_container",
),
],
)
# Clear/reset button callbacks
@app.callback(
    Output("map-graph", "selectedData"),
    [Input("reset-map", "n_clicks"), Input("clear-all", "n_clicks")],
)
def clear_map(*args):
    """Clear the map box-selection (map 'Clear Selection' or 'Clear All')."""
    return None
@app.callback(
    Output("race-histogram", "selectedData"),
    [Input("clear-race", "n_clicks"), Input("clear-all", "n_clicks")],
)
def clear_race_hist_selections(*args):
    """Clear the race-histogram selection (its button or 'Clear All')."""
    return None
@app.callback(
    Output("county-histogram-top", "selectedData"),
    [Input("clear-county-top", "n_clicks"), Input("clear-all", "n_clicks")],
)
def clear_county_hist_top_selections(*args):
    """Clear the top-counties bar selection (its button or 'Clear All')."""
    return None
@app.callback(
    Output("county-histogram-bottom", "selectedData"),
    [Input("clear-county-bottom", "n_clicks"), Input("clear-all", "n_clicks")],
)
def clear_county_hist_bottom_selections(*args):
    """Clear the bottom-counties bar selection (its button or 'Clear All')."""
    return None
@app.callback(
[
Output("indicator-graph", "figure"),
Output("map-graph", "figure"),
Output("map-graph", "config"),
Output("county-histogram-top", "figure"),
Output("county-histogram-top", "config"),
Output("county-histogram-bottom", "figure"),
Output("county-histogram-bottom", "config"),
Output("race-histogram", "figure"),
Output("race-histogram", "config"),
Output("intermediate-state-value", "children"),
],
[
Input("map-graph", "relayoutData"),
Input("map-graph", "selectedData"),
Input("race-histogram", "selectedData"),
Input("county-histogram-top", "selectedData"),
Input("county-histogram-bottom", "selectedData"),
Input("view-dropdown", "value"),
Input("state-dropdown", "value"),
Input("gpu-toggle", "on"),
],
[
State("intermediate-state-value", "children"),
],
)
def update_plots(
    relayout_data,
    selected_map,
    selected_race,
    selected_county_top,
    selected_county_bottom,
    view_name,
    state_name,
    gpu_enabled,
    coordinates_backup,
):
    """Main Dash callback: rebuild all figures from the current selections.

    Triggered by map pan/zoom/selection, any histogram selection, the
    view/state dropdowns, or the GPU toggle. Returns the indicator figure,
    the map figure+config, the three histogram figures+configs, and the
    serialized (coordinates, position) backup stored in a hidden div.
    """
    global data_3857, data_center_3857, data_4326, data_center_4326, currently_loaded_state, selected_race_backup, selected_county_top_backup, selected_county_bt_backup
    # condition to avoid reloading on tool update (only "dragmode" changed)
    if (
        ctx.triggered_id == "map-graph"
        and relayout_data
        and list(relayout_data.keys()) == ["dragmode"]
    ):
        raise PreventUpdate
    # condition to avoid a bug in plotly where selectedData is reset following a box-select
    if not (selected_race is not None and len(selected_race["points"]) == 0):
        selected_race_backup = selected_race
    elif ctx.triggered_id == "race-histogram":
        raise PreventUpdate
    # condition to avoid a bug in plotly where selectedData is reset following a box-select
    if not (
        selected_county_top is not None and len(selected_county_top["points"]) == 0
    ):
        selected_county_top_backup = selected_county_top
    elif ctx.triggered_id == "county-histogram-top":
        raise PreventUpdate
    # condition to avoid a bug in plotly where selectedData is reset following a box-select
    if not (
        selected_county_bottom is not None
        and len(selected_county_bottom["points"]) == 0
    ):
        selected_county_bt_backup = selected_county_bottom
    elif ctx.triggered_id == "county-histogram-bottom":
        raise PreventUpdate
    # Cached load: re-reads only when state_name changed.
    # NOTE(review): the cache ignores gpu_enabled, so flipping the GPU
    # toggle alone does not reload the frame into cudf/pandas - confirm.
    df = read_dataset(state_name, gpu_enabled, currently_loaded_state)
    t0 = time.time()
    # Unpack the (coordinates, position) tuple round-tripped through the
    # hidden "intermediate-state-value" div.
    if coordinates_backup is not None:
        coordinates_4326_backup, position_backup = coordinates_backup
    else:
        coordinates_4326_backup, position_backup = None, None
    colorscale_name = "Viridis"
    # Recompute projection bounds on first call or when the state changed
    if data_3857 == [] or state_name != currently_loaded_state:
        (
            data_3857,
            data_center_3857,
            data_4326,
            data_center_4326,
        ) = set_projection_bounds(df)
    # Build all four figures plus the indicator from the filtered data
    (
        datashader_plot,
        race_histogram,
        county_top_histogram,
        county_bottom_histogram,
        n_selected_indicator,
        coordinates_4326_backup,
        position_backup,
    ) = build_updated_figures(
        df,
        relayout_data,
        selected_map,
        selected_race_backup,
        selected_county_top_backup,
        selected_county_bt_backup,
        colorscale_name,
        data_3857,
        data_center_3857,
        data_4326,
        data_center_4326,
        coordinates_4326_backup,
        position_backup,
        view_name,
    )
    # Shared mode-bar config for the three bar charts
    barchart_config = {
        "displayModeBar": True,
        "modeBarButtonsToRemove": [
            "zoom2d",
            "pan2d",
            "select2d",
            "lasso2d",
            "zoomIn2d",
            "zoomOut2d",
            "resetScale2d",
            "hoverClosestCartesian",
            "hoverCompareCartesian",
            "toggleSpikelines",
        ],
    }
    compute_time = time.time() - t0
    print(f"Query time: {compute_time}")
    # Append the query-time indicator next to the population count
    n_selected_indicator["data"].append(
        {
            "title": {"text": "Query Time"},
            "type": "indicator",
            "value": round(compute_time, 4),
            "domain": {"x": [0.6, 0.85], "y": [0, 0.5]},
            "number": {
                "font": {
                    "color": text_color,
                    "size": "50px",
                },
                "suffix": " seconds",
            },
        }
    )
    # Preserve the user's current drag tool (pan/box-select) across updates
    datashader_plot["layout"]["dragmode"] = (
        relayout_data["dragmode"]
        if (relayout_data and "dragmode" in relayout_data)
        else dragmode_backup
    )
    # update currently loaded state
    currently_loaded_state = state_name
    return (
        n_selected_indicator,
        datashader_plot,
        {
            "displayModeBar": True,
            "modeBarButtonsToRemove": [
                "lasso2d",
                "zoomInMapbox",
                "zoomOutMapbox",
                "toggleHover",
            ],
        },
        county_top_histogram,
        barchart_config,
        county_bottom_histogram,
        barchart_config,
        race_histogram,
        barchart_config,
        (coordinates_4326_backup, position_backup),
    )
def read_dataset(state_name, gpu_enabled, currently_loaded_state, dtype_changed=False):
    """Load (and cache) the population dataset for the requested state.

    The frame is cached in the module-level ``c_df`` and only re-read when
    the requested state differs from the one currently loaded, or when
    ``dtype_changed`` forces a reload.

    BUGFIX/consistency: the cache previously keyed only on the state, so
    flipping the GPU toggle could not reload the frame into the other
    backend (cudf vs pandas). The notebook version of this app already
    takes a ``dtype_changed`` flag for this; added here with a default of
    False so existing callers are unaffected.

    Parameters
    ----------
    state_name : str
        State to load, or "USA" for the combined dataset.
    gpu_enabled : bool
        True loads via cudf, False via pandas.
    currently_loaded_state : str or None
        State whose data is currently cached in ``c_df``.
    dtype_changed : bool, default False
        Force a reload even when the state is unchanged (e.g. after the
        GPU/CPU toggle flips).

    Returns
    -------
    The cached cudf or pandas DataFrame.
    """
    global c_df
    if state_name != currently_loaded_state or dtype_changed:
        if state_name == "USA":
            data_path = f"{DATA_PATH}/total_population_dataset.parquet"
        else:
            data_path = f"{DATA_PATH_STATE}/{state_name}.parquet"
        c_df = load_dataset(data_path, "cudf" if gpu_enabled else "pandas")
    return c_df
if __name__ == "__main__":
    # Launch dashboard
    # Binds to 0.0.0.0 so the app is reachable from outside a container;
    # debug + hot reload are development-time settings.
    app.run_server(
        debug=True,
        dev_tools_hot_reload=True,
        host="0.0.0.0",
    )
| 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/plotly_demo/colab_plotly_rapids_app.ipynb | # Download state-wise datasets
import os
if not os.path.exists("state-wise-population"):
!wget https://data.rapids.ai/viz-data/state-wise-population.tar.xz
!tar -xJf state-wise-population.tar.xz
!rm state-wise-population.tar.xz
# Optional: Download all US states combined dataset, uncomment the following lines to download the dataset
# if not os.path.exists("total_population_dataset.parquet"):
# !wget https://data.rapids.ai/viz-data/total_population_dataset.parquet# Download CSS and logo files from the original repo to use in the plotly dashboard
! mkdir -p assets && for file in s1.css dash-logo.png rapids-logo.png; do curl -o assets/$file https://raw.githubusercontent.com/rapidsai/plotly-dash-rapids-census-demo/main/plotly_demo/assets/$file; done
# Download utils.py file from the original repo to use the utility functions in the notebook
! mkdir -p utils && for file in __init__.py utils.py; do curl -o utils/$file https://raw.githubusercontent.com/rapidsai/plotly-dash-rapids-census-demo/main/plotly_demo/utils/$file; donefrom bokeh import palettes
from dash import Dash, ctx, dcc, html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from distributed import Client
from pyproj import Transformer
import cupy as cp
import dash_bootstrap_components as dbc
import dash_daq as daq
import datashader as ds
import datashader.transfer_functions as tf
import numpy as np
import pandas as pd
import pickle
import requests
import time
from jupyter_dash import JupyterDash # For running the app in a notebook environment
import cudf # RAPIDS cuDF is an implementation of Pandas-like Dataframe on GPU
from utils import * # Import utility functions from the utils.py fileDATA_PATH = "."
DATA_PATH_STATE = f"{DATA_PATH}/state-wise-population"
# Get list of state names from file names
state_files = os.listdir(DATA_PATH_STATE)
state_names = [os.path.splitext(f)[0] for f in state_files]
DEFAULT_STATE = state_names[0]
# append USA (combined) to state_names if it exists
if os.path.exists("total_population_dataset.parquet"):
state_names.append("USA")
(
data_center_3857,
data_3857,
data_4326,
data_center_4326,
selected_map_backup,
selected_race_backup,
selected_county_top_backup,
selected_county_bt_backup,
view_name_backup,
c_df,
gpu_enabled_backup,
dragmode_backup,
currently_loaded_state,
) = ([], [], [], [], None, None, None, None, None, None, None, "pan", None)app = JupyterDash(__name__)
# Create server variable with Flask server object for use with gunicorn
server = app.server# function to load dataset based on the selected dropdown value
def read_dataset(state_name, gpu_enabled, currently_loaded_state, dtype_changed=False):
global c_df
if state_name != currently_loaded_state or dtype_changed:
if state_name == "USA":
data_path = f"{DATA_PATH}/total_population_dataset.parquet"
else:
data_path = f"{DATA_PATH_STATE}/{state_name}.parquet"
c_df = load_dataset(data_path, "cudf" if gpu_enabled else "pandas")
return c_df# Describe the dashboards layout
app.layout = html.Div(
children=[
html.Div(
children=[
html.H1(
children=[
"Census 2020 Net Migration Visualization",
html.A(
html.Img(
src="assets/rapids-logo.png",
style={
"float": "right",
"height": "45px",
"marginRight": "1%",
"marginTop": "-7px",
},
),
href="https://rapids.ai/",
),
html.A(
html.Img(
src="assets/dash-logo.png",
style={"float": "right", "height": "30px"},
),
href="https://dash.plot.ly/",
),
],
style={"textAlign": "left"},
),
]
),
html.Div(
children=[
html.Div(
children=[
html.Div(
children=[
html.H4(
[
"Population Count and Query Time",
],
className="container_title",
),
dcc.Loading(
dcc.Graph(
id="indicator-graph",
figure=blank_fig(row_heights[3]),
config={"displayModeBar": False},
),
color="#b0bec5",
style={"height": f"{row_heights[3]}px"},
),
],
style={"height": f"{row_heights[0]}px"},
className="five columns pretty_container",
id="indicator-div",
),
html.Div(
children=[
html.Div(
children=[
html.Button(
"Clear All Selections",
id="clear-all",
className="reset-button",
),
]
),
html.H4(
[
"Options",
],
className="container_title",
),
html.Table(
[
html.Tr(
[
html.Td(
html.Div("GPU Acceleration"),
className="config-label",
),
html.Td(
html.Div(
[
daq.DarkThemeProvider(
daq.BooleanSwitch(
on=True,
color="#00cc96",
id="gpu-toggle",
)
),
dbc.Tooltip(
"Caution: Using CPU compute for more than 50 million points is not recommended.",
target="gpu-toggle",
placement="bottom",
autohide=True,
style={
"textAlign": "left",
"fontSize": "15px",
"color": "white",
"width": "350px",
"padding": "15px",
"borderRadius": "5px",
"backgroundColor": "#2a2a2e",
},
),
]
)
),
####### State Selection Dropdown ######
html.Td(
html.Div("Select State"),
style={"fontSize": "20px"},
),
html.Td(
dcc.Dropdown(
id="state-dropdown",
options=[
{"label": i, "value": i}
for i in state_names
],
value="CALIFORNIA",
),
style={
"width": "25%",
"height": "15px",
},
),
###### VIEWS ARE HERE ###########
html.Td(
html.Div("Data-Selection"),
style={"fontSize": "20px"},
), # className="config-label"
html.Td(
dcc.Dropdown(
id="view-dropdown",
options=[
{
"label": "Total Population",
"value": "total",
},
{
"label": "Migrating In",
"value": "in",
},
{
"label": "Stationary",
"value": "stationary",
},
{
"label": "Migrating Out",
"value": "out",
},
{
"label": "Net Migration",
"value": "net",
},
{
"label": "Population with Race",
"value": "race",
},
],
value="net",
searchable=False,
clearable=False,
),
style={
"width": "25%",
"height": "15px",
},
),
]
),
],
style={"width": "100%", "marginTop": "30px"},
),
# Hidden div inside the app that stores the intermediate value
html.Div(
id="datapoints-state-value",
style={"display": "none"},
),
],
style={"height": f"{row_heights[0]}px"},
className="seven columns pretty_container",
id="config-div",
),
]
),
##################### Map starts ###################################
html.Div(
children=[
html.Button(
"Clear Selection", id="reset-map", className="reset-button"
),
html.H4(
[
"Population Distribution of Individuals",
],
className="container_title",
),
dcc.Graph(
id="map-graph",
config={"displayModeBar": False},
figure=blank_fig(row_heights[1]),
),
# Hidden div inside the app that stores the intermediate value
html.Div(
id="intermediate-state-value", style={"display": "none"}
),
],
className="twelve columns pretty_container",
id="map-div",
style={"height": "50%"},
),
################# Bars start #########################
# Race start
html.Div(
children=[
html.Div(
children=[
html.Button(
"Clear Selection",
id="clear-race",
className="reset-button",
),
html.H4(
[
"Race Distribution",
],
className="container_title",
),
dcc.Graph(
id="race-histogram",
config={"displayModeBar": False},
figure=blank_fig(row_heights[2]),
),
],
className="one-third column pretty_container",
id="race-div",
), # County top starts
html.Div(
children=[
html.Button(
"Clear Selection",
id="clear-county-top",
className="reset-button",
),
html.H4(
[
"County-wise Top 15",
],
className="container_title",
),
dcc.Graph(
id="county-histogram-top",
config={"displayModeBar": False},
figure=blank_fig(row_heights[2]),
animate=False,
),
],
className=" one-third column pretty_container",
id="county-div-top",
),
# County bottom starts
html.Div(
children=[
html.Button(
"Clear Selection",
id="clear-county-bottom",
className="reset-button",
),
html.H4(
[
"County-wise Bottom 15",
],
className="container_title",
),
dcc.Graph(
id="county-histogram-bottom",
config={"displayModeBar": False},
figure=blank_fig(row_heights[2]),
animate=False,
),
],
className="one-third column pretty_container",
),
],
className="twelve columns",
)
############## End of Bars #####################
]
),
html.Div(
[
html.H4("Acknowledgements and Data Sources", style={"marginTop": "0"}),
dcc.Markdown(
"""\
- 2020 Population Census and 2010 Population Census to compute Migration Dataset, used with permission from IPUMS NHGIS, University of Minnesota, [www.nhgis.org](https://www.nhgis.org/) ( not for redistribution ).
- Base map layer provided by [Mapbox](https://www.mapbox.com/).
- Dashboard developed with [Plotly Dash](https://plotly.com/dash/).
- Geospatial point rendering developed with [Datashader](https://datashader.org/).
- GPU toggle accelerated with [RAPIDS cudf and dask_cudf](https://rapids.ai/) and [cupy](https://cupy.chainer.org/), CPU toggle with [pandas](https://pandas.pydata.org/).
- For source code and data workflow, visit our [GitHub](https://github.com/rapidsai/plotly-dash-rapids-census-demo/tree/master).
"""
),
],
style={
"width": "98%",
"marginRight": "0",
"padding": "10px",
},
className="twelve columns pretty_container",
),
],
)
# Clear/reset button callbacks
@app.callback(
Output("map-graph", "selectedData"),
[Input("reset-map", "n_clicks"), Input("clear-all", "n_clicks")],
)
def clear_map(*args):
return None
@app.callback(
    Output("race-histogram", "selectedData"),
    [Input("clear-race", "n_clicks"), Input("clear-all", "n_clicks")],
)
def clear_race_hist_selections(*_button_clicks):
    """Drop the race histogram's selection when its Clear button (or Clear All) fires."""
    return None
@app.callback(
    Output("county-histogram-top", "selectedData"),
    [Input("clear-county-top", "n_clicks"), Input("clear-all", "n_clicks")],
)
def clear_county_hist_top_selections(*_button_clicks):
    """Drop the top-15-counties histogram selection when its Clear button (or Clear All) fires."""
    return None
@app.callback(
    Output("county-histogram-bottom", "selectedData"),
    [Input("clear-county-bottom", "n_clicks"), Input("clear-all", "n_clicks")],
)
def clear_county_hist_bottom_selections(*_button_clicks):
    """Drop the bottom-15-counties histogram selection when its Clear button (or Clear All) fires."""
    return None
@app.callback(
    [
        Output("indicator-graph", "figure"),
        Output("map-graph", "figure"),
        Output("map-graph", "config"),
        Output("county-histogram-top", "figure"),
        Output("county-histogram-top", "config"),
        Output("county-histogram-bottom", "figure"),
        Output("county-histogram-bottom", "config"),
        Output("race-histogram", "figure"),
        Output("race-histogram", "config"),
        Output("intermediate-state-value", "children"),
    ],
    [
        Input("map-graph", "relayoutData"),
        Input("map-graph", "selectedData"),
        Input("race-histogram", "selectedData"),
        Input("county-histogram-top", "selectedData"),
        Input("county-histogram-bottom", "selectedData"),
        Input("view-dropdown", "value"),
        Input("state-dropdown", "value"),
        Input("gpu-toggle", "on"),
    ],
    [
        State("intermediate-state-value", "children"),
    ],
    # prevent_initial_call=True
)
def update_plots(
    relayout_data,
    selected_map,
    selected_race,
    selected_county_top,
    selected_county_bottom,
    view_name,
    state_name,
    gpu_enabled,
    coordinates_backup,
):
    """Main dashboard callback: rebuild every figure from the current inputs.

    Fires on map pan/zoom (relayoutData), any graph selection, the view or
    state dropdowns, and the CPU/GPU toggle. Returns, in decorator order:
    the indicator figure, the map figure + its modebar config, the three
    histogram figures each paired with a shared bar-chart config, and a
    (coordinates, position) backup tuple stashed in the hidden
    "intermediate-state-value" Div so the next invocation can restore the
    viewport.

    Reads/writes module-level caches: projection bounds (data_3857 etc.),
    the currently loaded state name, and per-histogram selection backups.
    """
    global data_3857, data_center_3857, data_4326, data_center_4326
    global currently_loaded_state, selected_race_backup, selected_county_top_backup, selected_county_bt_backup
    # condition to avoid reloading on tool update
    # (a relayoutData containing ONLY "dragmode" means the user just switched
    # pan/select tools, not moved the viewport)
    if (
        ctx.triggered_id == "map-graph"
        and relayout_data
        and list(relayout_data.keys()) == ["dragmode"]
    ):
        raise PreventUpdate
    # condition to avoid a bug in plotly where selectedData is reset following a box-select
    # (an empty "points" list from the triggering graph is the spurious reset;
    # otherwise remember the selection in the module-level backup)
    if not (selected_race is not None and len(selected_race["points"]) == 0):
        selected_race_backup = selected_race
    elif ctx.triggered_id == "race-histogram":
        raise PreventUpdate
    # condition to avoid a bug in plotly where selectedData is reset following a box-select
    if not (
        selected_county_top is not None and len(selected_county_top["points"]) == 0
    ):
        selected_county_top_backup = selected_county_top
    elif ctx.triggered_id == "county-histogram-top":
        raise PreventUpdate
    # condition to avoid a bug in plotly where selectedData is reset following a box-select
    if not (
        selected_county_bottom is not None
        and len(selected_county_bottom["points"]) == 0
    ):
        selected_county_bt_backup = selected_county_bottom
    elif ctx.triggered_id == "county-histogram-bottom":
        raise PreventUpdate
    # (Re)load the dataset; dtype_changed forces a reload when the CPU/GPU
    # toggle is what triggered this callback.
    df = read_dataset(state_name, gpu_enabled, currently_loaded_state, dtype_changed=ctx.triggered_id == "gpu-toggle")
    t0 = time.time()
    # coordinates_backup round-trips through the hidden Div as a 2-item
    # sequence: (map coordinates in EPSG:4326, camera position).
    if coordinates_backup is not None:
        coordinates_4326_backup, position_backup = coordinates_backup
    else:
        coordinates_4326_backup, position_backup = None, None
    colorscale_name = "Viridis"
    # Recompute projection bounds on first load or when the selected state
    # changed (the cached bounds belong to the previously loaded state).
    if data_3857 == [] or state_name != currently_loaded_state:
        (
            data_3857,
            data_center_3857,
            data_4326,
            data_center_4326,
        ) = set_projection_bounds(df)
    # Build every figure in one pass from the dataframe plus the (backed-up)
    # selections and viewport.
    (
        datashader_plot,
        race_histogram,
        county_top_histogram,
        county_bottom_histogram,
        n_selected_indicator,
        coordinates_4326_backup,
        position_backup,
    ) = build_updated_figures(
        df,
        relayout_data,
        selected_map,
        selected_race_backup,
        selected_county_top_backup,
        selected_county_bt_backup,
        colorscale_name,
        data_3857,
        data_center_3857,
        data_4326,
        data_center_4326,
        coordinates_4326_backup,
        position_backup,
        view_name,
    )
    # Shared modebar config for the three bar charts: keep box-select,
    # strip zoom/pan/hover tools.
    barchart_config = {
        "displayModeBar": True,
        "modeBarButtonsToRemove": [
            "zoom2d",
            "pan2d",
            "select2d",
            "lasso2d",
            "zoomIn2d",
            "zoomOut2d",
            "resetScale2d",
            "hoverClosestCartesian",
            "hoverCompareCartesian",
            "toggleSpikelines",
        ],
    }
    compute_time = time.time() - t0
    print(f"Query time: {compute_time}")
    # Append a "Query Time" gauge next to the selection-count indicator.
    n_selected_indicator["data"].append(
        {
            "title": {"text": "Query Time"},
            "type": "indicator",
            "value": round(compute_time, 4),
            "domain": {"x": [0.6, 0.85], "y": [0, 0.5]},
            "number": {
                "font": {
                    "color": text_color,
                    "size": "50px",
                },
                "suffix": " seconds",
            },
        }
    )
    # Preserve the user's current drag tool across re-renders; falls back to
    # dragmode_backup (a module-level default defined earlier in the file —
    # not visible in this chunk, confirm it exists).
    datashader_plot["layout"]["dragmode"] = (
        relayout_data["dragmode"]
        if (relayout_data and "dragmode" in relayout_data)
        else dragmode_backup
    )
    # update currently loaded state
    currently_loaded_state = state_name
    return (
        n_selected_indicator,
        datashader_plot,
        {
            "displayModeBar": True,
            "modeBarButtonsToRemove": [
                "lasso2d",
                "zoomInMapbox",
                "zoomOutMapbox",
                "toggleHover",
            ],
        },
        county_top_histogram,
        barchart_config,
        county_bottom_histogram,
        barchart_config,
        race_histogram,
        barchart_config,
        (coordinates_4326_backup, position_backup),
    )
# Start the Dash development server. The trailing "| 0" that previously
# followed this call was a data-extraction artifact, not code: run_server()
# returns None, so "None | 0" would raise a TypeError at startup.
app.run_server(debug=False)
# NOTE(review): the three lines below are not Python — they are UI text from a
# dataset viewer that was accidentally pasted into the file. Left here as
# comments so nothing is lost, but they should simply be deleted.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.