repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cuvs/docs | rapidsai_public_repos/cuvs/docs/source/conf.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION.
import os
import sys
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath("sphinxext"))
from github_link import make_linkcode_resolve # noqa
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"numpydoc",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.linkcode",
"IPython.sphinxext.ipython_console_highlighting",
"IPython.sphinxext.ipython_directive",
"breathe",
"recommonmark",
"sphinx_markdown_tables",
"sphinx_copybutton"
]
breathe_default_project = "RAFT"
breathe_projects = {
"RAFT": "../../cpp/doxygen/_xml/",
}
ipython_mplbackend = "str"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# generate autosummary even if no references
# autosummary_generate = True
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "raft"
copyright = "2023, NVIDIA Corporation"
author = "NVIDIA Corporation"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '24.02'
# The full version, including alpha/beta/rc tags.
release = '24.02.00'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"external_links": [],
# https://github.com/pydata/pydata-sphinx-theme/issues/1220
"icon_links": [],
"github_url": "https://github.com/rapidsai/raft",
"twitter_url": "https://twitter.com/rapidsai",
"show_toc_level": 1,
"navbar_align": "right",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_js_files = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "raftdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "raft.tex", "RAFT Documentation", "NVIDIA Corporation", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "raft", "RAFT Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"raft",
"RAFT Documentation",
author,
"raft",
"One line description of project.",
"Miscellaneous",
),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/", None),
}
# Config numpydoc
numpydoc_show_inherited_class_members = False
numpydoc_class_members_toctree = False
# NOTE(review): the dump stripped leading indentation; in the original
# conf.py the lines below form the body of setup().
def setup(app):
# Sphinx extension entry point: registers the RAPIDS-wide custom CSS and
# deferred-loading JS assets with the HTML builder.
app.add_css_file("references.css")
app.add_css_file("https://docs.rapids.ai/assets/css/custom.css")
app.add_js_file(
"https://docs.rapids.ai/assets/js/custom.js", loading_method="defer"
)
# Used by sphinx.ext.linkcode to provide "[source]" links to GitHub.
# The adjacent string literals below are implicitly concatenated into a
# single URL template, so each fragment must end on a "/" boundary.
# The previous fragments joined as ".../rapidsai/raftraft/blob/..." and
# ".../pylibraft{package}/..." (missing separators), producing dead links.
linkcode_resolve = make_linkcode_resolve(
    "pylibraft",
    "https://github.com/rapidsai/"
    "raft/blob/{revision}/python/pylibraft/"
    "{package}/{path}#L{lineno}",
)
# Set the default role for interpreted code (anything surrounded in `single
# backticks`) to be a python object. See
# https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-default_role
default_role = "py:obj"
| 0 |
rapidsai_public_repos/cuvs/docs | rapidsai_public_repos/cuvs/docs/source/build.md | # Installation
RAFT currently provides libraries for C++ and Python. The C++ libraries, including the header-only and optional shared library, can be installed with Conda.
Both the C++ and Python APIs require CMake to build from source.
## Table of Contents
- [Install C++ and Python through Conda](#installing-c-and-python-through-conda)
- [Installing Python through Pip](#installing-python-through-pip)
- [Building C++ and Python from source](#building-c-and-python-from-source)
- [CUDA/GPU requirements](#cudagpu-requirements)
- [Build dependencies](#build-dependencies)
- [Required](#required)
- [Optional](#optional)
- [Conda environment scripts](#conda-environment-scripts)
- [Header-only C++](#header-only-c)
- [C++ shared library](#c-shared-library-optional)
- [ccache and sccache](#ccache-and-sccache)
- [C++ tests](#c-tests)
- [C++ primitives microbenchmarks](#c-primitives-microbenchmarks)
- [Python libraries](#python-libraries)
- [Using CMake directly](#using-cmake-directly)
- [Build documentation](#build-documentation)
- [Using RAFT in downstream projects](#using-raft-c-in-downstream-projects)
- [CMake targets](#cmake-targets)
------
## Installing C++ and Python through Conda
The easiest way to install RAFT is through conda and several packages are provided.
- `libraft-headers` C++ headers
- `libraft` (optional) C++ shared library containing pre-compiled template instantiations and runtime API.
- `pylibraft` (optional) Python library
- `raft-dask` (optional) Python library for deployment of multi-node multi-GPU algorithms that use the RAFT `raft::comms` abstraction layer in Dask clusters.
- `raft-ann-bench` (optional) Benchmarking tool for easily producing benchmarks that compare RAFT's vector search algorithms against other state-of-the-art implementations.
- `raft-ann-bench-cpu` (optional) Reproducible benchmarking tool similar to above, but doesn't require CUDA to be installed on the machine. Can be used to test in environments with competitive CPUs.
Use the following command, depending on your CUDA version, to install all of the RAFT packages with conda (replace `rapidsai` with `rapidsai-nightly` to install more up-to-date but less stable nightly packages). `mamba` is preferred over the `conda` command.
```bash
# for CUDA 11.8
mamba install -c rapidsai -c conda-forge -c nvidia raft-dask pylibraft cuda-version=11.8
```
```bash
# for CUDA 12.0
mamba install -c rapidsai -c conda-forge -c nvidia raft-dask pylibraft cuda-version=12.0
```
Note that the above commands will also install `libraft-headers` and `libraft`.
You can also install the conda packages individually using the `mamba` command above. For example, if you'd like to install RAFT's headers and pre-compiled shared library to use in your project:
```bash
# for CUDA 12.0
mamba install -c rapidsai -c conda-forge -c nvidia libraft libraft-headers cuda-version=12.0
```
If installing the C++ APIs, please see [using libraft](https://docs.rapids.ai/api/raft/nightly/using_libraft/) for more information on using the pre-compiled shared library. You can also refer to the [example C++ template project](https://github.com/rapidsai/raft/tree/branch-24.02/cpp/template) for a ready-to-go CMake configuration that you can drop into your project and build against installed RAFT development artifacts above.
## Installing Python through Pip
`pylibraft` and `raft-dask` both have packages that can be [installed through pip](https://rapids.ai/pip.html#install).
For CUDA 11 packages:
```bash
pip install pylibraft-cu11 --extra-index-url=https://pypi.nvidia.com
pip install raft-dask-cu11 --extra-index-url=https://pypi.nvidia.com
```
And CUDA 12 packages:
```bash
pip install pylibraft-cu12 --extra-index-url=https://pypi.nvidia.com
pip install raft-dask-cu12 --extra-index-url=https://pypi.nvidia.com
```
These packages statically build RAFT's pre-compiled instantiations, so the C++ headers and pre-compiled shared library won't be readily available to use in your code.
## Building C++ and Python from source
### CUDA/GPU Requirements
- cmake 3.26.4+
- GCC 9.3+ (9.5.0+ recommended)
- CUDA Toolkit 11.2+
- NVIDIA driver 450.80.02+
- Pascal architecture or better (compute capability >= 6.0)
### Build Dependencies
In addition to the libraries included with cudatoolkit 11.0+, there are some other dependencies below for building RAFT from source. Many of the dependencies are optional and depend only on the primitives being used. All of these can be installed with cmake or [rapids-cpm](https://github.com/rapidsai/rapids-cmake#cpm) and many of them can be installed with [conda](https://anaconda.org).
#### Required
- [RMM](https://github.com/rapidsai/rmm) corresponding to RAFT version.
- [Thrust](https://github.com/NVIDIA/thrust) v1.17 / [CUB](https://github.com/NVIDIA/cub)
- [cuCollections](https://github.com/NVIDIA/cuCollections) - Used in `raft::sparse::distance` API.
- [CUTLASS](https://github.com/NVIDIA/cutlass) v2.9.1 - Used in `raft::distance` API.
#### Optional
- [NCCL](https://github.com/NVIDIA/nccl) - Used in `raft::comms` API and needed to build `raft-dask`.
- [UCX](https://github.com/openucx/ucx) - Used in `raft::comms` API and needed to build `raft-dask`.
- [Googletest](https://github.com/google/googletest) - Needed to build tests
- [Googlebench](https://github.com/google/benchmark) - Needed to build benchmarks
- [Doxygen](https://github.com/doxygen/doxygen) - Needed to build docs
#### Conda environment scripts
Conda environment scripts are provided for installing the necessary dependencies to build both the C++ and Python libraries from source. It is preferred to use `mamba`, as it provides significant speedup over `conda`:
```bash
mamba env create --name rapids_raft -f conda/environments/all_cuda-120_arch-x86_64.yaml
mamba activate rapids_raft
```
All of RAFT's C++ APIs can be used header-only and optional pre-compiled shared libraries provide some host-accessible runtime APIs and template instantiations to accelerate compile times.
The process for building from source with CUDA 11 differs slightly in that your host system will also need to have CUDA toolkit installed which is greater than, or equal to, the version you install into your conda environment. Installing CUDA toolkit into your host system is necessary because `nvcc` is not provided with Conda's cudatoolkit dependencies for CUDA 11. The following example will create and install dependencies for a CUDA 11.8 conda environment:
```bash
mamba env create --name rapids_raft -f conda/environments/all_cuda-118_arch-x86_64.yaml
mamba activate rapids_raft
```
The recommended way to build and install RAFT from source is to use the `build.sh` script in the root of the repository. This script can build both the C++ and Python artifacts and provides CMake options for building and installing the headers, tests, benchmarks, and the pre-compiled shared library.
### Header-only C++
`build.sh` uses [rapids-cmake](https://github.com/rapidsai/rapids-cmake), which will automatically download any dependencies which are not already installed. It's important to note that while all the headers will be installed and available, some parts of the RAFT API depend on libraries like CUTLASS, which will need to be explicitly enabled in `build.sh`.
The following example will download the needed dependencies and install the RAFT headers into `$INSTALL_PREFIX/include/raft`.
```bash
./build.sh libraft
```
The `-n` flag can be passed to just have the build download the needed dependencies. Since RAFT's C++ headers are primarily used during build-time in downstream projects, the dependencies will never be installed by the RAFT build.
```bash
./build.sh libraft -n
```
Once installed, `libraft` headers (and dependencies which were downloaded and installed using `rapids-cmake`) can be uninstalled also using `build.sh`:
```bash
./build.sh libraft --uninstall
```
### C++ Shared Library (optional)
A shared library can be built for speeding up compile times. The shared library also contains a runtime API that allows you to invoke RAFT APIs directly from C++ source files (without `nvcc`). The shared library can also significantly improve re-compile times both while developing RAFT and using its APIs to develop applications. Pass the `--compile-lib` flag to `build.sh` to build the library:
```bash
./build.sh libraft --compile-lib
```
In the above example the shared library is installed by default into `$INSTALL_PREFIX/lib`. To disable this, pass the `-n` flag.
Once installed, the shared library, headers (and any dependencies downloaded and installed via `rapids-cmake`) can be uninstalled using `build.sh`:
```bash
./build.sh libraft --uninstall
```
### ccache and sccache
`ccache` and `sccache` can be used to better cache parts of the build when rebuilding frequently, such as when working on a new feature. You can also use `ccache` or `sccache` with `build.sh`:
```bash
./build.sh libraft --cache-tool=ccache
```
### C++ Tests
Compile the tests using the `tests` target in `build.sh`.
```bash
./build.sh libraft tests
```
Test compile times can be improved significantly by using the optional shared libraries. If installed, they will be used automatically when building the tests but `--compile-libs` can be used to add additional compilation units and compile them with the tests.
```bash
./build.sh libraft tests --compile-lib
```
The tests are broken apart by algorithm category, so you will find several binaries in `cpp/build/` named `*_TEST`.
For example, to run the distance tests:
```bash
./cpp/build/DISTANCE_TEST
```
It can take some time to compile all of the tests. You can build individual tests by providing a semicolon-separated list to the `--limit-tests` option in `build.sh`:
```bash
./build.sh libraft tests -n --limit-tests="NEIGHBORS_TEST;DISTANCE_TEST;MATRIX_TEST"
```
### C++ Primitives Microbenchmarks
The benchmarks are broken apart by algorithm category, so you will find several binaries in `cpp/build/` named `*_PRIMS_BENCH`.
```bash
./build.sh libraft bench-prims
```
It can take some time to compile all of the benchmarks. You can build individual benchmarks by providing a semicolon-separated list to the `--limit-bench-prims` option in `build.sh`:
```bash
./build.sh libraft bench-prims -n --limit-bench-prims="NEIGHBORS_PRIMS_BENCH;DISTANCE_PRIMS_BENCH;LINALG_PRIMS_BENCH"
```
In addition to microbenchmarks for individual primitives, RAFT contains a reproducible benchmarking tool for evaluating the performance of RAFT's vector search algorithms against the existing state-of-the-art. Please refer to the [RAFT ANN Benchmarks](https://docs.rapids.ai/api/raft/nightly/raft_ann_benchmarks/) guide for more information on this tool.
### Python libraries
The Python libraries can be built and installed using the `build.sh` script:
```bash
# to build pylibraft
./build.sh libraft pylibraft --compile-lib
# to build raft-dask (depends on pylibraft)
./build.sh libraft pylibraft raft-dask --compile-lib
```
`setup.py` can also be used to build the Python libraries manually:
```bash
cd python/raft-dask
python setup.py build_ext --inplace
python setup.py install
cd python/pylibraft
python setup.py build_ext --inplace
python setup.py install
```
Python tests are automatically installed with the corresponding libraries. To run Python tests:
```bash
cd python/raft-dask
py.test -s -v
cd python/pylibraft
py.test -s -v
```
The Python packages can also be uninstalled using the `build.sh` script:
```bash
./build.sh pylibraft raft-dask --uninstall
```
### Using CMake directly
When building RAFT from source, the `build.sh` script offers a nice wrapper around the `cmake` commands to ease the burdens of manually configuring the various available cmake options. When more fine-grained control over the CMake configuration is desired, the `cmake` command can be invoked directly as the below example demonstrates.
The `CMAKE_INSTALL_PREFIX` installs RAFT into a specific location. The example below installs RAFT into the current Conda environment:
```bash
cd cpp
mkdir build
cd build
cmake -D BUILD_TESTS=ON -DRAFT_COMPILE_LIBRARY=ON -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX ../
make -j<parallel_level> install
```
RAFT's CMake has the following configurable flags available:
| Flag | Possible Values | Default Value | Behavior |
|---------------------------------|----------------------| --- |------------------------------------------------------------------------------|
| BUILD_TESTS | ON, OFF | ON | Compile Googletests |
| BUILD_PRIMS_BENCH | ON, OFF | OFF | Compile benchmarks |
| BUILD_ANN_BENCH | ON, OFF | OFF | Compile end-to-end ANN benchmarks |
| CUDA_ENABLE_KERNELINFO | ON, OFF | OFF | Enables `kernelinfo` in nvcc. This is useful for `compute-sanitizer` |
| CUDA_ENABLE_LINEINFO | ON, OFF | OFF | Enable the -lineinfo option for nvcc |
| CUDA_STATIC_RUNTIME | ON, OFF | OFF | Statically link the CUDA runtime |
| DETECT_CONDA_ENV | ON, OFF | ON | Enable detection of conda environment for dependencies |
| raft_FIND_COMPONENTS | compiled distributed | | Configures the optional components as a space-separated list |
| RAFT_COMPILE_LIBRARY | ON, OFF | ON if either BUILD_TESTS or BUILD_PRIMS_BENCH is ON; otherwise OFF | Compiles all `libraft` shared libraries (these are required for Googletests) |
| RAFT_ENABLE_CUBLAS_DEPENDENCY | ON, OFF | ON | Link against cublas library in `raft::raft` |
| RAFT_ENABLE_CUSOLVER_DEPENDENCY | ON, OFF | ON | Link against cusolver library in `raft::raft` |
| RAFT_ENABLE_CUSPARSE_DEPENDENCY | ON, OFF | ON | Link against cusparse library in `raft::raft` |
| RAFT_ENABLE_CURAND_DEPENDENCY | ON, OFF | ON | Link against curand library in `raft::raft` |
| RAFT_NVTX | ON, OFF | OFF | Enable NVTX Markers |
### Build documentation
The documentation requires that the C++ and Python libraries have been built and installed. The following will build the docs along with the C++ and Python packages:
```
./build.sh libraft pylibraft raft-dask docs --compile-lib
```
## Using RAFT C++ in downstream projects
There are a few different strategies for including RAFT in downstream projects, depending on whether the [required build dependencies](#build-dependencies) have already been installed and are available on the `lib` and `include` search paths.
When using the GPU parts of RAFT, you will need to enable CUDA support in your CMake project declaration:
```cmake
project(YOUR_PROJECT VERSION 0.1 LANGUAGES CXX CUDA)
```
Note that some additional compiler flags might need to be added when building against RAFT. For example, if you see an error like this `The experimental flag '--expt-relaxed-constexpr' can be used to allow this.`. The necessary flags can be set with CMake:
```cmake
target_compile_options(your_target_name PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:--expt-extended-lambda --expt-relaxed-constexpr>)
```
Further, it's important that the language level be set to at least C++ 17. This can be done with cmake:
```cmake
set_target_properties(your_target_name
PROPERTIES CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON
INTERFACE_POSITION_INDEPENDENT_CODE ON)
```
The [C++ example template project](https://github.com/rapidsai/raft/tree/HEAD/cpp/template) provides an end-to-end buildable example of what a `CMakeLists.txt` that uses RAFT should look like. The items below point out some of the needed details.
#### CMake Targets
The `raft::raft` CMake target is made available when including RAFT into your CMake project but additional CMake targets can be made available by adding to the `COMPONENTS` option in CMake's `find_package(raft)` (refer to [CMake docs](https://cmake.org/cmake/help/latest/command/find_package.html#basic-signature) to learn more). The components should be separated by spaces. The `raft::raft` target will always be available. Note that the `distributed` component also exports additional dependencies.
| Component | Target | Description | Base Dependencies |
|-------------|---------------------|----------------------------------------------------------|----------------------------------------|
| n/a | `raft::raft` | Full RAFT header library | CUDA toolkit, RMM, NVTX, CCCL, CUTLASS |
| compiled | `raft::compiled` | Pre-compiled template instantiations and runtime library | raft::raft |
| distributed | `raft::distributed` | Dependencies for `raft::comms` APIs | raft::raft, UCX, NCCL | 0 |
rapidsai_public_repos/cuvs/docs | rapidsai_public_repos/cuvs/docs/source/index.rst | RAPIDS RAFT: Reusable Accelerated Functions and Tools for Vector Search and More
================================================================================
.. image:: ../../img/raft-tech-stack-vss.png
:width: 800
:alt: RAFT Tech Stack
Useful Resources
################
.. _raft_reference: https://docs.rapids.ai/api/raft/stable/
- `Example Notebooks <https://github.com/rapidsai/raft/tree/HEAD/notebooks>`_: Example Jupyter notebooks
- `RAPIDS Community <https://rapids.ai/community.html>`_: Get help, contribute, and collaborate.
- `GitHub repository <https://github.com/rapidsai/raft>`_: Download the RAFT source code.
- `Issue tracker <https://github.com/rapidsai/raft/issues>`_: Report issues or request features.
What is RAFT?
#############
RAFT contains fundamental widely-used algorithms and primitives for machine learning and information retrieval. The algorithms are CUDA-accelerated and form building blocks for more easily writing high performance applications.
By taking a primitives-based approach to algorithm development, RAFT
- accelerates algorithm construction time
- reduces the maintenance burden by maximizing reuse across projects, and
- centralizes core reusable computations, allowing future optimizations to benefit all algorithms that use them.
While not exhaustive, the following general categories help summarize the accelerated building blocks that RAFT contains:
.. list-table::
:widths: 25 50
:header-rows: 1
* - Category
- Examples
* - Nearest Neighbors
- pairwise distances, vector search, epsilon neighborhoods, neighborhood graph construction
* - Data Formats
- sparse & dense, conversions, data generation
* - Dense Operations
- linear algebra, matrix and vector operations, slicing, norms, factorization, least squares, svd & eigenvalue problems
* - Sparse Operations
- linear algebra, eigenvalue problems, slicing, norms, reductions, factorization, symmetrization, components & labeling
* - Basic Clustering
- spectral clustering, hierarchical clustering, k-means
* - Solvers
- combinatorial optimization, iterative solvers
* - Statistics
- sampling, moments and summary statistics, metrics
* - Tools & Utilities
- common utilities for developing CUDA applications, multi-node multi-gpu infrastructure
.. toctree::
:maxdepth: 1
:caption: Contents:
quick_start.md
build.md
cpp_api.rst
pylibraft_api.rst
using_libraft.md
vector_search_tutorial.md
raft_ann_benchmarks.md
raft_dask_api.rst
using_raft_comms.rst
developer_guide.md
contributing.md
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| 0 |
rapidsai_public_repos/cuvs/docs | rapidsai_public_repos/cuvs/docs/source/developer_guide.md | # Developer Guide
## General
Please start by reading the [Contributor Guide](contributing.md).
## Performance
1. In performance critical sections of the code, favor `cudaDeviceGetAttribute` over `cudaDeviceGetProperties`. See corresponding CUDA devblog [here](https://devblogs.nvidia.com/cuda-pro-tip-the-fast-way-to-query-device-properties/) to know more.
2. If an algo requires you to launch GPU work in multiple cuda streams, do not create multiple `raft::resources` objects, one for each such work stream. Instead, use the stream pool configured on the given `raft::resources` instance's `raft::resources::get_stream_from_stream_pool()` to pick up the right cuda stream. Refer to the section on [CUDA Resources](#resource-management) and the section on [Threading](#threading-model) for more details. TIP: use `raft::resources::get_stream_pool_size()` to know how many such streams are available at your disposal.
## Local Development
Developing features and fixing bugs for the RAFT library itself is straightforward and only requires building and installing the relevant RAFT artifacts.
The process for working on a CUDA/C++ feature which might span RAFT and one or more consuming libraries can vary slightly depending on whether the consuming project relies on a source build (as outlined in the [BUILD](BUILD.md#install_header_only_cpp) docs). In such a case, the option `CPM_raft_SOURCE=/path/to/raft/source` can be passed to the cmake of the consuming project in order to build the local RAFT from source. The PR with relevant changes to the consuming project can also pin the RAFT version temporarily by explicitly changing the `FORK` and `PINNED_TAG` arguments to the RAFT branch containing their changes when invoking `find_and_configure_raft`. The pin should be reverted after the change is merged to the RAFT project and before it is merged to the dependent project(s) downstream.
If building a feature which spans projects and not using the source build in cmake, the RAFT changes (both C++ and Python) will need to be installed into the environment of the consuming project before they can be used. The ideal integration of RAFT into consuming projects will enable both the source build in the consuming project only for this case but also rely on a more stable packaging (such as conda packaging) otherwise.
## Threading Model
With the exception of the `raft::resources`, RAFT algorithms should maintain thread-safety and are, in general,
assumed to be single threaded. This means they should be able to be called from multiple host threads so
long as different instances of `raft::resources` are used.
Exceptions are made for algorithms that can take advantage of multiple CUDA streams within multiple host threads
in order to oversubscribe or increase occupancy on a single GPU. In these cases, the use of multiple host
threads within RAFT algorithms should be used only to maintain concurrency of the underlying CUDA streams.
Multiple host threads should be used sparingly, be bounded, and should steer clear of performing CPU-intensive
computations.
A good example of an acceptable use of host threads within a RAFT algorithm might look like the following
```cpp
#include <raft/core/resources.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cuda_stream_pool.hpp>
raft::resources res;
...
sync_stream(res);
...
int n_streams = get_stream_pool_size(res);
#pragma omp parallel for num_threads(n_streams)
for(int i = 0; i < n; i++) {
int thread_num = omp_get_thread_num() % n_streams;
cudaStream_t s = get_stream_from_stream_pool(res, thread_num);
... possible light cpu pre-processing ...
my_kernel1<<<b, tpb, 0, s>>>(...);
...
... some possible async d2h / h2d copies ...
my_kernel2<<<b, tpb, 0, s>>>(...);
...
sync_stream(res, s);
... possible light cpu post-processing ...
}
```
In the example above, if there is no CPU pre-processing at the beginning of the for-loop, an event can be registered in
each of the streams within the for-loop to make them wait on the stream from the handle. If there is no CPU post-processing
at the end of each for-loop iteration, `sync_stream(res, s)` can be replaced with a single `sync_stream_pool(res)`
after the for-loop.
To avoid compatibility issues between different threading models, the only threading programming allowed in RAFT is OpenMP.
Though RAFT's build enables OpenMP by default, RAFT algorithms should still function properly even when OpenMP has been
disabled. If the CPU pre- and post-processing were not needed in the example above, OpenMP would not be needed.
The use of threads in third-party libraries is allowed, though they should still avoid depending on a specific OpenMP runtime.
## Public Interface
### General guidelines
Functions exposed via the C++ API must be stateless. Things that are OK to be exposed on the interface:
1. Any [POD](https://en.wikipedia.org/wiki/Passive_data_structure) - see [std::is_pod](https://en.cppreference.com/w/cpp/types/is_pod) as a reference for C++11 POD types.
2. `raft::resources` - since it stores resource-related state which has nothing to do with model/algo state.
3. Avoid using pointers to POD types (explicitly putting it out, even though it can be considered as a POD) and pass the structures by reference instead.
Internal to the C++ API, these stateless functions are free to use their own temporary classes, as long as they are not exposed on the interface.
4. Accept single- (`raft::span`) and multi-dimensional views (`raft::mdspan`) and validate their metadata wherever possible.
5. Prefer `std::optional` for any optional arguments (e.g. do not accept `nullptr`)
6. All public APIs should be lightweight wrappers around calls to private APIs inside the `detail` namespace.
### API stability
Since RAFT is a core library with multiple consumers, it's important that the public APIs maintain stability across versions and any changes to them are done with caution, adding new functions and deprecating the old functions over a couple releases as necessary.
### Stateless C++ APIs
Using the IVF-PQ algorithm as an example, the following way of exposing its API would be wrong according to the guidelines in this section, since it exposes a non-POD C++ class object in the C++ API:
```cpp
template <typename value_t, typename idx_t>
class ivf_pq {
ivf_pq_params params_;
raft::resources const& res_;
public:
ivf_pq(raft::resources const& res);
void train(raft::device_matrix<value_t, idx_t, raft::row_major> dataset);
void search(raft::device_matrix<value_t, idx_t, raft::row_major> queries,
raft::device_matrix<value_t, idx_t, raft::row_major> out_inds,
raft::device_matrix<value_t, idx_t, raft::row_major> out_dists);
};
```
An alternative correct way to expose this could be:
```cpp
namespace raft::ivf_pq {
template<typename value_t, typename idx_t>
void ivf_pq_train(raft::resources const& res, const raft::ivf_pq_params &params, raft::ivf_pq_index &index,
                  raft::device_matrix<value_t, idx_t, raft::row_major> dataset);

template<typename value_t, typename idx_t>
void ivf_pq_search(raft::resources const& res, raft::ivf_pq_params const &params, raft::ivf_pq_index const &index,
                   raft::device_matrix<value_t, idx_t, raft::row_major> queries,
                   raft::device_matrix<value_t, idx_t, raft::row_major> out_inds,
                   raft::device_matrix<value_t, idx_t, raft::row_major> out_dists);
}
```
### Other functions on state
These guidelines also mean that it is the responsibility of C++ API to expose methods to load and store (aka marshalling) such a data structure. Further continuing the IVF-PQ example, the following methods could achieve this:
```cpp
namespace raft::ivf_pq {
void save(raft::ivf_pq_index const& model, std::ostream &os);
void load(raft::ivf_pq_index& model, std::istream &is);
}
```
## Coding style
### Code Formatting
#### Using pre-commit hooks
RAFT uses [pre-commit](https://pre-commit.com/) to execute all code linters and formatters. These
tools ensure a consistent code format throughout the project. Using pre-commit ensures that linter
versions and options are aligned for all developers. Additionally, there is a CI check in place to
enforce that committed code follows our standards.
To use `pre-commit`, install via `conda` or `pip`:
```bash
conda install -c conda-forge pre-commit
```
```bash
pip install pre-commit
```
Then run pre-commit hooks before committing code:
```bash
pre-commit run
```
By default, pre-commit runs on staged files (only changes and additions that will be committed).
To run pre-commit checks on all files, execute:
```bash
pre-commit run --all-files
```
Optionally, you may set up the pre-commit hooks to run automatically when you make a git commit. This can be done by running:
```bash
pre-commit install
```
Now code linters and formatters will be run each time you commit changes.
You can skip these checks with `git commit --no-verify` or with the short version `git commit -n`.
#### Summary of pre-commit hooks
The following section describes some of the core pre-commit hooks used by the repository.
See `.pre-commit-config.yaml` for a full list.
C++/CUDA is formatted with [`clang-format`](https://clang.llvm.org/docs/ClangFormat.html).
RAFT relies on `clang-format` to enforce code style across all C++ and CUDA source code. The coding style is based on the [Google style guide](https://google.github.io/styleguide/cppguide.html#Formatting). The only digressions from this style are the following.
1. Do not split empty functions/records/namespaces.
2. Two-space indentation everywhere, including the line continuations.
3. Disable reflowing of comments.
The reasons behind these deviations from the Google style guide are given in comments [here](https://github.com/rapidsai/raft/blob/branch-24.02/cpp/.clang-format).
[`doxygen`](https://doxygen.nl/) is used as documentation generator and also as a documentation linter.
In order to run doxygen as a linter on C++/CUDA code, run
```bash
./ci/checks/doxygen.sh
```
Python code runs several linters including [Black](https://black.readthedocs.io/en/stable/),
[isort](https://pycqa.github.io/isort/), and [flake8](https://flake8.pycqa.org/en/latest/).
RAFT also uses [codespell](https://github.com/codespell-project/codespell) to find spelling
mistakes, and this check is run as a pre-commit hook. To apply the suggested spelling fixes,
you can run `codespell -i 3 -w .` from the repository root directory.
This will bring up an interactive prompt to select which spelling fixes to apply.
### #include style
[include_checker.py](https://github.com/rapidsai/raft/blob/branch-24.02/cpp/scripts/include_checker.py) is used to enforce the include style as follows:
1. `#include "..."` should be used for referencing local files only. It is acceptable to be used for referencing files in a sub-folder/parent-folder of the same algorithm, but should never be used to include files in other algorithms or between algorithms and the primitives or other dependencies.
2. `#include <...>` should be used for referencing everything else
Manually, run the following to bulk-fix include style issues:
```bash
python ./cpp/scripts/include_checker.py --inplace [cpp/include cpp/test ... list of folders which you want to fix]
```
### Copyright header
[copyright.py](https://github.com/rapidsai/raft/blob/branch-24.02/ci/checks/copyright.py) checks the Copyright header for all git-modified files
Manually, you can run the following to bulk-fix the header if only the years need to be updated:
```bash
python ./ci/checks/copyright.py --update-current-year
```
Keep in mind that this only applies to files tracked by git and having been modified.
## Error handling
Call CUDA APIs via the provided helper macros `RAFT_CUDA_TRY`, `RAFT_CUBLAS_TRY` and `RAFT_CUSOLVER_TRY`. These macros take care of checking the return values of the used API calls and generate an exception when the command is not successful. If you need to avoid an exception, e.g. inside a destructor, use `RAFT_CUDA_TRY_NO_THROW`, `RAFT_CUBLAS_TRY_NO_THROW` and `RAFT_CUSOLVER_TRY_NO_THROW`. These macros log the error but do not throw an exception.
## Logging
### Introduction
Anything and everything about logging is defined inside [logger.hpp](https://github.com/rapidsai/raft/blob/branch-24.02/cpp/include/raft/core/logger.hpp). It uses [spdlog](https://github.com/gabime/spdlog) underneath, but this information is transparent to all.
### Usage
```cpp
#include <raft/core/logger.hpp>
// Inside your method or function, use any of these macros
RAFT_LOG_TRACE("Hello %s!", "world");
RAFT_LOG_DEBUG("Hello %s!", "world");
RAFT_LOG_INFO("Hello %s!", "world");
RAFT_LOG_WARN("Hello %s!", "world");
RAFT_LOG_ERROR("Hello %s!", "world");
RAFT_LOG_CRITICAL("Hello %s!", "world");
```
### Changing logging level
There are 7 logging levels with each successive level becoming quieter:
1. RAFT_LEVEL_TRACE
2. RAFT_LEVEL_DEBUG
3. RAFT_LEVEL_INFO
4. RAFT_LEVEL_WARN
5. RAFT_LEVEL_ERROR
6. RAFT_LEVEL_CRITICAL
7. RAFT_LEVEL_OFF
Pass one of these as per your needs into the `set_level()` method as follows:
```cpp
raft::logger::get().set_level(RAFT_LEVEL_WARN);
// From now onwards, this will print only WARN and above kind of messages
```
### Changing logging pattern
Pass the [format string](https://github.com/gabime/spdlog/wiki/3.-Custom-formatting) as follows in order use a different logging pattern than the default.
```cpp
raft::logger::get().set_pattern(YourFavoriteFormat);
```
One can also use the corresponding `get_pattern()` method to know the current format as well.
### Temporarily changing the logging pattern
Sometimes, we need to temporarily change the log pattern (eg: for reporting decision tree structure). This can be achieved in a RAII-like approach as follows:
```cpp
{
PatternSetter _(MyNewTempFormat);
// new log format is in effect from here onwards
doStuff();
// once the above temporary object goes out-of-scope, the old format will be restored
}
```
### Tips
* Do NOT end your logging messages with a newline! It is automatically added by spdlog.
* The `RAFT_LOG_TRACE()` is by default not compiled due to the `RAFT_ACTIVE_LEVEL` macro setup, for performance reasons. If you need it to be enabled, change this macro accordingly during compilation time
## Common Design Considerations
1. Use the `hpp` extension for files which can be compiled with `gcc` against the CUDA-runtime. Use the `cuh` extension for files which require `nvcc` to be compiled. `hpp` can also be used for functions marked `__host__ __device__` only if proper checks are in place to remove the `__device__` designation when not compiling with `nvcc`.
2. When additional classes, structs, or general POCO types are needed to be used for representing data in the public API, place them in a new file called `<primitive_name>_types.hpp`. This tells users they are safe to expose these types on their own public APIs without bringing in device code. At a minimum, the definitions for these types, at least, should not require `nvcc`. In general, these classes should only store very simple state and should not perform their own computations. Instead, new functions should be exposed on the public API which accept these objects, reading or updating their state as necessary.
3. Documentation for public APIs should be well documented, easy to use, and it is highly preferred that they include usage instructions.
4. Before creating a new primitive, check to see if one exists already. If one exists but the API isn't flexible enough to include your use-case, consider first refactoring the existing primitive. If that is not possible without an extreme number of changes, consider how the public API could be made more flexible. If the new primitive is different enough from all existing primitives, consider whether an existing public API could invoke the new primitive as an option or argument. If the new primitive is different enough from what exists already, add a header for the new public API function to the appropriate subdirectory and namespace.
## Header organization of expensive function templates
RAFT is a heavily templated library. Several core functions are expensive to compile and we want to prevent duplicate compilation of this functionality. To limit build time, RAFT provides a precompiled library (libraft.so) where expensive function templates are instantiated for the most commonly used template parameters. To prevent (1) accidental instantiation of these templates and (2) unnecessary dependency on the internals of these templates, we use a split header structure and define macros to control template instantiation. This section describes the macros and header structure.
**Macros.** We define the macros `RAFT_COMPILED` and `RAFT_EXPLICIT_INSTANTIATE_ONLY`. The `RAFT_COMPILED` macro is defined by `CMake` when compiling code that (1) is part of `libraft.so` or (2) is linked with `libraft.so`. It indicates that a precompiled `libraft.so` is present at runtime.
The `RAFT_EXPLICIT_INSTANTIATE_ONLY` macro is defined by `CMake` during compilation of `libraft.so` itself. When defined, it indicates that implicit instantiations of expensive function templates are forbidden (they result in a compiler error). In the RAFT project, we additionally define this macro during compilation of the tests and benchmarks.
Below, we summarize which combinations of `RAFT_COMPILED` and `RAFT_EXPLICIT_INSTANTIATE_ONLY` are used in practice and what the effect of the combination is.
| RAFT_COMPILED | RAFT_EXPLICIT_INSTANTIATE_ONLY | Which targets |
|---------------|--------------------------------|------------------------------------------------------------------------------------------------------|
| defined | defined | `raft::compiled`, RAFT tests, RAFT benchmarks |
| defined | | Downstream libraries depending on `libraft` like cuML, cuGraph. |
| | | Downstream libraries depending on `libraft-headers` like cugraph-ops. |
| RAFT_COMPILED | RAFT_EXPLICIT_INSTANTIATE_ONLY | Effect |
|---------------|--------------------------------|-------------------------------------------------------------------------------------------------------|
| defined | defined | Templates are precompiled. Compiler error on accidental instantiation of expensive function template. |
| defined | | Templates are precompiled. Implicit instantiation allowed. |
| | | Nothing precompiled. Implicit instantiation allowed. |
| | defined | Avoid this: nothing precompiled. Compiler error on any instantiation of expensive function template. |
**Header organization.** Any header file that defines an expensive function template (say `expensive.cuh`) should be split in three parts: `expensive.cuh`, `expensive-inl.cuh`, and `expensive-ext.cuh`. The file `expensive-inl.cuh` ("inl" for "inline") contains the template definitions, i.e., the actual code. The file `expensive.cuh` includes one or both of the other two files, depending on the values of the `RAFT_COMPILED` and `RAFT_EXPLICIT_INSTANTIATE_ONLY` macros. The file `expensive-ext.cuh` contains `extern template` instantiations. In addition, if `RAFT_EXPLICIT_INSTANTIATE_ONLY` is set, it contains template definitions to ensure that a compiler error is raised in case of accidental instantiation.
The dispatching by `expensive.cuh` is performed as follows:
``` c++
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
// If implicit instantiation is allowed, include template definitions.
#include "expensive-inl.cuh"
#endif
#ifdef RAFT_COMPILED
// Include extern template instantiations when RAFT is compiled.
#include "expensive-ext.cuh"
#endif
```
The file `expensive-inl.cuh` is unchanged:
``` c++
namespace raft {
template <typename T>
void expensive(T arg) {
// .. function body
}
} // namespace raft
```
The file `expensive-ext.cuh` contains the following:
``` c++
#include <raft/util/raft_explicit.cuh> // RAFT_EXPLICIT
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace raft {
// (1) define templates to raise an error in case of accidental instantiation
template <typename T> void expensive(T arg) RAFT_EXPLICIT;
} // namespace raft
#endif //RAFT_EXPLICIT_INSTANTIATE_ONLY
// (2) Provide extern template instantiations.
extern template void raft::expensive<int>(int);
extern template void raft::expensive<float>(float);
```
This header has two responsibilities: (1) define templates to raise an error in case of accidental instantiation and (2) provide `extern template` instantiations.
First, if `RAFT_EXPLICIT_INSTANTIATE_ONLY` is set, `expensive` is defined. This is done for two reasons: (1) to give a definition, because the definition in `expensive-inl.cuh` was skipped and (2) to indicate that the template should be explicitly instantiated by tagging it with the `RAFT_EXPLICIT` macro. This macro defines the function body, and it ensures that an informative error message is generated when an implicit instantiation erroneously occurs. Finally, the `extern template` instantiations are listed.
To actually generate the code for the template instances, the file `src/expensive.cu` contains the following. Note that the only difference between the extern template instantiations in `expensive-ext.cuh` and these lines are the removal of the word `extern`:
``` c++
#include <raft/expensive-inl.cuh>
template void raft::expensive<int>(int);
template void raft::expensive<float>(float);
```
**Design considerations**:
1. In the `-ext.cuh` header, do not include implementation headers. Only include function parameter types and types that are used to instantiate the templates. If a primitive takes custom parameter types, define them in a separate header called `<primitive_name>_types.hpp`. (see [Common Design Considerations](https://github.com/rapidsai/raft/blob/7b065aff81a0b1976e2a9e2f3de6690361a1111b/docs/source/developer_guide.md#common-design-considerations)).
2. Keep docstrings in the `-inl.cuh` header, as it is closer to the code. Remove docstrings from template definitions in the `-ext.cuh` header. Make sure to explicitly include public APIs in the RAFT API docs. That is, add `#include <raft/expensive.cuh>` to the docs in `docs/source/cpp_api/expensive.rst` (instead of `#include <raft/expensive-inl.cuh>`).
3. The order of inclusion in `expensive.cuh` is extremely important. If `RAFT_EXPLICIT_INSTANTIATE_ONLY` is not defined, but `RAFT_COMPILED` is defined, then we must include the template definitions before the `extern template` instantiations.
4. If a header file defines multiple expensive templates, it can be that one of them is not instantiated. In this case, **do define** the template with `RAFT_EXPLICIT` in the `-ext` header. This way, when the template is instantiated, the developer gets a helpful error message instead of a confusing "function not found".
This header structure was proposed in [issue #1416](https://github.com/rapidsai/raft/issues/1416), which contains more background on the motivation of this structure and the mechanics of C++ template instantiation.
## Testing
It's important for RAFT to maintain a high test coverage of the public APIs in order to minimize the potential for downstream projects to encounter unexpected build or runtime behavior as a result of changes.
A well-defined public API can help maintain compile-time stability but means more focus should be placed on testing the functional requirements and verifying execution on the various edge cases within RAFT itself. Ideally, bug fixes and new features should be able to be made to RAFT independently of the consuming projects.
## Documentation
Public APIs always require documentation since those will be exposed directly to users. For C++, we use [doxygen](http://www.doxygen.nl) and for Python/cython we use [pydoc](https://docs.python.org/3/library/pydoc.html). In addition to summarizing the purpose of each class / function in the public API, the arguments (and relevant templates) should be documented along with brief usage examples.
## Asynchronous operations and stream ordering
All RAFT algorithms should be as asynchronous as possible avoiding the use of the default stream (aka as NULL or `0` stream). Implementations that require only one CUDA Stream should use the stream from `raft::resources`:
```cpp
#include <raft/core/resources.hpp>
#include <raft/core/resource/cuda_stream.hpp>
void foo(const raft::resources& res, ...)
{
cudaStream_t stream = get_cuda_stream(res);
}
```
When multiple streams are needed, e.g. to manage a pipeline, use the internal streams available in `raft::resources` (see [CUDA Resources](#cuda-resources)). If multiple streams are used all operations still must be ordered according to `raft::resource::get_cuda_stream()` (from `raft/core/resource/cuda_stream.hpp`). Before any operation in any of the internal CUDA streams is started, all previous work in `raft::resource::get_cuda_stream()` must have completed. Any work enqueued in `raft::resource::get_cuda_stream()` after a RAFT function returns should not start before all work enqueued in the internal streams has completed. E.g. if a RAFT algorithm is called like this:
```cpp
#include <raft/core/resources.hpp>
#include <raft/core/resource/cuda_stream.hpp>
void foo(const double* srcdata, double* result)
{
cudaStream_t stream;
CUDA_RT_CALL( cudaStreamCreate( &stream ) );
raft::resources res;
set_cuda_stream(res, stream);
...
RAFT_CUDA_TRY( cudaMemcpyAsync( srcdata, h_srcdata.data(), n*sizeof(double), cudaMemcpyHostToDevice, stream ) );
raft::algo(res, dopredict, srcdata, result, ... );
RAFT_CUDA_TRY( cudaMemcpyAsync( h_result.data(), result, m*sizeof(int), cudaMemcpyDeviceToHost, stream ) );
...
}
```
No work in any stream should start in `raft::algo` before the `cudaMemcpyAsync` in `stream` launched before the call to `raft::algo` is done. And all work in all streams used in `raft::algo` should be done before the `cudaMemcpyAsync` in `stream` launched after the call to `raft::algo` starts.
This can be ensured by introducing interstream dependencies with CUDA events and `cudaStreamWaitEvent`. For convenience, the header `raft/core/device_resources.hpp` provides the class `raft::stream_syncer` which lets all `raft::resources` internal CUDA streams wait on `raft::resource::get_cuda_stream()` in its constructor and in its destructor and lets `raft::resource::get_cuda_stream()` wait on all work enqueued in the `raft::resources` internal CUDA streams. The intended use would be to create a `raft::stream_syncer` object as the first thing in an entry function of the public RAFT API:
```cpp
namespace raft {
void algo(const raft::resources& res, ...)
{
raft::stream_syncer _(res);
}
}
```
This ensures the stream ordering behavior described above.
### Using Thrust
To ensure that thrust algorithms are executed in the intended stream the `thrust::cuda::par` execution policy should be used. To ensure that thrust algorithms allocate temporary memory via the provided device memory allocator, use the `rmm::exec_policy` available in `raft/core/resource/thrust_policy.hpp`, which can be used through `raft::resources`:
```cpp
#include <raft/core/resources.hpp>
#include <raft/core/resource/thrust_policy.hpp>
void foo(const raft::resources& res, ...)
{
auto execution_policy = get_thrust_policy(res);
thrust::for_each(execution_policy, ... );
}
```
## Resource Management
Do not create reusable CUDA resources directly in implementations of RAFT algorithms. Instead, use the existing resources in `raft::resources` to avoid constant creation and deletion of reusable resources such as CUDA streams, CUDA events or library handles. Please file a feature request if a resource handle is missing in `raft::resources`.
The resources can be obtained like this
```cpp
#include <raft/core/resources.hpp>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream_pool.hpp>
void foo(const raft::resources& h, ...)
{
cublasHandle_t cublasHandle = get_cublas_handle(h);
const int num_streams = get_stream_pool_size(h);
const int stream_idx = ...
cudaStream_t stream = get_stream_from_stream_pool(stream_idx);
...
}
```
The example below shows one way to create `n_stream` number of internal cuda streams with an `rmm::stream_pool` which can later be used by the algos inside RAFT.
```cpp
#include <raft/core/resources.hpp>
#include <raft/core/resource/cuda_stream_pool.hpp>
#include <rmm/cuda_stream_pool.hpp>
int main(int argc, char** argv)
{
int n_streams = argc > 1 ? atoi(argv[1]) : 0;
raft::resources res;
set_cuda_stream_pool(res, std::make_shared<rmm::cuda_stream_pool>(n_streams));
foo(res, ...);
}
```
## Multi-GPU
The multi-GPU paradigm of RAFT is **O**ne **P**rocess per **G**PU (OPG). Each algorithm should be implemented in a way that it can run with a single GPU without any specific dependencies to a particular communication library. A multi-GPU implementation should use the methods offered by the class `raft::comms::comms_t` from `raft/core/comms.hpp` for inter-rank/GPU communication. It is the responsibility of the user of RAFT to create an initialized instance of `raft::comms::comms_t`.
E.g. with a CUDA-aware MPI, a RAFT user could use code like this to inject an initialized instance of `raft::comms::mpi_comms` into a `raft::resources`:
```cpp
#include <mpi.h>
#include <raft/core/resources.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <raft/algo.hpp>
...
int main(int argc, char * argv[])
{
MPI_Init(&argc, &argv);
int rank = -1;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
int local_rank = -1;
{
MPI_Comm local_comm;
MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL, &local_comm);
MPI_Comm_rank(local_comm, &local_rank);
MPI_Comm_free(&local_comm);
}
cudaSetDevice(local_rank);
MPI_Comm raft_mpi_comms;
MPI_Comm_dup(MPI_COMM_WORLD, &raft_mpi_comms);
{
raft::resources res;
initialize_mpi_comms(res, raft_mpi_comms);
...
raft::algo(res, ... );
}
MPI_Comm_free(&raft_mpi_comms);
MPI_Finalize();
return 0;
}
```
A RAFT developer can assume the following:
* An instance of `raft::comms::comms_t` was correctly initialized.
* All processes that are part of `raft::comms::comms_t` call into the RAFT algorithm cooperatively.
The initialized instance of `raft::comms::comms_t` can be accessed from the `raft::resources` instance:
```cpp
#include <raft/core/resources.hpp>
#include <raft/core/resource/comms.hpp>
void foo(const raft::resources& res, ...)
{
const raft::comms_t& communicator = get_comms(res);
const int rank = communicator.get_rank();
const int size = communicator.get_size();
...
}
```
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/_static/references.css |
/* Fix references to not look like parameters */
dl.citation > dt.label {
display: unset !important;
float: left !important;
border: unset !important;
background: unset !important;
padding: unset !important;
margin: unset !important;
font-size: unset !important;
line-height: unset !important;
padding-right: 0.5rem !important;
}
/* Add opening bracket */
dl.citation > dt.label > span::before {
content: "[";
}
/* Add closing bracket */
dl.citation > dt.label > span::after {
content: "]";
} | 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/sphinxext/github_link.py | # This contains code with copyright by the scikit-learn project, subject to the
# license in /thirdparty/LICENSES/LICENSE.scikit_learn
import inspect
import os
import re
import subprocess
import sys
from functools import partial
from operator import attrgetter
orig = inspect.isfunction
# See https://opendreamkit.org/2017/06/09/CythonSphinx/
def isfunction(obj):
orig_val = orig(obj)
new_val = hasattr(type(obj), "__code__")
if (orig_val != new_val):
return new_val
return orig_val
inspect.isfunction = isfunction
REVISION_CMD = 'git rev-parse --short HEAD'
source_regex = re.compile(r"^File: (.*?) \(starting at line ([0-9]*?)\)$",
re.MULTILINE)
def _get_git_revision():
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except (subprocess.CalledProcessError, OSError):
print('Failed to execute git to get revision')
return None
return revision.decode('utf-8')
def _linkcode_resolve(domain, info, package, url_fmt, revision):
"""Determine a link to online source for a class/method/function
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='http://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""
if revision is None:
return
if domain not in ('py', 'pyx'):
return
if not info.get('module') or not info.get('fullname'):
return
class_name = info['fullname'].split('.')[0]
module = __import__(info['module'], fromlist=[class_name])
obj = attrgetter(info['fullname'])(module)
# Unwrap the object to get the correct source
# file in case that is wrapped by a decorator
obj = inspect.unwrap(obj)
fn: str = None
lineno: str = None
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
# Possibly Cython code. Search docstring for source
m = source_regex.search(obj.__doc__)
if (m is not None):
source_file = m.group(1)
lineno = m.group(2)
# fn is expected to be the absolute path.
fn = os.path.relpath(source_file, start=package)
print("{}:{}".format(
os.path.abspath(os.path.join("..", "python", "cuml", fn)),
lineno))
else:
return
else:
# Test if we are absolute or not (pyx are relative)
if (not os.path.isabs(fn)):
# Should be relative to docs right now
fn = os.path.abspath(os.path.join("..", "python", fn))
# Convert to relative from module root
fn = os.path.relpath(fn,
start=os.path.dirname(
__import__(package).__file__))
# Get the line number if we need it. (Can work without it)
if (lineno is None):
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
# Can happen if its a cyfunction. See if it has `__code__`
if (hasattr(obj, "__code__")):
lineno = obj.__code__.co_firstlineno
else:
lineno = ''
return url_fmt.format(revision=revision,
package=package,
path=fn,
lineno=lineno)
def make_linkcode_resolve(package, url_fmt):
    """Returns a linkcode_resolve function for the given URL format

    revision is a git commit reference (hash or name)

    package is the name of the root module of the package

    url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
                                   'blob/{revision}/{package}/'
                                   '{path}#L{lineno}')
    """
    # Resolve the revision once at setup time so that every generated link
    # points at the same commit.
    revision = _get_git_revision()

    def resolve(domain, info):
        # Signature matches what sphinx.ext.linkcode calls.
        return _linkcode_resolve(domain, info, revision=revision,
                                 package=package, url_fmt=url_fmt)

    return resolve
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/python_api/distance.rst | Distance
========
This page provides `pylibraft` class references for the publicly-exposed elements of the `pylibraft.distance` package. RAFT's
distances have been highly optimized and support a wide assortment of different distance measures.
.. role:: py(code)
:language: python
:class: highlight
.. autofunction:: pylibraft.distance.pairwise_distance
.. autofunction:: pylibraft.distance.fused_l2_nn_argmin
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/python_api/neighbors.rst | Neighbors
=========
This page provides pylibraft class references for the publicly-exposed elements of the neighbors package.
.. role:: py(code)
:language: python
:class: highlight
Brute Force
###########
.. autofunction:: pylibraft.neighbors.brute_force.knn
CAGRA
#####
.. autoclass:: pylibraft.neighbors.cagra.IndexParams
:members:
.. autofunction:: pylibraft.neighbors.cagra.build
.. autoclass:: pylibraft.neighbors.cagra.SearchParams
:members:
.. autofunction:: pylibraft.neighbors.cagra.search
Serializer Methods
------------------
.. autofunction:: pylibraft.neighbors.cagra.save
.. autofunction:: pylibraft.neighbors.cagra.load
IVF-Flat
########
.. autoclass:: pylibraft.neighbors.ivf_flat.IndexParams
:members:
.. autofunction:: pylibraft.neighbors.ivf_flat.build
.. autofunction:: pylibraft.neighbors.ivf_flat.extend
.. autoclass:: pylibraft.neighbors.ivf_flat.SearchParams
:members:
.. autofunction:: pylibraft.neighbors.ivf_flat.search
Serializer Methods
------------------
.. autofunction:: pylibraft.neighbors.ivf_flat.save
.. autofunction:: pylibraft.neighbors.ivf_flat.load
IVF-PQ
######
.. autoclass:: pylibraft.neighbors.ivf_pq.IndexParams
:members:
.. autofunction:: pylibraft.neighbors.ivf_pq.build
.. autofunction:: pylibraft.neighbors.ivf_pq.extend
.. autoclass:: pylibraft.neighbors.ivf_pq.SearchParams
:members:
.. autofunction:: pylibraft.neighbors.ivf_pq.search
Serializer Methods
------------------
.. autofunction:: pylibraft.neighbors.ivf_pq.save
.. autofunction:: pylibraft.neighbors.ivf_pq.load
Candidate Refinement
--------------------
.. autofunction:: pylibraft.neighbors.refine
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/python_api/matrix.rst | Matrix
======
This page provides `pylibraft` class references for the publicly-exposed elements of the `pylibraft.matrix` package.
.. role:: py(code)
:language: python
:class: highlight
.. autofunction:: pylibraft.matrix.select_k
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/python_api/cluster.rst | Cluster
=======
This page provides pylibraft class references for the publicly-exposed elements of the `pylibraft.cluster` package.
.. role:: py(code)
:language: python
:class: highlight
KMeans
######
.. autoclass:: pylibraft.cluster.kmeans.KMeansParams
:members:
.. autofunction:: pylibraft.cluster.kmeans.fit
.. autofunction:: pylibraft.cluster.kmeans.cluster_cost
.. autofunction:: pylibraft.cluster.kmeans.compute_new_centroids
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/python_api/common.rst | Common
======
This page provides `pylibraft` class references for the publicly-exposed elements of the `pylibraft.common` package.
.. role:: py(code)
:language: python
:class: highlight
Basic Vocabulary
################
.. autoclass:: pylibraft.common.DeviceResources
:members:
.. autoclass:: pylibraft.common.Stream
:members:
.. autoclass:: pylibraft.common.device_ndarray
:members:
Interruptible
#############
.. autofunction:: pylibraft.common.interruptible.cuda_interruptible
.. autofunction:: pylibraft.common.interruptible.synchronize
.. autofunction:: pylibraft.common.interruptible.cuda_yield
CUDA Array Interface Helpers
############################
.. autoclass:: pylibraft.common.cai_wrapper
:members:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/cluster_kmeans.rst | K-Means
=======
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/cluster/kmeans.cuh>``
.. doxygennamespace:: raft::cluster::kmeans
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/neighbors_epsilon_neighborhood.rst | Epsilon Neighborhood
====================
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/neighbors/epsilon_neighborhood.cuh>``
namespace *raft::neighbors::epsilon_neighborhood*
.. doxygengroup:: epsilon_neighbors
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/distance.rst | Distance
========
This page provides C++ class references for the publicly-exposed elements of the `raft/distance` package. RAFT's
distances have been highly optimized and support a wide assortment of different distance measures.
.. role:: py(code)
:language: c++
:class: highlight
Distance Types
--------------
``#include <raft/distance/distance_types.hpp>``
namespace *raft::distance*
.. doxygenenum:: raft::distance::DistanceType
:project: RAFT
.. toctree::
:maxdepth: 2
:caption: Contents:
distance_pairwise.rst
distance_1nn.rst
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/neighbors.rst | Neighbors
=========
This page provides C++ class references for the publicly-exposed elements of the neighbors package.
.. role:: py(code)
:language: c++
:class: highlight
.. toctree::
:maxdepth: 2
:caption: Contents:
neighbors_brute_force.rst
neighbors_ivf_flat.rst
neighbors_ivf_pq.rst
neighbors_epsilon_neighborhood.rst
neighbors_ball_cover.rst
neighbors_cagra.rst | 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/neighbors_ivf_pq.rst | IVF-PQ
======
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/neighbors/ivf_pq.cuh>``
namespace *raft::neighbors::ivf_pq*
.. doxygengroup:: ivf_pq
:project: RAFT
:members:
:content-only:
Serializer Methods
------------------
``#include <raft/neighbors/ivf_pq_serialize.cuh>``
namespace *raft::neighbors::ivf_pq*
.. doxygengroup:: ivf_pq_serialize
:project: RAFT
:members:
:content-only:
Candidate Refinement
--------------------
``#include <raft/neighbors/refine.cuh>``
namespace *raft::neighbors*
.. doxygengroup:: ann_refine
:project: RAFT
:members:
:content-only:
Helper Methods
--------------
``#include <raft/neighbors/ivf_pq_helpers.cuh>``
namespace *raft::neighbors::ivf_pq::helpers*
.. doxygengroup:: ivf_pq_helpers
:project: RAFT
:members:
:content-only: | 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/stats_clustering.rst | Clustering Model Scoring
========================
.. role:: py(code)
:language: c++
:class: highlight
Adjusted Rand Index
-------------------
``#include <raft/stats/adjusted_rand_index.cuh>``
namespace *raft::stats*
.. doxygengroup:: stats_adj_rand_index
:project: RAFT
:members:
:content-only:
Completeness Score
------------------
``#include <raft/stats/completeness_score.cuh>``
namespace *raft::stats*
.. doxygengroup:: stats_completeness
:project: RAFT
:members:
:content-only:
Cluster Dispersion
------------------
``#include <raft/stats/dispersion.cuh>``
namespace *raft::stats*
.. doxygengroup:: stats_cluster_dispersion
:project: RAFT
:members:
:content-only:
Rand Index
----------
``#include <raft/stats/rand_index.cuh>``
namespace *raft::stats*
.. doxygengroup:: stats_rand_index
:project: RAFT
:members:
:content-only:
Silhouette Score
----------------
``#include <raft/stats/silhouette_score.cuh>``
namespace *raft::stats*
.. doxygengroup:: stats_silhouette_score
:project: RAFT
:members:
:content-only:
V Measure
---------
``#include <raft/stats/v_measure.cuh>``
namespace *raft::stats*
.. doxygengroup:: stats_vmeasure
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/sparse_types.rst | Sparse Types
============
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/core/sparse_types.hpp>``
.. doxygengroup:: sparse_types
:project: RAFT
:members:
:content-only:
.. toctree::
:maxdepth: 2
:caption: Contents:
sparse_types_coo_matrix.rst
sparse_types_csr_matrix.rst
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/neighbors_ball_cover.rst | Random Ball Cover
=================
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/neighbors/ball_cover.cuh>``
namespace *raft::neighbors::ball_cover*
.. doxygengroup:: random_ball_cover
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/cluster.rst | Cluster
=======
This page provides C++ API references for the publicly-exposed elements of the `raft/cluster` headers. RAFT provides
fundamental clustering algorithms which are, themselves, considered reusable building blocks for other algorithms.
.. role:: py(code)
:language: c++
:class: highlight
.. toctree::
:maxdepth: 2
:caption: Contents:
cluster_kmeans.rst
cluster_kmeans_balanced.rst
cluster_slhc.rst
cluster_spectral.rst | 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/distance_pairwise.rst | Pairwise Distance
=================
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/distance/distance.cuh>``
namespace *raft::distance*
.. doxygengroup:: distance_mdspan
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/stats_probability.rst | Probability & Information Theory
================================
.. role:: py(code)
:language: c++
:class: highlight
Contingency Matrix
------------------
``#include <raft/stats/contingency_matrix.cuh>``
namespace *raft::stats*
.. doxygengroup:: contingency_matrix
:project: RAFT
:members:
:content-only:
Entropy
-------
``#include <raft/stats/entropy.cuh>``
namespace *raft::stats*
.. doxygengroup:: stats_entropy
:project: RAFT
:members:
:content-only:
KL-Divergence
-------------
``#include <raft/stats/kl_divergence.cuh>``
namespace *raft::stats*
.. doxygengroup:: kl_divergence
:project: RAFT
:members:
:content-only:
Mutual Information
------------------
``#include <raft/stats/mutual_info_score.cuh>``
namespace *raft::stats*
.. doxygengroup:: stats_mutual_info
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/neighbors_cagra.rst | CAGRA
=====
CAGRA is a graph-based nearest neighbors implementation with state-of-the art query performance for both small- and large-batch sized search.
Please note that the CAGRA implementation is currently experimental and the API is subject to change from release to release. We are currently working on promoting CAGRA to a top-level stable API within RAFT.
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/neighbors/cagra.cuh>``
namespace *raft::neighbors::cagra*
.. doxygengroup:: cagra
:project: RAFT
:members:
:content-only:
Serializer Methods
------------------
``#include <raft/neighbors/cagra_serialize.cuh>``
namespace *raft::neighbors::cagra*
.. doxygengroup:: cagra_serialize
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/sparse_neighbors.rst | Sparse Neighbors
================
.. doxygennamespace:: raft::sparse::neighbors
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/sparse_distance.rst | Sparse Distance
===============
.. doxygennamespace:: raft::sparse::distance
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/cluster_spectral.rst | Spectral Clustering
===================
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/spectral/partition.cuh>``
.. doxygennamespace:: raft::spectral
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/distance_1nn.rst | 1-Nearest Neighbors
===================
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/distance/fused_l2_nn.cuh>``
namespace *raft::distance*
.. doxygengroup:: fused_l2_nn
:project: RAFT
:members:
:content-only:
``#include <raft/distance/fused_l2_nn.cuh>``
namespace *raft::distance*
.. doxygengroup:: masked_nn
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/neighbors_brute_force.rst | Brute-Force
===========
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/neighbors/brute_force.cuh>``
namespace *raft::neighbors::brute_force*
.. doxygengroup:: brute_force_knn
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/neighbors_ivf_flat.rst | IVF-Flat
========
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/neighbors/ivf_flat.cuh>``
namespace *raft::neighbors::ivf_flat*
.. doxygengroup:: ivf_flat
:project: RAFT
:members:
:content-only:
Serializer Methods
------------------
``#include <raft/neighbors/ivf_flat_serialize.cuh>``
namespace *raft::neighbors::ivf_flat*
.. doxygengroup:: ivf_flat_serialize
:project: RAFT
:members:
:content-only:
Helper Methods
--------------
``#include <raft/neighbors/ivf_flat_helpers.cuh>``
namespace *raft::neighbors::ivf_flat::helpers*
.. doxygengroup:: ivf_flat_helpers
:project: RAFT
:members:
:content-only: | 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/stats_neighborhood.rst | Neighborhood Model Scoring
==========================
.. role:: py(code)
:language: c++
:class: highlight
Trustworthiness
---------------
``#include <raft/stats/trustworthiness.cuh>``
namespace *raft::stats*
.. doxygengroup:: stats_trustworthiness
:project: RAFT
:members:
:content-only:
Neighborhood Recall
-------------------
``#include <raft/stats/neighborhood_recall.cuh>``
namespace *raft::stats*
.. doxygengroup:: stats_neighborhood_recall
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/cluster_slhc.rst | Hierarchical Clustering
=======================
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/cluster/single_linkage.cuh>``
.. doxygennamespace:: raft::cluster::hierarchy
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/stats.rst | Stats
=====
This page provides C++ class references for the publicly-exposed elements of the stats package.
.. role:: py(code)
:language: c++
:class: highlight
.. toctree::
:maxdepth: 2
:caption: Contents:
stats_summary.rst
stats_probability.rst
stats_regression.rst
stats_classification.rst
stats_clustering.rst
stats_neighborhood.rst
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/cluster_kmeans_balanced.rst | K-Means Balanced
================
.. role:: py(code)
:language: c++
:class: highlight
``#include <raft/cluster/kmeans_balanced.cuh>``
.. doxygennamespace:: raft::cluster::kmeans_balanced
:project: RAFT
:members:
:content-only:
| 0 |
rapidsai_public_repos/cuvs/docs/source | rapidsai_public_repos/cuvs/docs/source/cpp_api/sparse.rst | Sparse
======
Core to RAFT's computational patterns for sparse data is its vocabulary of sparse types.
.. role:: py(code)
:language: c++
:class: highlight
.. toctree::
:maxdepth: 2
:caption: Contents:
sparse_types.rst
sparse_distance.rst
sparse_linalg.rst
sparse_matrix.rst
sparse_neighbors.rst
sparse_solver.rst
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/ci/test_python.sh | #!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
set -euo pipefail
. /opt/conda/etc/profile.d/conda.sh
rapids-logger "Generate Python testing dependencies"
rapids-dependency-file-generator \
--output conda \
--file_key test_python \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n test
# Temporarily allow unbound variables for conda activation.
set +u
conda activate test
set -u
rapids-logger "Downloading artifacts from previous jobs"
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
PYTHON_CHANNEL=$(rapids-download-conda-from-s3 python)
RAPIDS_TESTS_DIR=${RAPIDS_TESTS_DIR:-"${PWD}/test-results"}
RAPIDS_COVERAGE_DIR=${RAPIDS_COVERAGE_DIR:-"${PWD}/coverage-results"}
mkdir -p "${RAPIDS_TESTS_DIR}" "${RAPIDS_COVERAGE_DIR}"
rapids-print-env
rapids-mamba-retry install \
--channel "${CPP_CHANNEL}" \
--channel "${PYTHON_CHANNEL}" \
libcuvs cuvs
rapids-logger "Check GPU usage"
nvidia-smi
EXITCODE=0
trap "EXITCODE=1" ERR
set +e
rapids-logger "pytest cuvs"
pushd python/cuvs/cuvs
pytest \
--cache-clear \
--junitxml="${RAPIDS_TESTS_DIR}/junit-cuvs.xml" \
--cov-config=../.coveragerc \
--cov=cuvs \
--cov-report=xml:"${RAPIDS_COVERAGE_DIR}/cuvs-coverage.xml" \
--cov-report=term \
test
popd
rapids-logger "Test script exiting with value: $EXITCODE"
exit ${EXITCODE}
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/ci/test_cpp.sh | #!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
set -euo pipefail
. /opt/conda/etc/profile.d/conda.sh
rapids-logger "Generate C++ testing dependencies"
rapids-dependency-file-generator \
--output conda \
--file_key test_cpp \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch)" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n test
# Temporarily allow unbound variables for conda activation.
set +u
conda activate test
set -u
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
RAPIDS_TESTS_DIR=${RAPIDS_TESTS_DIR:-"${PWD}/test-results"}/
mkdir -p "${RAPIDS_TESTS_DIR}"
rapids-print-env
rapids-mamba-retry install \
--channel "${CPP_CHANNEL}" \
libcuvs libcuvs-tests
rapids-logger "Check GPU usage"
nvidia-smi
EXITCODE=0
trap "EXITCODE=1" ERR
set +e
# Run libcuvs gtests from libcuvs-tests package
cd "$CONDA_PREFIX"/bin/gtests/libcuvs
ctest -j8 --output-on-failure
rapids-logger "Test script exiting with value: $EXITCODE"
exit ${EXITCODE}
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/ci/build_python.sh | #!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
set -euo pipefail
source rapids-env-update
export CMAKE_GENERATOR=Ninja
rapids-print-env
rapids-logger "Begin py build"
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
version=$(rapids-generate-version)
git_commit=$(git rev-parse HEAD)
export RAPIDS_PACKAGE_VERSION=${version}
echo "${version}" > VERSION
package_dir="python"
for package_name in cuvs raft-dask; do
underscore_package_name=$(echo "${package_name}" | tr "-" "_")
sed -i "/^__git_commit__/ s/= .*/= \"${git_commit}\"/g" "${package_dir}/${package_name}/${underscore_package_name}/_version.py"
done
# TODO: Remove `--no-test` flags once importing on a CPU
# node works correctly
rapids-conda-retry mambabuild \
--no-test \
--channel "${CPP_CHANNEL}" \
conda/recipes/cuvs
# Build ann-bench for each cuda and python version
rapids-conda-retry mambabuild \
--no-test \
--channel "${CPP_CHANNEL}" \
--channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \
conda/recipes/cuda-ann-bench
# Build ann-bench-cpu only in CUDA 11 jobs since it only depends on python
# version
RAPIDS_CUDA_MAJOR="${RAPIDS_CUDA_VERSION%%.*}"
if [[ ${RAPIDS_CUDA_MAJOR} == "11" ]]; then
rapids-conda-retry mambabuild \
--no-test \
--channel "${CPP_CHANNEL}" \
--channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \
conda/recipes/cuda-ann-bench-cpu
fi
rapids-upload-conda-to-s3 python
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/ci/build_wheel.sh | #!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
set -euo pipefail
package_name=$1
package_dir=$2
underscore_package_name=$(echo "${package_name}" | tr "-" "_")
source rapids-configure-sccache
source rapids-date-string
version=$(rapids-generate-version)
git_commit=$(git rev-parse HEAD)
RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
# This is the version of the suffix with a preceding hyphen. It's used
# everywhere except in the final wheel name.
PACKAGE_CUDA_SUFFIX="-${RAPIDS_PY_CUDA_SUFFIX}"
# Patch project metadata files to include the CUDA version suffix and version override.
pyproject_file="${package_dir}/pyproject.toml"
version_file="${package_dir}/${underscore_package_name}/_version.py"
sed -i "s/name = \"${package_name}\"/name = \"${package_name}${PACKAGE_CUDA_SUFFIX}\"/g" ${pyproject_file}
echo "${version}" > VERSION
sed -i "/^__git_commit__ / s/= .*/= \"${git_commit}\"/g" ${version_file}
# For nightlies we want to ensure that we're pulling in alphas as well. The
# easiest way to do so is to augment the spec with a constraint containing a
# min alpha version that doesn't affect the version bounds but does allow usage
# of alpha versions for that dependency without --pre
alpha_spec=''
if ! rapids-is-release-build; then
alpha_spec=',>=0.0.0a0'
fi
if [[ $PACKAGE_CUDA_SUFFIX == "-cu12" ]]; then
sed -i "s/cuda-python[<=>\.,0-9a]*/cuda-python>=12.0,<13.0a0/g" ${pyproject_file}
sed -i "s/cupy-cuda11x/cupy-cuda12x/g" ${pyproject_file}
fi
cd "${package_dir}"
# Hardcode the output dir
python -m pip wheel . -w dist -vvv --no-deps --disable-pip-version-check
mkdir -p final_dist
python -m auditwheel repair -w final_dist dist/*
RAPIDS_PY_WHEEL_NAME="${underscore_package_name}_${RAPIDS_PY_CUDA_SUFFIX}" rapids-upload-wheels-to-s3 final_dist
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/ci/check_style.sh | #!/bin/bash
# Copyright (c) 2020-2023, NVIDIA CORPORATION.

# CI style-check entry point: builds a conda env with the lint/format
# tooling, then runs all configured pre-commit hooks over the repo.

# Fail fast: exit on error, on unset variables, and on pipeline failures.
set -euo pipefail

rapids-logger "Create checks conda environment"
. /opt/conda/etc/profile.d/conda.sh

# Generate a conda environment file for the "checks" dependency set,
# matched to this CI job's CUDA version, architecture, and Python version.
rapids-dependency-file-generator \
  --output conda \
  --file_key checks \
  --matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml

rapids-mamba-retry env create --force -f env.yaml -n checks
conda activate checks

# Run pre-commit checks
pre-commit run --all-files --show-diff-on-failure
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/ci/build_cpp.sh | #!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
set -euo pipefail
source rapids-env-update
export CMAKE_GENERATOR=Ninja
rapids-print-env
version=$(rapids-generate-version)
rapids-logger "Begin cpp build"
RAPIDS_PACKAGE_VERSION=${version} rapids-conda-retry mambabuild conda/recipes/libcuvs
rapids-upload-conda-to-s3 cpp
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/ci/build_wheel_cuvs.sh | #!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
set -euo pipefail
# Set up skbuild options. Enable sccache in skbuild config options
export SKBUILD_CONFIGURE_OPTIONS="-DRAFT_BUILD_WHEELS=ON -DDETECT_CONDA_ENV=OFF -DFIND_RAFT_CPP=OFF"
ci/build_wheel.sh cuvs python/cuvs
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/ci/wheel_smoke_test_cuvs.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from scipy.spatial.distance import cdist

# Fixed import typo: the package is "pylibraft", not "pylibrat" — the
# original line raised ModuleNotFoundError before the smoke test could run.
from pylibraft.common import Handle, Stream, device_ndarray

from cuvs.distance import pairwise_distance

if __name__ == "__main__":
    # Smoke test: compute all-pairs euclidean distances on the GPU with
    # cuvs and compare against scipy's CPU reference implementation.
    metric = "euclidean"
    n_rows = 1337
    n_cols = 1337

    input1 = np.random.random_sample((n_rows, n_cols))
    input1 = np.asarray(input1, order="C").astype(np.float64)

    # CPU reference result; clamp near-zero entries so tiny numerical
    # noise (e.g. the diagonal) cannot fail the allclose comparison.
    expected = cdist(input1, input1, metric)
    expected[expected <= 1e-5] = 0.0

    input1_device = device_ndarray(input1)
    # Passing None for the output asks pairwise_distance to allocate it.
    output_device = None

    # Run the computation on an explicit, non-default stream and
    # synchronize before copying the result back to host.
    s2 = Stream()
    handle = Handle(stream=s2)
    ret_output = pairwise_distance(
        input1_device, input1_device, output_device, metric, handle=handle
    )
    handle.sync()

    output_device = ret_output
    actual = output_device.copy_to_host()
    actual[actual <= 1e-5] = 0.0

    assert np.allclose(expected, actual, rtol=1e-4)
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/ci/test_wheel_cuvs.sh | #!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
set -euo pipefail
mkdir -p ./dist
RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
RAPIDS_PY_WHEEL_NAME="cuvs_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./dist
# echo to expand wildcard before adding `[extra]` requires for pip
python -m pip install $(echo ./dist/cuvs*.whl)[test]
# Run smoke tests for aarch64 pull requests
if [[ "$(arch)" == "aarch64" && "${RAPIDS_BUILD_TYPE}" == "pull-request" ]]; then
python ./ci/wheel_smoke_test_cuvs.py
else
python -m pytest ./python/cuvs/cuvs/test
fi
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/ci/build_docs.sh | #!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
set -euo pipefail
rapids-logger "Create test conda environment"
. /opt/conda/etc/profile.d/conda.sh
rapids-dependency-file-generator \
--output conda \
--file_key docs \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n docs
conda activate docs
rapids-print-env
rapids-logger "Downloading artifacts from previous jobs"
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
PYTHON_CHANNEL=$(rapids-download-conda-from-s3 python)
rapids-mamba-retry install \
--channel "${CPP_CHANNEL}" \
--channel "${PYTHON_CHANNEL}" \
libcuvs \
libcuvs-headers \
cuvs \
raft-dask
export RAPIDS_VERSION_NUMBER="24.02"
export RAPIDS_DOCS_DIR="$(mktemp -d)"
rapids-logger "Build CPP docs"
pushd cpp/doxygen
doxygen Doxyfile
popd
rapids-logger "Build Python docs"
pushd docs
sphinx-build -b dirhtml source _html
sphinx-build -b text source _text
mkdir -p "${RAPIDS_DOCS_DIR}/raft/"{html,txt}
mv _html/* "${RAPIDS_DOCS_DIR}/raft/html"
mv _text/* "${RAPIDS_DOCS_DIR}/raft/txt"
popd
rapids-upload-docs
| 0 |
rapidsai_public_repos/cuvs/ci | rapidsai_public_repos/cuvs/ci/release/update-version.sh | #!/bin/bash
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
########################
# RAFT Version Updater #
########################
## Usage
# bash update-version.sh <new_version>
# Format is YY.MM.PP - no leading 'v' or trailing 'a'
NEXT_FULL_TAG=$1
# Get current version
CURRENT_TAG=$(git tag --merged HEAD | grep -xE '^v.*' | sort --version-sort | tail -n 1 | tr -d 'v')
CURRENT_MAJOR=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[1]}')
CURRENT_MINOR=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[2]}')
CURRENT_PATCH=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[3]}')
CURRENT_SHORT_TAG=${CURRENT_MAJOR}.${CURRENT_MINOR}
#Get <major>.<minor> for next version
NEXT_MAJOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[1]}')
NEXT_MINOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[2]}')
NEXT_SHORT_TAG=${NEXT_MAJOR}.${NEXT_MINOR}
NEXT_UCX_PY_SHORT_TAG="$(curl -sL https://version.gpuci.io/rapids/${NEXT_SHORT_TAG})"
NEXT_UCX_PY_VERSION="${NEXT_UCX_PY_SHORT_TAG}.*"
# Need to distutils-normalize the original version
NEXT_SHORT_TAG_PEP440=$(python -c "from setuptools.extern import packaging; print(packaging.version.Version('${NEXT_SHORT_TAG}'))")
NEXT_UCX_PY_SHORT_TAG_PEP440=$(python -c "from setuptools.extern import packaging; print(packaging.version.Version('${NEXT_UCX_PY_SHORT_TAG}'))")
echo "Preparing release $CURRENT_TAG => $NEXT_FULL_TAG"
# Inplace sed replace; workaround for Linux and Mac
function sed_runner() {
    # $1: sed expression, $2: target file.
    # Quote both arguments so expressions and file paths containing spaces
    # or glob characters are passed through intact (the original left $2
    # unquoted, subjecting it to word splitting and globbing).
    # The -i.bak backup suffix keeps the invocation portable between GNU
    # sed (Linux) and BSD sed (macOS); the backup is removed on success.
    sed -i.bak "$1" "$2" && rm -f "${2}.bak"
}
sed_runner "s/set(RAPIDS_VERSION .*)/set(RAPIDS_VERSION \"${NEXT_SHORT_TAG}\")/g" cpp/CMakeLists.txt
sed_runner "s/set(RAPIDS_VERSION .*)/set(RAPIDS_VERSION \"${NEXT_SHORT_TAG}\")/g" cpp/template/cmake/thirdparty/fetch_rapids.cmake
sed_runner "s/set(RAFT_VERSION .*)/set(RAFT_VERSION \"${NEXT_FULL_TAG}\")/g" cpp/CMakeLists.txt
sed_runner 's/'"cuvs_version .*)"'/'"cuvs_version ${NEXT_FULL_TAG})"'/g' python/cuvs/CMakeLists.txt
sed_runner 's/'"branch-.*\/RAPIDS.cmake"'/'"branch-${NEXT_SHORT_TAG}\/RAPIDS.cmake"'/g' fetch_rapids.cmake
# Centralized version file update
echo "${NEXT_FULL_TAG}" > VERSION
# Wheel testing script
sed_runner "s/branch-.*/branch-${NEXT_SHORT_TAG}/g" ci/test_wheel_raft_dask.sh
# Docs update
sed_runner 's/version = .*/version = '"'${NEXT_SHORT_TAG}'"'/g' docs/source/conf.py
sed_runner 's/release = .*/release = '"'${NEXT_FULL_TAG}'"'/g' docs/source/conf.py
DEPENDENCIES=(
dask-cuda
cuvs
cuvs-cu11
cuvs-cu12
rmm
rmm-cu11
rmm-cu12
rapids-dask-dependency
# ucx-py is handled separately below
)
for FILE in dependencies.yaml conda/environments/*.yaml; do
for DEP in "${DEPENDENCIES[@]}"; do
sed_runner "/-.* ${DEP}==/ s/==.*/==${NEXT_SHORT_TAG_PEP440}\.*/g" ${FILE};
done
sed_runner "/-.* ucx-py==/ s/==.*/==${NEXT_UCX_PY_SHORT_TAG_PEP440}\.*/g" ${FILE};
done
for FILE in python/*/pyproject.toml; do
for DEP in "${DEPENDENCIES[@]}"; do
sed_runner "/\"${DEP}==/ s/==.*\"/==${NEXT_SHORT_TAG_PEP440}.*\"/g" ${FILE}
done
sed_runner "/\"ucx-py==/ s/==.*\"/==${NEXT_UCX_PY_SHORT_TAG_PEP440}.*\"/g" ${FILE}
done
sed_runner "/^ucx_py_version:$/ {n;s/.*/ - \"${NEXT_UCX_PY_VERSION}\"/}" conda/recipes/raft-dask/conda_build_config.yaml
for FILE in .github/workflows/*.yaml; do
sed_runner "/shared-workflows/ s/@.*/@branch-${NEXT_SHORT_TAG}/g" "${FILE}"
done
sed_runner "s/RAPIDS_VERSION_NUMBER=\".*/RAPIDS_VERSION_NUMBER=\"${NEXT_SHORT_TAG}\"/g" ci/build_docs.sh
sed_runner "/^PROJECT_NUMBER/ s|\".*\"|\"${NEXT_SHORT_TAG}\"|g" cpp/doxygen/Doxyfile
sed_runner "/^set(RAFT_VERSION/ s|\".*\"|\"${NEXT_SHORT_TAG}\"|g" docs/source/build.md
sed_runner "s|branch-[0-9][0-9].[0-9][0-9]|branch-${NEXT_SHORT_TAG}|g" docs/source/build.md
sed_runner "/rapidsai\/raft/ s|branch-[0-9][0-9].[0-9][0-9]|branch-${NEXT_SHORT_TAG}|g" docs/source/developer_guide.md
sed_runner "s|:[0-9][0-9].[0-9][0-9]|:${NEXT_SHORT_TAG}|g" docs/source/raft_ann_benchmarks.md
sed_runner "s|branch-[0-9][0-9].[0-9][0-9]|branch-${NEXT_SHORT_TAG}|g" README.md
# .devcontainer files
# Update image and feature tags in every devcontainer.json.
# -print0 / read -d '' handle paths with spaces or newlines safely.
# Bug fix: the loop previously passed "$(unknown)" (a nonexistent command
# substitution) to sed_runner instead of the filename read by the loop,
# so no devcontainer file was ever updated.
find .devcontainer/ -type f -name devcontainer.json -print0 | while IFS= read -r -d '' filename; do
    sed_runner "s@rapidsai/devcontainers:[0-9.]*@rapidsai/devcontainers:${NEXT_SHORT_TAG}@g" "${filename}"
    sed_runner "s@rapidsai/devcontainers/features/ucx:[0-9.]*@rapidsai/devcontainers/features/ucx:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
    sed_runner "s@rapidsai/devcontainers/features/rapids-build-utils:[0-9.]*@rapidsai/devcontainers/features/rapids-build-utils:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
done
| 0 |
rapidsai_public_repos/cuvs/ci | rapidsai_public_repos/cuvs/ci/checks/black_lists.sh | #!/bin/bash
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
##########################################
# RAFT black listed function call Tester #
##########################################
# PR_TARGET_BRANCH is set by the CI environment
git checkout --quiet $PR_TARGET_BRANCH
# Switch back to tip of PR branch
git checkout --quiet current-pr-branch
# Ignore errors during searching
set +e
# Disable history expansion to enable use of ! in perl regex
set +H
RETVAL=0
for black_listed in cudaDeviceSynchronize cudaMalloc cudaMallocManaged cudaFree cudaMallocHost cudaHostAlloc cudaFreeHost; do
TMP=`git --no-pager diff --ignore-submodules -w --minimal -U0 -S"$black_listed" $PR_TARGET_BRANCH | grep '^+' | grep -v '^+++' | grep "$black_listed"`
if [ "$TMP" != "" ]; then
for filename in `git --no-pager diff --ignore-submodules -w --minimal --name-only -S"$black_listed" $PR_TARGET_BRANCH`; do
basefilename=$(basename -- "$filename")
filext="${basefilename##*.}"
if [ "$filext" != "md" ] && [ "$filext" != "sh" ]; then
TMP2=`git --no-pager diff --ignore-submodules -w --minimal -U0 -S"$black_listed" $PR_TARGET_BRANCH -- $filename | grep '^+' | grep -v '^+++' | grep "$black_listed" | grep -vE "^\+[[:space:]]*/{2,}.*$black_listed"`
if [ "$TMP2" != "" ]; then
echo "=== ERROR: black listed function call $black_listed added to $filename ==="
git --no-pager diff --ignore-submodules -w --minimal -S"$black_listed" $PR_TARGET_BRANCH -- $filename
echo "=== END ERROR ==="
RETVAL=1
fi
fi
done
fi
done
for cond_black_listed in cudaMemcpy cudaMemset; do
TMP=`git --no-pager diff --ignore-submodules -w --minimal -U0 -S"$cond_black_listed" $PR_TARGET_BRANCH | grep '^+' | grep -v '^+++' | grep -P "$cond_black_listed(?!Async)"`
if [ "$TMP" != "" ]; then
for filename in `git --no-pager diff --ignore-submodules -w --minimal --name-only -S"$cond_black_listed" $PR_TARGET_BRANCH`; do
basefilename=$(basename -- "$filename")
filext="${basefilename##*.}"
if [ "$filext" != "md" ] && [ "$filext" != "sh" ]; then
TMP2=`git --no-pager diff --ignore-submodules -w --minimal -U0 -S"$cond_black_listed" $PR_TARGET_BRANCH -- $filename | grep '^+' | grep -v '^+++' | grep -P "$cond_black_listed(?!Async)" | grep -vE "^\+[[:space:]]*/{2,}.*$cond_black_listed"`
if [ "$TMP2" != "" ]; then
echo "=== ERROR: black listed function call $cond_black_listed added to $filename ==="
git --no-pager diff --ignore-submodules -w --minimal -S"$cond_black_listed" $PR_TARGET_BRANCH -- $filename
echo "=== END ERROR ==="
RETVAL=1
fi
fi
done
fi
done
exit $RETVAL
| 0 |
rapidsai_public_repos/cuvs/ci | rapidsai_public_repos/cuvs/ci/checks/copyright.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import re
import argparse
import io
import os
import sys
import git
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
# Add the scripts dir for gitutils
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR,
"../../cpp/scripts")))
# Now import gitutils. Ignore flake8 error here since there is no other way to
# set up imports
import gitutils # noqa: E402
FilesToCheck = [
re.compile(r"[.](cmake|cpp|cu|cuh|h|hpp|sh|pxd|py|pyx)$"),
re.compile(r"CMakeLists[.]txt$"),
re.compile(r"CMakeLists_standalone[.]txt$"),
re.compile(r"setup[.]cfg$"),
re.compile(r"meta[.]yaml$")
]
ExemptFiles = [
re.compile("cpp/include/cuvs/neighbors/detail/faiss_select/"),
re.compile("docs/source/sphinxext/github_link.py"),
re.compile("cpp/cmake/modules/FindAVX.cmake")
]
# this will break starting at year 10000, which is probably OK :)
CheckSimple = re.compile(
r"Copyright *(?:\(c\))? *(\d{4}),? *NVIDIA C(?:ORPORATION|orporation)")
CheckDouble = re.compile(
r"Copyright *(?:\(c\))? *(\d{4})-(\d{4}),? *NVIDIA C(?:ORPORATION|orporation)" # noqa: E501
)
def checkThisFile(f):
    """Return True when *f* (a path string or git.Diff) needs a copyright check."""
    if isinstance(f, git.Diff):
        # Deleted or empty blobs carry no content worth checking.
        if f.deleted_file or f.b_blob.size == 0:
            return False
        path = f.b_path
    else:
        path = f
        # Also covers symlinks that point at files which do not exist.
        if not os.path.exists(path) or os.stat(path).st_size == 0:
            return False
    # Explicit exemptions win over the inclusion patterns.
    if any(pattern.search(path) for pattern in ExemptFiles):
        return False
    # Otherwise check only files whose name matches a known source pattern.
    return any(pattern.search(path) for pattern in FilesToCheck)
def modifiedFiles():
    """Get a set of all modified files, as Diff objects.

    The files returned have been modified in git since the merge base of HEAD
    and the upstream of the target branch. We return the Diff objects so that
    we can read only the staged changes.
    """
    repo = git.Repo()
    # Use the environment variable TARGET_BRANCH or RAPIDS_BASE_BRANCH
    # (defined in CI) if possible
    target_branch = os.environ.get("TARGET_BRANCH", os.environ.get("RAPIDS_BASE_BRANCH"))
    if target_branch is None:
        # Fall back to the closest branch if not on CI
        target_branch = repo.git.describe(
            all=True, tags=True, match="branch-*", abbrev=0
        )
        # Strip the literal "heads/" ref prefix. NOTE: the previous
        # str.lstrip("heads/") was a latent bug: lstrip removes any leading
        # characters drawn from the set {h, e, a, d, s, /}, not the prefix
        # string, so branch names beginning with those letters would be
        # mangled.
        if target_branch.startswith("heads/"):
            target_branch = target_branch[len("heads/"):]
    upstream_target_branch = None
    if target_branch in repo.heads:
        # Use the tracking branch of the local reference if it exists. This
        # returns None if no tracking branch is set.
        upstream_target_branch = repo.heads[target_branch].tracking_branch()
    if upstream_target_branch is None:
        # Fall back to the remote with the newest target_branch. This code
        # path is used on CI because the only local branch reference is
        # current-pr-branch, and thus target_branch is not in repo.heads.
        # This also happens if no tracking branch is defined for the local
        # target_branch. We use the remote with the latest commit if
        # multiple remotes are defined.
        candidate_branches = [
            remote.refs[target_branch] for remote in repo.remotes
            if target_branch in remote.refs
        ]
        if len(candidate_branches) > 0:
            upstream_target_branch = sorted(
                candidate_branches,
                key=lambda branch: branch.commit.committed_datetime,
            )[-1]
        else:
            # If no remotes are defined, try to use the local version of the
            # target_branch. If this fails, the repo configuration must be very
            # strange and we can fix this script on a case-by-case basis.
            upstream_target_branch = repo.heads[target_branch]
    merge_base = repo.merge_base("HEAD", upstream_target_branch.commit)[0]
    diff = merge_base.diff()
    changed_files = {f for f in diff if f.b_path is not None}
    return changed_files
def getCopyrightYears(line):
    """Extract the (start, end) copyright years from *line*.

    Returns the same year twice for a single-year header, both years for a
    range header, and ``(None, None)`` when no header is present.
    """
    match = CheckSimple.search(line)
    if match is not None:
        year = int(match.group(1))
        return year, year
    match = CheckDouble.search(line)
    if match is not None:
        return int(match.group(1)), int(match.group(2))
    return None, None
def replaceCurrentYear(line, start, end):
    """Return *line* with its copyright years rewritten to ``start-end``.

    A single-year header is first normalized into the year-range form so that
    a single range substitution then handles both header shapes.
    """
    doubled = CheckSimple.sub(r"Copyright (c) \1-\1, NVIDIA CORPORATION", line)
    replacement = rf"Copyright (c) {start:04d}-{end:04d}, NVIDIA CORPORATION"
    return CheckDouble.sub(replacement, doubled)
def checkCopyright(f, update_current_year):
    """Checks for copyright headers and their years.

    Parameters
    ----------
    f : git.Diff or str
        Diff object (the staged blob content is read) or a path to a file
        on disk.
    update_current_year : bool
        When True, rewrite fixable headers on disk so they include the
        current year.

    Returns
    -------
    list
        ``[path, lineNum, message, replacement]`` entries, one per problem;
        ``replacement`` is None when the issue requires a manual fix.
    """
    errs = []
    thisYear = datetime.datetime.now().year
    lineNum = 0
    crFound = False
    yearMatched = False
    if isinstance(f, git.Diff):
        # Read the staged blob content, not the working tree.
        path = f.b_path
        lines = f.b_blob.data_stream.read().decode().splitlines(keepends=True)
    else:
        path = f
        with open(f, encoding="utf-8") as fp:
            lines = fp.readlines()
    for line in lines:
        lineNum += 1
        start, end = getCopyrightYears(line)
        if start is None:
            # Not a copyright line; keep scanning.
            continue
        crFound = True
        if start > end:
            e = [
                path,
                lineNum,
                "First year after second year in the copyright "
                "header (manual fix required)",
                None,
            ]
            errs.append(e)
        elif thisYear < start or thisYear > end:
            # Current year outside the stated range: record the issue along
            # with a suggested replacement line that widens the range.
            e = [
                path,
                lineNum,
                "Current year not included in the copyright header",
                None,
            ]
            if thisYear < start:
                e[-1] = replaceCurrentYear(line, thisYear, end)
            if thisYear > end:
                e[-1] = replaceCurrentYear(line, start, thisYear)
            errs.append(e)
        else:
            yearMatched = True
    # copyright header itself not found
    if not crFound:
        e = [
            path,
            0,
            "Copyright header missing or formatted incorrectly "
            "(manual fix required)",
            None,
        ]
        errs.append(e)
    # even if the year matches a copyright header, make the check pass
    if yearMatched:
        errs = []
    if update_current_year:
        # Apply the suggested replacements in place; entries without a
        # replacement (manual fixes) are left untouched.
        errs_update = [x for x in errs if x[-1] is not None]
        if len(errs_update) > 0:
            lines_changed = ", ".join(str(x[1]) for x in errs_update)
            print(f"File: {path}. Changing line(s) {lines_changed}")
            for _, lineNum, __, replacement in errs_update:
                lines[lineNum - 1] = replacement
            with open(path, "w", encoding="utf-8") as out_file:
                out_file.writelines(lines)
    return errs
def getAllFilesUnderDir(root, pathFilter=None):
    """Recursively list all files under *root*.

    Parameters
    ----------
    root : str
        Directory to walk.
    pathFilter : callable, optional
        Predicate taking a file path and returning True to include it.
        When None, every file is included. (Previously the default of None
        raised TypeError because the filter was called unconditionally.)

    Returns
    -------
    list of str
        Paths of all matching files, in os.walk order.
    """
    retList = []
    for dirpath, dirnames, filenames in os.walk(root):
        for fn in filenames:
            filePath = os.path.join(dirpath, fn)
            if pathFilter is None or pathFilter(filePath):
                retList.append(filePath)
    return retList
def checkCopyright_main():
    """
    Checks for copyright headers in all the modified files. In case of local
    repo, this script will just look for uncommitted files and in case of CI
    it compares between branches "$PR_TARGET_BRANCH" and "current-pr-branch"
    """
    # Exit code: 0 when all headers are valid, 1 otherwise.
    retVal = 0
    argparser = argparse.ArgumentParser(
        "Checks for a consistent copyright header in git's modified files"
    )
    argparser.add_argument(
        "--update-current-year",
        dest="update_current_year",
        action="store_true",
        required=False,
        help="If set, "
        "update the current year if a header is already "
        "present and well formatted.",
    )
    argparser.add_argument(
        "--git-modified-only",
        dest="git_modified_only",
        action="store_true",
        required=False,
        help="If set, "
        "only files seen as modified by git will be "
        "processed.",
    )
    # Unrecognized positional arguments are treated as directories to scan.
    args, dirs = argparser.parse_known_args()
    if args.git_modified_only:
        files = [f for f in modifiedFiles() if checkThisFile(f)]
    else:
        files = []
        for d in [os.path.abspath(d) for d in dirs]:
            if not os.path.isdir(d):
                raise ValueError(f"{d} is not a directory.")
            files += getAllFilesUnderDir(d, pathFilter=checkThisFile)
    errors = []
    for f in files:
        errors += checkCopyright(f, args.update_current_year)
    if len(errors) > 0:
        if any(e[-1] is None for e in errors):
            print("Copyright headers incomplete in some of the files!")
        for e in errors:
            print("  %s:%d Issue: %s" % (e[0], e[1], e[2]))
        print("")
        # Entries with a non-None replacement can be auto-fixed.
        n_fixable = sum(1 for e in errors if e[-1] is not None)
        # NOTE(review): assumes this script lives under a "ci/" directory;
        # .index("ci") raises ValueError otherwise -- confirm placement.
        path_parts = os.path.abspath(__file__).split(os.sep)
        file_from_repo = os.sep.join(path_parts[path_parts.index("ci") :])
        if n_fixable > 0 and not args.update_current_year:
            print(
                f"You can run `python {file_from_repo} --git-modified-only "
                "--update-current-year` and stage the results in git to "
                f"fix {n_fixable} of these errors.\n"
            )
        retVal = 1
    return retVal


if __name__ == "__main__":
    sys.exit(checkCopyright_main())
| 0 |
rapidsai_public_repos/cuvs/thirdparty | rapidsai_public_repos/cuvs/thirdparty/LICENSES/LICENSE.pytorch | From PyTorch:
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
From Caffe2:
Copyright (c) 2016-present, Facebook Inc. All rights reserved.
All contributions by Facebook:
Copyright (c) 2016 Facebook Inc.
All contributions by Google:
Copyright (c) 2015 Google Inc.
All rights reserved.
All contributions by Yangqing Jia:
Copyright (c) 2015 Yangqing Jia
All rights reserved.
All contributions by Kakao Brain:
Copyright 2019-2020 Kakao Brain
All contributions by Cruise LLC:
Copyright (c) 2022 Cruise LLC.
All rights reserved.
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors
All rights reserved.
All other contributions:
Copyright(c) 2015, 2016 the respective contributors
All rights reserved.
Caffe2 uses a copyright model similar to Caffe: each contributor holds
copyright over their contributions to Caffe2. The project versioning records
all such contribution and copyright details. If a contributor wants to further
mark their specific copyright on a particular contribution, they should
indicate their copyright solely in the commit message of the change when it is
committed.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
and IDIAP Research Institute nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE. | 0 |
rapidsai_public_repos/cuvs/thirdparty | rapidsai_public_repos/cuvs/thirdparty/LICENSES/LICENSE.faiss | MIT License
Copyright (c) Facebook, Inc. and its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. | 0 |
rapidsai_public_repos/cuvs/thirdparty | rapidsai_public_repos/cuvs/thirdparty/LICENSES/LICENSE.ann-benchmark | MIT License
Copyright (c) 2018 Erik Bernhardsson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. | 0 |
rapidsai_public_repos/cuvs/thirdparty | rapidsai_public_repos/cuvs/thirdparty/LICENSES/LICENSE_Date_Nagi | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2020 KETAN DATE & RAKESH NAGI
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos | rapidsai_public_repos/gpuci-mgmt/README.md | # gpuci-mgmt
Management scripts for gpuCI
### [`/ci`](/ci)
gpuCI scripts for packer builds
### [`/init`](/init)
Scripts used for gpuCI node startup on AWS
### [`/templates`](/templates)
Packer build templates for AWS AMI images used in gpuCI
| 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/init/aws-nvme.sh | #!/bin/bash
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# AWS init script for gpuCI nodes with nvme drives on nodes
#
set -e
SCRIPT_NAME="$0"
function logger {
TS=`date +%F_%H-%M-%S`
echo "[$SCRIPT_NAME $TS] $@"
}
logger "Check if nvme is already mounted; if not format and mount"
INSTANCE_NVME=`sudo nvme list | grep "Amazon EC2 NVMe Instance Storage" | awk '{ print $1 }' | head -n1`
logger "Instance NVMe found - $INSTANCE_NVME"
if ! grep -qa "$INSTANCE_NVME /jenkins " /proc/mounts; then
logger "$INSTANCE_NVME not mounted, mounting and formatting"
sudo mkfs -t ext4 $INSTANCE_NVME && sudo mkdir -p /jenkins && sudo mount $INSTANCE_NVME /jenkins
else
logger "$INSTANCE_NVME already mounted"
fi
logger "Check mounts"
mount
df -h
logger "Ensure ubuntu user has full rights on directory for Jenkins work"
sudo chown -R ubuntu:ubuntu /jenkins
logger "Relocate /tmp to NVMe for faster perf"
if [ ! -d "/jenkins/tmp" ] ; then
logger "/tmp needs relocating"
sudo mv /tmp /jenkins
sudo ln -s /jenkins/tmp /tmp
logger "/tmp relocated to /jenkins"
else
logger "/jenkins/tmp already exists"
fi
logger "Override docker setup"
sudo service docker stop
if [ -f /etc/docker/daemon.json ]; then
sudo cat /etc/docker/daemon.json
fi
cat <<EOL > /tmp/daemon.json
{
"experimental": true
}
EOL
sudo mv /tmp/daemon.json /etc/docker/daemon.json
sudo cat /etc/docker/daemon.json
sudo service docker start
logger "Ensure docker system is clean"
set +e
docker system prune -f
# Setup hourly cron to prune images
sudo cat > /etc/docker/image-prune.sh <<EOF
#!/bin/bash
df -h -t ext4
docker images
docker image prune -a -f --filter "until=12h"
docker images
docker volume ls
docker volume prune -f
docker volume ls
docker container ls
docker container prune -f
docker container ls
df -h -t ext4
EOF
sudo chmod +x /etc/docker/image-prune.sh
sudo crontab -l > /tmp/existing-crons | true
sudo echo "0 */3 * * * /etc/docker/image-prune.sh" >> /tmp/existing-crons
sudo crontab /tmp/existing-crons
logger "Connect node to Jenkins"
wget https://gpuci.gpuopenanalytics.com/plugin/ec2/AMI-Scripts/ubuntu-ami-setup.sh
sudo sh ubuntu-ami-setup.sh gpuci.gpuopenanalytics.com
| 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/init/aws-ebs-gpu.sh | #!/bin/bash
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# AWS init script for gpuCI nodes with EBS only storage, nv-docker
#
set -e
SCRIPT_NAME="$0"
function logger {
TS=`date +%F_%H-%M-%S`
echo "[$SCRIPT_NAME $TS] $@"
}
logger "Check mounts"
mount
df -h
logger "Ensure jenkins user has full rights on directory for Jenkins work"
sudo mkdir -p /jenkins
sudo chown -R jenkins:jenkins /jenkins
logger "Override docker setup"
sudo service docker stop
if [ -f /etc/docker/daemon.json ]; then
sudo cat /etc/docker/daemon.json
fi
cat <<EOL > /tmp/daemon.json
{
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"experimental": true
}
EOL
sudo mv /tmp/daemon.json /etc/docker/daemon.json
sudo cat /etc/docker/daemon.json
sudo service docker start
logger "Ensure docker system is clean"
set +e
docker system prune -f
# Setup hourly cron to prune images
sudo cat > /etc/docker/image-prune.sh <<EOF
#!/bin/bash
df -h -t ext4
docker images
docker image prune -a -f --filter "until=12h"
docker images
docker volume ls
docker volume prune -f
docker volume ls
docker container ls
docker container prune -f
docker container ls
df -h -t ext4
EOF
sudo chmod +x /etc/docker/image-prune.sh
sudo crontab -l > /tmp/existing-crons | true
sudo echo "0 */3 * * * /etc/docker/image-prune.sh" >> /tmp/existing-crons
sudo crontab /tmp/existing-crons
logger "Connect node to Jenkins"
wget https://gpuci.gpuopenanalytics.com/plugin/ec2/AMI-Scripts/ubuntu-ami-setup.sh
sudo sh ubuntu-ami-setup.sh gpuci.gpuopenanalytics.com
| 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/init/aws-nvme-docker-gpu.sh | #!/bin/bash
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# AWS init script for gpuCI nodes with nvme drives on nodes and GPUs
#
set -e
SCRIPT_NAME="$0"
function logger {
TS=`date +%F_%H-%M-%S`
echo "[$SCRIPT_NAME $TS] $@"
}
logger "Check if nvme is already mounted; if not format and mount"
INSTANCE_NVME=`sudo nvme list | grep "Amazon EC2 NVMe Instance Storage" | awk '{ print $1 }' | head -n1`
logger "Instance NVMe found - $INSTANCE_NVME"
if ! grep -qa "$INSTANCE_NVME /jenkins " /proc/mounts; then
logger "$INSTANCE_NVME not mounted, mounting and formatting"
sudo mkfs -t ext4 $INSTANCE_NVME && sudo mkdir -p /jenkins && sudo mount $INSTANCE_NVME /jenkins
else
logger "$INSTANCE_NVME already mounted"
fi
logger "Check mounts"
mount
df -h
logger "Ensure jenkins user has full rights on directory for Jenkins work"
sudo mkdir -p /jenkins
sudo chown -R jenkins:jenkins /jenkins
logger "Relocate /tmp to NVMe for faster perf"
if [ ! -d "/jenkins/tmp" ] ; then
logger "/tmp needs relocating"
sudo mv /tmp /jenkins
sudo ln -s /jenkins/tmp /tmp
logger "/tmp relocated to /jenkins"
else
logger "/jenkins/tmp already exists"
fi
logger "Override docker setup"
sudo service docker stop
sudo cat /etc/docker/daemon.json
cat <<EOL > /tmp/daemon.json
{
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"experimental": true
}
EOL
sudo mv /tmp/daemon.json /etc/docker/daemon.json
if [ -f /etc/docker/daemon.json ]; then
sudo cat /etc/docker/daemon.json
fi
logger "Move docker to nvme on /jenkins"
if [ ! -d /jenkins/docker ] ; then
logger "Moving /var/lib/docker to /jenkins/docker"
sudo mv /var/lib/docker /jenkins/
sudo ln -s /jenkins/docker /var/lib/docker
else
logger "Docker is already moved"
fi
sudo service docker start
logger "Ensure docker system is clean"
set +e
docker system prune -f
# Setup hourly cron to prune images
sudo cat > /etc/docker/image-prune.sh <<EOF
#!/bin/bash
df -h -t ext4
docker images
docker image prune -a -f --filter "until=12h"
docker images
docker volume ls
docker volume prune -f
docker volume ls
docker container ls
docker container prune -f
docker container ls
df -h -t ext4
EOF
sudo chmod +x /etc/docker/image-prune.sh
sudo crontab -l > /tmp/existing-crons | true
sudo echo "0 */3 * * * /etc/docker/image-prune.sh" >> /tmp/existing-crons
sudo crontab /tmp/existing-crons
logger "Connect node to Jenkins"
wget https://gpuci.gpuopenanalytics.com/plugin/ec2/AMI-Scripts/ubuntu-ami-setup.sh
sudo sh ubuntu-ami-setup.sh gpuci.gpuopenanalytics.com
| 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/init/aws-ebs-nvdocker.sh | #!/bin/bash
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# AWS init script for gpuCI nodes with EBS only storage and nv-docker
#
# Update/upgrade image first; before unattended-upgrades runs
sudo apt-get update && sudo apt-get upgrade -y && sudo apt-get clean
# Check mounts
mount
df -h
# Ensure ubuntu user has full rights on directory for Jenkins work
sudo mkdir -p /jenkins
sudo chown -R ubuntu:ubuntu /jenkins
# Override docker setup
sudo service docker stop
if [ -f /etc/docker/daemon.json ]; then
sudo cat /etc/docker/daemon.json
fi
cat <<EOL > /tmp/daemon.json
{
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
},
"experimental": true
}
EOL
sudo mv /tmp/daemon.json /etc/docker/daemon.json
sudo cat /etc/docker/daemon.json
sudo service docker start
# Ensure docker system is clean
docker system prune -f
# Setup hourly cron to prune images
sudo cat > /etc/docker/image-prune.sh <<EOF
#!/bin/bash
df -h -t ext4
docker images
docker image prune -a -f --filter "until=12h"
docker images
docker volume ls
docker volume prune -f
docker volume ls
docker container ls
docker container prune -f
docker container ls
df -h -t ext4
EOF
sudo chmod +x /etc/docker/image-prune.sh
sudo crontab -l > /tmp/existing-crons | true
sudo echo "0 */3 * * * /etc/docker/image-prune.sh" >> /tmp/existing-crons
sudo crontab /tmp/existing-crons
# Connect node to Jenkins
wget https://gpuci.gpuopenanalytics.com/plugin/ec2/AMI-Scripts/ubuntu-ami-setup.sh
sudo sh ubuntu-ami-setup.sh gpuci.gpuopenanalytics.com
| 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/init/aws-nvme-docker.sh | #!/bin/bash
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# AWS init script for gpuCI nodes with nvme drives on nodes
#
set -e
SCRIPT_NAME="$0"

# logger MESSAGE... - timestamped echo so boot logs are easy to follow
function logger {
  TS=`date +%F_%H-%M-%S`
  echo "[$SCRIPT_NAME $TS] $@"
}

logger "Check if nvme is already mounted; if not format and mount"
INSTANCE_NVME=`sudo nvme list | grep "Amazon EC2 NVMe Instance Storage" | awk '{ print $1 }' | head -n1`
logger "Instance NVMe found - $INSTANCE_NVME"
if ! grep -qa "$INSTANCE_NVME /jenkins " /proc/mounts; then
  logger "$INSTANCE_NVME not mounted, mounting and formatting"
  sudo mkfs -t ext4 $INSTANCE_NVME && sudo mkdir -p /jenkins && sudo mount $INSTANCE_NVME /jenkins
else
  logger "$INSTANCE_NVME already mounted"
fi

logger "Check mounts"
mount
df -h

logger "Ensure ubuntu user has full rights on directory for Jenkins work"
sudo chown -R ubuntu:ubuntu /jenkins

logger "Relocate /tmp to NVMe for faster perf"
if [ ! -d "/jenkins/tmp" ] ; then
  logger "/tmp needs relocating"
  sudo mv /tmp /jenkins
  sudo ln -s /jenkins/tmp /tmp
  logger "/tmp relocated to /jenkins"
else
  logger "/jenkins/tmp already exists"
fi

logger "Override docker setup"
sudo service docker stop
if [ -f /etc/docker/daemon.json ]; then
  sudo cat /etc/docker/daemon.json
fi
cat <<EOL > /tmp/daemon.json
{
    "experimental": true
}
EOL
sudo mv /tmp/daemon.json /etc/docker/daemon.json
sudo cat /etc/docker/daemon.json

logger "Move docker to nvme on /jenkins"
if [ ! -d /jenkins/docker ] ; then
  logger "Moving /var/lib/docker to /jenkins/docker"
  sudo mv /var/lib/docker /jenkins/
  sudo ln -s /jenkins/docker /var/lib/docker
else
  logger "Docker is already moved"
fi
sudo service docker start

logger "Ensure docker system is clean"
set +e
docker system prune -f

# Install cron script to prune images/volumes/containers periodically.
# NOTE: 'sudo tee' is required here; with 'sudo cat > file' the redirection
# is performed by the (possibly unprivileged) calling shell, not by root.
sudo tee /etc/docker/image-prune.sh > /dev/null <<EOF
#!/bin/bash
df -h -t ext4
docker images
docker image prune -a -f --filter "until=12h"
docker images
docker volume ls
docker volume prune -f
docker volume ls
docker container ls
docker container prune -f
docker container ls
df -h -t ext4
EOF
sudo chmod +x /etc/docker/image-prune.sh

# '|| true' tolerates an empty root crontab (the original '| true' piped
# stdout into true, which unintentionally masked all errors)
sudo crontab -l > /tmp/existing-crons || true
echo "0 */3 * * * /etc/docker/image-prune.sh" >> /tmp/existing-crons
sudo crontab /tmp/existing-crons

logger "Connect node to Jenkins"
wget https://gpuci.gpuopenanalytics.com/plugin/ec2/AMI-Scripts/ubuntu-ami-setup.sh
# '-y' keeps the install non-interactive; without it apt prompts and the
# unattended boot script would hang or abort
sudo apt update && sudo apt install -y dos2unix
dos2unix ubuntu-ami-setup.sh
sudo sh ubuntu-ami-setup.sh gpuci.gpuopenanalytics.com
| 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/init/gcp-docker.sh | #!/bin/bash
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# GCP init script for gpuCI nodes
#
set -e
SCRIPT_NAME="$0"

# logger MESSAGE... - timestamped echo so boot logs are easy to follow
function logger {
  TS=`date +%F_%H-%M-%S`
  echo "[$SCRIPT_NAME $TS] $@"
}

logger "Update/upgrade image first; before unattended-upgrades runs"
sudo apt-get update && sudo apt-get upgrade -y && sudo apt-get clean

logger "Override docker setup"
sudo service docker stop
if [ -f /etc/docker/daemon.json ]; then
  sudo cat /etc/docker/daemon.json
fi
cat <<EOL > /tmp/daemon.json
{
    "runtimes": {
        "nvidia": {
            "path": "nvidia-container-runtime",
            "runtimeArgs": []
        }
    },
    "experimental": true
}
EOL
sudo mv /tmp/daemon.json /etc/docker/daemon.json
sudo cat /etc/docker/daemon.json

logger "Add jenkins user to docker group"
sudo usermod -a -G docker jenkins
sudo service docker start

logger "Ensure docker system is clean"
set +e
docker system prune -f

# Install cron script to prune images/volumes/containers periodically.
# NOTE: 'sudo tee' is required here; with 'sudo cat > file' the redirection
# is performed by the (possibly unprivileged) calling shell, not by root.
sudo tee /etc/docker/image-prune.sh > /dev/null <<EOF
#!/bin/bash
df -h -t ext4
docker images
docker image prune -a -f --filter "until=12h"
docker images
docker volume ls
docker volume prune -f
docker volume ls
docker container ls
docker container prune -f
docker container ls
df -h -t ext4
EOF
sudo chmod +x /etc/docker/image-prune.sh

# '|| true' tolerates an empty root crontab (the original '| true' piped
# stdout into true, which unintentionally masked all errors)
sudo crontab -l > /tmp/existing-crons || true
echo "0 */3 * * * /etc/docker/image-prune.sh" >> /tmp/existing-crons
sudo crontab /tmp/existing-crons
| 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/init/aws-ebs.sh | #!/bin/bash
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# AWS init script for gpuCI nodes with EBS only storage
#
set -e
SCRIPT_NAME="$0"

# logger MESSAGE... - timestamped echo so boot logs are easy to follow
function logger {
  TS=`date +%F_%H-%M-%S`
  echo "[$SCRIPT_NAME $TS] $@"
}

logger "Check mounts"
mount
df -h

logger "Ensure ubuntu user has full rights on directory for Jenkins work"
sudo mkdir -p /jenkins
sudo chown -R ubuntu:ubuntu /jenkins

logger "Override docker setup"
sudo service docker stop
if [ -f /etc/docker/daemon.json ]; then
  sudo cat /etc/docker/daemon.json
fi
cat <<EOL > /tmp/daemon.json
{
    "experimental": true
}
EOL
sudo mv /tmp/daemon.json /etc/docker/daemon.json
sudo cat /etc/docker/daemon.json
sudo service docker start

logger "Ensure docker system is clean"
set +e
docker system prune -f

# Install cron script to prune images/volumes/containers periodically.
# NOTE: 'sudo tee' is required here; with 'sudo cat > file' the redirection
# is performed by the (possibly unprivileged) calling shell, not by root.
sudo tee /etc/docker/image-prune.sh > /dev/null <<EOF
#!/bin/bash
df -h -t ext4
docker images
docker image prune -a -f --filter "until=12h"
docker images
docker volume ls
docker volume prune -f
docker volume ls
docker container ls
docker container prune -f
docker container ls
df -h -t ext4
EOF
sudo chmod +x /etc/docker/image-prune.sh

# '|| true' tolerates an empty root crontab (the original '| true' piped
# stdout into true, which unintentionally masked all errors)
sudo crontab -l > /tmp/existing-crons || true
echo "0 */3 * * * /etc/docker/image-prune.sh" >> /tmp/existing-crons
sudo crontab /tmp/existing-crons

logger "Connect node to Jenkins"
wget https://gpuci.gpuopenanalytics.com/plugin/ec2/AMI-Scripts/ubuntu-ami-setup.sh
sudo sh ubuntu-ami-setup.sh gpuci.gpuopenanalytics.com
| 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/templates/template-gce.json | {
"variables": {
"source_image_family": "ubuntu-2004-lts",
"machine_type": "e2-medium",
"type": "cpu",
"arch": "amd64",
"key_file": "gce-key.json"
},
"builders": [
{
"type": "googlecompute",
"account_file": "{{user `key_file`}}",
"project_id": "rapids-gpuci",
"machine_type": "{{user `machine_type`}}",
"source_image_family": "{{user `source_image_family`}}",
"zone": "us-central1-a",
"image_description": "gpuCI {{user `type` | upper}}-{{user `arch` | upper}} Ubuntu 20.04",
"image_name": "gpuci-{{user `type`}}-{{user `arch`}}-{{isotime | clean_resource_name}}",
"disk_size": 20,
"disk_type": "pd-ssd",
"ssh_username": "ubuntu",
"subnetwork": "gpuci-uscentral1",
"network": "gpuci-vpc",
"network_project_id": "rapids-gpuci"
}
],
"provisioners": [
{
"type": "shell",
"script": "bootstrap.sh",
"execute_command": "sudo env {{ .Vars }} {{ .Path }}"
},
{
"type": "ansible",
"playbook_file": "playbook.yml",
"groups": [
"{{user `type`}}"
],
"user": "ubuntu"
},
{
"type": "shell",
"script": "post_build.sh"
}
]
}
| 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/templates/README.md | # gpuCI AMI Templates
This directory contains a [packer](https://www.packer.io/) template for building gpuCI AMIs.
## Building the images
1. [Setup AWS credentials](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html)
2. [Ensure the security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#SecurityGroupRules) in the template allows your IP to SSH
3. Install packer (perhaps via `brew install packer`)
4. `packer build -var type=cpu template.json` or `packer build -var type=gpu template.json`
## Components
### Packer templates
#### EC2 builds
The main template is `template.json`, with the following variables:
**NOTE:** `arm64` only works for `cpu` type builds currently; `amd64` works for both `cpu` and `gpu` types
Variable | Values | Default | Purpose
--- | --- | --- | ---
`type` | `cpu` OR `gpu` | `cpu` | Type of image to build
`arch` | `amd64` OR `arm64` | `amd64` | Architecture of the build
`instance` | `t2.medium` for `amd64` OR<br> `a1.large` for `arm64` | `t2.medium` | Instance to use for build
Define these with `-var <variable>=<value>` when issuing the `packer build` command
#### Local docker builds
`docker.json` is a template which builds a docker image with the same scripts. This is useful for quick tests without having to wait for EC2 instances.
### Scripts
There are two scripts:
- `bootstrap.sh` - Bootstraps a python installation to allow Ansible to work
- `post_build.sh` - Outputs some debug information after the packer build completes
### Ansible
There is a single playbook (`playbook.yml`) which has two different groups to run different roles.
There are a few roles:
- `common` - Common packages/installs between CPU & GPU
- `cpu` - CPU specific installs
- `gpu` - GPU specific installs (NVIDIA drivers, CUDA, etc)
- `post_common` - Actions common to both CPU & GPU performed after type-specific role
| 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/templates/docker.json | {
"variables": {
"ansible_host": "default",
"ansible_connection": "docker",
"type": "cpu"
},
"builders": [
{
"type": "docker",
"image": "ubuntu:20.04",
"commit": true,
"run_command": [
"-d",
"-i",
"-t",
"--name",
"{{user `ansible_host`}}",
"{{.Image}}",
"/bin/bash"
]
}
],
"provisioners": [
{
"type": "shell",
"script": "bootstrap.sh"
},
{
"type": "ansible",
"playbook_file": "playbook.yml",
"user": "root",
"groups": [
"{{user `type`}}"
],
"extra_arguments": [
"--extra-vars",
"ansible_host={{user `ansible_host`}} ansible_connection={{user `ansible_connection`}}"
]
},
{
"type": "shell",
"script": "post_build.sh"
}
]
} | 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/templates/template.json | {
"variables": {
"aws_access_key": "",
"aws_secret_key": "",
"type": "cpu",
"arch": "amd64",
"instance": "t2.medium"
},
"builders": [
{
"type": "amazon-ebs",
"access_key": "{{user `aws_access_key`}}",
"secret_key": "{{user `aws_secret_key`}}",
"region": "us-east-2",
"source_ami_filter": {
"filters": {
"virtualization-type": "hvm",
"name": "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-{{user `arch`}}-server-*",
"root-device-type": "ebs"
},
"owners": [
"099720109477"
],
"most_recent": true
},
"instance_type": "{{user `instance`}}",
"associate_public_ip_address": true,
"vpc_id": "vpc-81eb9ae9",
"subnet_id": "subnet-45f29e2d",
"security_group_id": "sg-011a953aa80956de1",
"ssh_username": "ubuntu",
"ami_name": "gpuci-{{user `type`}}-{{user `arch`}}-{{isotime | clean_resource_name}}",
"ami_description": "gpuCI {{user `type` | upper}}-{{user `arch` | upper}} Ubuntu 20.04",
"launch_block_device_mappings": [
{
"device_name": "/dev/sda1",
"volume_size": 20,
"volume_type": "gp2",
"delete_on_termination": true
}
],
"tags": {
"role": "gpuci",
"type": "{{user `type`}}",
"arch": "{{user `arch`}}",
"os": "ubuntu20.04"
}
}
],
"provisioners": [
{
"type": "shell",
"script": "bootstrap.sh",
"execute_command": "sudo env {{ .Vars }} {{ .Path }}"
},
{
"type": "ansible",
"playbook_file": "playbook.yml",
"groups": [
"{{user `type`}}"
],
"user": "ubuntu"
},
{
"type": "shell",
"script": "post_build.sh"
}
]
}
| 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/templates/playbook.yml | ---
- name: CPU
hosts: cpu
become: yes
roles:
- common
- cpu
- post_common
- name: GPU
hosts: gpu
become: yes
roles:
- common
- gpu
- post_common | 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/templates/bootstrap.sh | #!/bin/bash
# Bootstrap a Packer build target: give real (non-docker) instances a
# settling period after boot, log disk layout, then refresh base packages
# so the follow-up python install (next line) succeeds.
set -ex

# Containers are ready immediately; only real machines need the grace period.
[ "$PACKER_BUILDER_TYPE" = "docker" ] || sleep 30

# Disk diagnostics for the build log
df -h
lsblk

apt-get update
apt-get -y upgrade
apt-get install -y python3-dev python3-pip | 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/templates/packer-ansible.dockerfile | FROM ubuntu:20.04
# Prepend /root to PATH (presumably for tools dropped into root's home at
# runtime — confirm with the CI jobs that use this image)
ENV PATH=/root:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

# Install Ansible from the official PPA plus helpers needed by later steps;
# clear the apt lists to keep the layer small
RUN apt-get update && \
    apt-get install -y software-properties-common && \
    apt-add-repository --yes --update ppa:ansible/ansible && \
    apt-get update && \
    apt-get install -y ansible unzip wget python3-distutils python3-apt && \
    rm -rf /var/lib/apt/lists/*

# Install a pinned Packer release into /usr/local/bin
RUN wget 'https://releases.hashicorp.com/packer/1.6.6/packer_1.6.6_linux_amd64.zip' && \
    unzip 'packer_1.6.6_linux_amd64.zip' -d /usr/local/bin && \
    rm -f packer*.zip

# Bootstrap pip for python3 and install requests (used by the Jenkins
# AMI-update script run in this image)
RUN wget https://bootstrap.pypa.io/get-pip.py && \
    python3 get-pip.py && \
    pip install requests
| 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/templates/post_build.sh | #!/bin/bash
# Print post-build diagnostics: disk usage, interpreter versions and the
# installed package inventories (pip, pip3, apt).
set -ex

# section TITLE - print a banner identical to the original inline echos
section() {
    echo "========== $1 =========="
}

section "Disk info"
df -h
lsblk
echo

section "Python version"
python -V
echo

section "Python3 version"
python3 -V
echo

section "Pip list"
pip -V
pip list
echo

section "Pip3 list"
pip3 -V
pip3 list
echo

section "Apt list"
apt list --installed
echo | 0 |
rapidsai_public_repos/gpuci-mgmt | rapidsai_public_repos/gpuci-mgmt/templates/jenkins-update-ec2-ami.py | #!/usr/bin/python
"""Update the EC2 AMI ids configured in Jenkins' EC2-cloud plugin.

Reads Jenkins credentials and freshly built AMI ids from the environment,
then executes a Groovy script on the Jenkins master via the scriptText API.
"""
import requests
import os
import sys

# Jenkins connection settings; all three are required (KeyError if unset)
jenkins_base_url = os.environ['JENKINS_URL']
jenkins_auth_user = os.environ['JENKINS_AUTH_USER']
jenkins_auth_password = os.environ['JENKINS_AUTH_PASSWORD']

# CSRF crumb cache, populated lazily by get_jenkins_crumb()
jenkins_crumb_name = ""
jenkins_crumb_value = ""

# shared session required to use crumbs (the crumb is tied to the session)
jenkins_session = requests.Session()
verify_ssl = True

# three amis generated from packer build job (None when the env var is unset;
# main() asserts they look like AMI ids before use)
gpu_ami_amd64 = os.getenv("GPU_AMI_AMD64")
cpu_ami_amd64 = os.getenv("CPU_AMI_AMD64")
cpu_ami_arm64 = os.getenv("CPU_AMI_ARM64")
def get_crumb_url():
    """Build the crumb-issuer API URL with basic-auth credentials embedded."""
    host = jenkins_base_url.replace('https://', '')
    if not host.endswith('/'):
        host += '/'
    return f'https://{jenkins_auth_user}:{jenkins_auth_password}@{host}crumbIssuer/api/json'
def get_jenkins_crumb():
    """Fetch the Jenkins CSRF crumb, caching it in module globals.

    Returns the crumb value; repeated calls reuse the cached crumb.
    """
    global jenkins_crumb_name
    global jenkins_crumb_value
    if jenkins_crumb_value:
        return jenkins_crumb_value
    response = jenkins_session.get(get_crumb_url(), verify=verify_ssl)
    body = response.json()
    jenkins_crumb_name = body["crumbRequestField"]
    jenkins_crumb_value = body["crumb"]
    return jenkins_crumb_value
def get_groovy_url():
    """Build the scriptText endpoint URL with basic-auth credentials embedded."""
    host = jenkins_base_url.replace('https://', '')
    if not host.endswith('/'):
        host += '/'
    return f'https://{jenkins_auth_user}:{jenkins_auth_password}@{host}scriptText'
def update_jenkins_ami_id(cpu_ami_amd64, cpu_ami_arm64, gpu_ami_amd64):
    """Rewrite the AMI ids of Jenkins' EC2 cloud templates via a Groovy script.

    The script sets the CPU AMI on templates whose display name looks like a
    CPU builder (arm64 instance classes get the arm64 AMI), sets the GPU AMI
    on "gpu" templates, saves Jenkins, and marks existing "ec2" agents
    temporarily offline so they get replaced with the new AMI.

    Returns True when the script's final output line confirms completion,
    False otherwise; exits the process (sys.exit(1)) on a non-200 response.
    """
    groovy_url = get_groovy_url()
    # The three AMI ids are spliced into the Groovy source via %-formatting,
    # in (cpu_amd64, cpu_arm64, gpu_amd64) order — see the trailing % below.
    groovy_script = """
    import hudson.plugins.ec2.AmazonEC2Cloud;
    import hudson.slaves.OfflineCause.ByCLI;

    def is_arm(instance_class) {
        arm_classes = ['m6g', 'c6g', 'r6g', 'a1']
        for (klazz in arm_classes) {
            if (instance_class.toLowerCase().contains(klazz))return true
        }
        return false
    }

    def is_cpu(display_name) {
        display_name = display_name.toLowerCase()
        cpu_descriptions = ["cpu", "builder", "runner"]
        for (desc in cpu_descriptions) {
            if (display_name.contains(desc)) {
                return true
            }
        }
        return false
    }

    Jenkins.instance.clouds.each { cloud ->
        if (cloud instanceof AmazonEC2Cloud) {
            cloud.getTemplates().each { agent ->
                if (is_cpu(agent.getDisplayName())) {
                    agent.setAmi("%s")
                    if (is_arm(agent.type.toString())) agent.setAmi('%s')
                }
                if (agent.getDisplayName().toLowerCase().contains("gpu")) agent.setAmi("%s")
            }
        }
    }
    Jenkins.instance.save()

    for (agent in hudson.model.Hudson.instance.slaves) {
        if (agent.name.toLowerCase().contains("ec2")) {
            println('Marking machine offline: ' + agent.name);
            agent.getComputer().setTemporarilyOffline(true, new ByCLI("Machine taken offline due to outdated AMI."))
        }
    }
    println "ami update complete"
    """ % (cpu_ami_amd64, cpu_ami_arm64, gpu_ami_amd64)
    # Jenkins CSRF protection: send the crumb as both a form field and a header
    payload = {'script': groovy_script, jenkins_crumb_name: jenkins_crumb_value}
    headers = {jenkins_crumb_name: jenkins_crumb_value}
    r = jenkins_session.post(groovy_url, verify=verify_ssl, data=payload, headers=headers)
    if not r.status_code == 200:
        print(f'HTTP POST to Jenkins URL {groovy_url} resulted in {r.status_code}')
        print(r.headers)
        print(r.text)
        sys.exit(1)
    # The Groovy script prints "ami update complete" as its last line only when
    # it ran to the end; anything else means it aborted partway through.
    final_line = r.text.strip().split("\n")[-1]
    if not final_line == "ami update complete":
        print(r.text)
        return False
    print(r.text)
    return True
def main():
    """Entry point: validate the env-provided AMI ids and push them to Jenkins."""
    get_jenkins_crumb()
    # Sanity-check each value looks like an AMI id (also catches unset env vars)
    assert "ami" in cpu_ami_amd64
    assert "ami" in cpu_ami_arm64
    assert "ami" in gpu_ami_amd64
    print("Updating AMIs and marking existing EC2 nodes offline...")
    if update_jenkins_ami_id(cpu_ami_amd64, cpu_ami_arm64, gpu_ami_amd64):
        print("Jenkins AMIs have been updated.")
    else:
        print("Ran into an error when attempting to update the Jenkins AMI ID")
        sys.exit(1)
if __name__ == '__main__':
main()
| 0 |
rapidsai_public_repos/gpuci-mgmt/templates/roles/gpu | rapidsai_public_repos/gpuci-mgmt/templates/roles/gpu/tasks/main.yml | ---
- include_vars: versions.yml
- name: Add key for NVIDIA CUDA repos
apt_key:
url: http://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub
state: present
- name: Add repo for NVIDIA CUDA drivers
apt_repository:
repo: "deb http://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 /"
- name: Install CUDA
apt:
name:
- "cuda-libraries-{{ cuda_libraries_version }}"
- "cuda-drivers-{{ cuda_drivers_version }}"
- "cuda-compiler-{{ cuda_compiler_version }}"
state: present
install_recommends: no
update_cache: yes
- name: Update all packages to their latest version
apt:
name: "*"
state: latest
update_cache: yes
- name: Hold CUDA Packages
dpkg_selections:
name: "{{ item }}"
selection: hold
loop:
- "cuda-libraries-{{ cuda_libraries_version }}"
- "cuda-drivers-{{ cuda_drivers_version }}"
- "cuda-compiler-{{ cuda_compiler_version }}"
- name: Install nvidia docker apt key
apt_key:
url: https://nvidia.github.io/nvidia-docker/gpgkey
- name: Add nvidia-docker sources list
get_url:
url: https://nvidia.github.io/nvidia-docker/ubuntu20.04/nvidia-docker.list
dest: /etc/apt/sources.list.d/nvidia-docker.list
- name: Install nvidia-docker2
apt:
name: nvidia-docker2
state: latest
update_cache: yes
- name: Create jenkins user
user:
name: jenkins
groups:
- docker
- wheel
uid: 10000
append: yes
- name: Add authorized keys
authorized_key:
user: jenkins
key: '{{ item }}'
with_file:
- ../../common/files/jenkins-prod.id_rsa.pub
- ../../common/files/jenkins-ec2.id_rsa.pub
- name: Initialize git-lfs
command: git lfs install
become: yes
become_user: jenkins
- name: Reboot
reboot: {}
| 0 |
rapidsai_public_repos/gpuci-mgmt/templates/roles/gpu/tasks | rapidsai_public_repos/gpuci-mgmt/templates/roles/gpu/tasks/vars/versions.yml | ---
# ./vars/versions.yml
cuda_libraries_version: 11-5
cuda_drivers_version: 495
cuda_compiler_version: 11-5 | 0 |
rapidsai_public_repos/gpuci-mgmt/templates/roles/post_common | rapidsai_public_repos/gpuci-mgmt/templates/roles/post_common/tasks/main.yml | ---
- name: Add ubuntu to docker group
user:
name: ubuntu
groups:
- docker
- wheel | 0 |
rapidsai_public_repos/gpuci-mgmt/templates/roles/common | rapidsai_public_repos/gpuci-mgmt/templates/roles/common/handlers/main.yaml | ---
- name: restart chrony
service:
name: chrony
state: restarted
| 0 |
rapidsai_public_repos/gpuci-mgmt/templates/roles/common | rapidsai_public_repos/gpuci-mgmt/templates/roles/common/files/jenkins-ec2.id_rsa.pub | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCNjAPz9lETpd4yblvznDiwoaCTbZ6uEQzexQaaJRUnRGiVIEkggWXGiq7MWz2gEYIpUE5/sLPIJQK/QnNBvXSIojlyQFIIcXGFcQ3fcLTNF/GXjBEMLWO/voJt7IMXzEdlQKfdNtXB/YGRU9Yab9+4w36L/E8myXPEdHscwfJWv80k894GQIaPH3MzwlY1r+hGiE85Y3mCmc7g4n4np6jloBYspj+G6PQG8aYGfvLMuO/zRB2lifyU+7Ta/uLiltahktMZSzV8Ya85t6lNI14gzQnDazw/dxH4ZsTazAeEOU91UNgzt2yunrj0rTwkRSNRBJKAS5Q1De636YJp7I51 jenkins-ec2
| 0 |
rapidsai_public_repos/gpuci-mgmt/templates/roles/common | rapidsai_public_repos/gpuci-mgmt/templates/roles/common/files/chrony.conf | # Welcome to the chrony configuration file. See chrony.conf(5) for more
# information about usable directives.
# This will use (up to):
# - 4 sources from ntp.ubuntu.com which some are ipv6 enabled
# - 2 sources from 2.ubuntu.pool.ntp.org which is ipv6 enabled as well
# - 1 source from [01].ubuntu.pool.ntp.org each (ipv4 only atm)
# This means by default, up to 6 dual-stack and up to 2 additional IPv4-only
# sources will be used.
# At the same time it retains some protection against one of the entries being
# down (compare to just using one of the lines). See (LP: #1754358) for the
# discussion.
#
# About using servers from the NTP Pool Project in general see (LP: #104525).
# Approved by Ubuntu Technical Board on 2011-02-08.
# See http://www.pool.ntp.org/join.html for more information.
# This directive specifies the location of the file containing ID/key pairs for
# NTP authentication.
keyfile /etc/chrony/chrony.keys
# This directive specifies the file into which chronyd will store the rate
# information.
driftfile /var/lib/chrony/chrony.drift
# Uncomment the following line to turn logging on.
#log tracking measurements statistics
# Log files location.
logdir /var/log/chrony
# Stop bad estimates upsetting machine clock.
maxupdateskew 100.0
# This directive enables kernel synchronisation (every 11 minutes) of the
# real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
rtcsync
# Step the system clock instead of slewing it if the adjustment is larger than
# one second, but only in the first three clock updates.
makestep 1 3
#172.31.29.62 is private IP for jenkins-prod
server 172.31.29.62 iburst
| 0 |
rapidsai_public_repos/gpuci-mgmt/templates/roles/common | rapidsai_public_repos/gpuci-mgmt/templates/roles/common/files/jenkins-prod.id_rsa.pub | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCcbirkJsRrR1QKLZHzHkwYcG5RxsooeRL/TI8r8f8QbDgPNm3jfF07uUSxq45pzEAKRY+Kplnx0rM/RJvYpA/S3hJfB1KaAujAWxidBXaquBVA6Q20cYLYC1TddsFJ5O5L6mgL6AbWEMBCVn9XnBMimWxF9yq7KLIRw9fSE6errC/xHtdHrzeYRLu7VRO6HrjeQ0c9c1af0lJg23ZCLP5sVQyUUG0j/ZeGJeFUInuqN33rJJjGcxaTq+ywEgdlH+eUNcxCDyrAoDNgOJQUqAJtFYKDWKahSr7GP0s8sScusGPdmMbTyehNRgHFV/WZeB7TppOVz7lseHiOPc0cjJnd jenkins-prod | 0 |
rapidsai_public_repos/gpuci-mgmt/templates/roles/common | rapidsai_public_repos/gpuci-mgmt/templates/roles/common/tasks/main.yml | ---
- name: Install common packages
apt:
update_cache: yes
pkg:
- jq
- ansible
- openjdk-11-jdk-headless
- git
- git-lfs
- nvme-cli
- build-essential
- wget
- curl
- vim
- screen
- apt-transport-https
- ca-certificates
- curl
- software-properties-common
- unzip
- sudo
- chrony
- tzdata
- acl
- tar
- gzip
- name: Install gpuci-tools
unarchive:
src: https://github.com/rapidsai/gpuci-tools/releases/latest/download/tools.tar.gz
dest: /usr/local/bin
remote_src: yes
- name: Set timezone to PT
timezone:
name: America/Los_Angeles
- name: Configure Chrony
copy:
src: chrony.conf
dest: /etc/chrony/chrony.conf
mode: 0644
notify: restart chrony
- name: Initialize git-lfs
command: git lfs install
vars:
ansible_become: no
- name: Upgrade pip
pip:
executable: pip3
extra_args: --upgrade
name:
- pip
- name: Install pip packages
pip:
executable: pip3
extra_args: --ignore-installed
name:
- requests
- PyYaml
- jinja2
state: latest
- name: Make sure we have a 'wheel' group
group:
name: wheel
state: present
- name: Create /etc/sudoers.d/wheel
copy:
content: "%wheel ALL=(ALL) NOPASSWD: ALL"
dest: /etc/sudoers.d/wheel
group: root
owner: root
mode: '0440'
- name: Create ubuntu user
user:
name: ubuntu
uid: 1000
- name: Add authorized keys
authorized_key:
user: ubuntu
key: '{{ item }}'
with_file:
- jenkins-prod.id_rsa.pub
- jenkins-ec2.id_rsa.pub
- name: Create ubuntu home directory if not exists
file:
path: /home/ubuntu
state: directory
- name: Download awscli (x86_64)
unarchive:
src: https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip
dest: /home/ubuntu/
remote_src: yes
when: ansible_architecture == "x86_64"
- name: Download awscli (aarch64)
unarchive:
src: https://awscli.amazonaws.com/awscli-exe-linux-aarch64.zip
dest: /home/ubuntu/
remote_src: yes
when: ansible_architecture == "aarch64"
- name: Install awscli
command: /home/ubuntu/aws/install
- name: Add Docker GPG key
apt_key:
url: https://download.docker.com/linux/ubuntu/gpg
state: present
- name: Add Docker apt repository (x86_64)
apt_repository:
repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable"
when: ansible_architecture == "x86_64"
- name: Add Docker apt repository (aarch64)
apt_repository:
repo: "deb [arch=arm64] https://download.docker.com/linux/ubuntu focal stable"
when: ansible_architecture == "aarch64"
- name: Install Docker CE
apt:
update_cache: yes
pkg:
- docker-ce
- docker-ce-cli
- name: Create rc.local
copy:
dest: /etc/rc.local
mode: '0555'
content: |
#!/bin/bash
- name: Configure python alternative
alternatives:
name: python
link: /usr/bin/python
path: /usr/bin/python3.8
| 0 |
rapidsai_public_repos/gpuci-mgmt/templates/roles/cpu | rapidsai_public_repos/gpuci-mgmt/templates/roles/cpu/tasks/main.yml | ---
| 0 |
rapidsai_public_repos/gpuci-mgmt/ci | rapidsai_public_repos/gpuci-mgmt/ci/templates/run.sh | #!/bin/bash
# Run script for packer AMI builds
#
# Builds the three gpuCI AMIs (CPU/amd64, GPU/amd64, CPU/arm64) and exports
# the resulting AMI ids for the downstream Jenkins-update step.
set -e
# Without pipefail, 'packer ... | tee log' reports tee's status, so a failed
# packer build would NOT stop the script despite 'set -e'.
set -o pipefail

cd templates

# extract_ami LOGFILE - pull the AMI id out of packer's machine-readable log
extract_ami() {
    grep "artifact" "$1" | grep ",id," | cut -d "," -f 6 | cut -d ":" -f 2
}

echo "========== CPU-amd64 Build =========="
/root/packer build -var type=cpu -machine-readable template.json | tee cpu_amd64_build.log

echo "========== GPU-amd64 Build =========="
/root/packer build -var type=gpu -machine-readable template.json | tee gpu_amd64_build.log

echo "========== CPU-arm64 Build =========="
/root/packer build -var type=cpu -var arch=arm64 -var instance=a1.large -machine-readable template.json | tee cpu_arm64_build.log

echo "========== Artifacts =========="
export CPU_AMI_AMD64=$(extract_ami cpu_amd64_build.log)
export GPU_AMI_AMD64=$(extract_ami gpu_amd64_build.log)
export CPU_AMI_ARM64=$(extract_ami cpu_arm64_build.log)
echo "CPU-amd64 AMI: ${CPU_AMI_AMD64}"
echo "GPU-amd64 AMI: ${GPU_AMI_AMD64}"
echo "CPU-arm64 AMI: ${CPU_AMI_ARM64}"
| 0 |
rapidsai_public_repos | rapidsai_public_repos/ucxx/.pre-commit-config.yaml | repos:
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
# Use the config file specific to each subproject so that each
# project can specify its own first/third-party packages.
args: ["--config-root=python/", "--resolve-all-configs"]
files: python/.*
exclude: __init__.py$
types: [text]
types_or: [python, cython, pyi]
- repo: https://github.com/ambv/black
rev: 22.3.0
hooks:
- id: black
files: python/.*
- repo: https://github.com/PyCQA/flake8
rev: 5.0.4
hooks:
- id: flake8
args: ["--config=python/.flake8"]
files: python/.*\.py$
types: [python]
- id: flake8
args: ["--config=python/.flake8.cython"]
types: [cython]
additional_dependencies: ["flake8-force"]
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v16.0.6
hooks:
- id: clang-format
files: \.(h|cpp)$
types_or: [file]
args: ['-fallback-style=none', '-style=file', '-i']
- repo: https://github.com/cpplint/cpplint
rev: 1.6.1
hooks:
- id: cpplint
name: cpplint
# description: Check C++ code style using cpplint.py.
# entry: bash ./tools/codestyle/cpplint_pre_commit.hook
# language: system
files: \.(h|cpp)$
# exclude: path/to/myfile.h
- repo: https://github.com/rapidsai/dependency-file-generator
rev: v1.5.1
hooks:
- id: rapids-dependency-file-generator
args: ["--clean"]
default_language_version:
python: python3
| 0 |
rapidsai_public_repos | rapidsai_public_repos/ucxx/fetch_rapids.cmake | # =============================================================================
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Download rapids-cmake's bootstrap file once and cache it in the build tree;
# later configure runs reuse the cached copy instead of re-downloading.
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/UCXX_RAPIDS.cmake)
  file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-24.02/RAPIDS.cmake
       ${CMAKE_CURRENT_BINARY_DIR}/UCXX_RAPIDS.cmake
  )
endif()
# Bring the rapids-cmake entry point into the current CMake context.
include(${CMAKE_CURRENT_BINARY_DIR}/UCXX_RAPIDS.cmake)
| 0 |
rapidsai_public_repos | rapidsai_public_repos/ucxx/README.md | # UCXX
UCXX is an object-oriented C++ interface for UCX, with native support for Python bindings.
## Building
### Environment setup
Before starting it is necessary to have the required dependencies installed. The simplest way to get started is to install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) and then to create and activate an environment with the provided development file:
```
$ conda env create -n ucxx -f conda/environments/ucxx-cuda118_arch-x86_64.yml
$ conda activate ucxx
```
#### Faster conda dependency resolution
The aforementioned procedure should complete without issues, but it may be slower than necessary. One alternative to speed up dependency resolution is to install [mamba](https://mamba.readthedocs.io/en/latest/) before creating the new environment. After installing Miniconda, mamba can be installed with:
```
$ conda install -c conda-forge mamba
```
After that, one can proceed as before, but simply replacing `conda` with `mamba` in the environment creation command:
```
$ mamba env create -n ucxx -f conda/environments/ucxx-cuda118_arch-x86_64.yml
$ conda activate ucxx
```
### Convenience Script
For convenience, we provide the `./build.sh` script. By default, it will build and install both C++ and Python libraries. For a detailed description on available options please check `./build.sh --help`.
Building C++ and Python libraries manually is also possible, see instructions on building [C++](#c) and [Python](#python).
Additionally, there is a `./build_and_run.sh` script that will call `./build.sh` to build everything as well as running C++ and Python tests and a few benchmarks. Similarly, details on existing options can be queried with `./build_and_run.sh`.
### C++
To build and install C++ library to `${CONDA_PREFIX}`, with both Python and RMM support, as well as building all tests run:
```
mkdir cpp/build
cd cpp/build
cmake .. -DCMAKE_INSTALL_PREFIX=${CONDA_PREFIX} \
-DBUILD_TESTS=ON \
-DCMAKE_BUILD_TYPE=Release \
-DUCXX_ENABLE_PYTHON=ON \
-DUCXX_ENABLE_RMM=ON
make -j install
```
### Python
```
cd python
python setup.py install
```
## Running benchmarks
### C++
Currently there is only one C++ benchmark with few options. It can be found under `cpp/build/benchmarks/ucxx_perftest` and for a full list of options `--help` argument can be used.
The benchmark is composed of two processes: a server and a client. The server must not specify an IP address or hostname and will bind to all available interfaces, whereas the client must specify the IP address or hostname where the server can be reached.
Below is an example of running a server first, followed by the client connecting to the server on the `localhost` (`127.0.0.1`). Both processes specify a list of parameters, which are the message size in bytes (`-s 800000000`), that allocations should be reused (`-r`), the number of iterations to perform (`-n 10`) and the progress mode (`-m polling`).
```
$ UCX_TCP_CM_REUSEADDR=y ./benchmarks/ucxx_perftest -s 800000000 -r -n 10 -m polling &
$ ./benchmarks/ucxx_perftest -s 800000000 -r -n 10 -m polling 127.0.0.1
```
It is recommended to use `UCX_TCP_CM_REUSEADDR=y` when binding to interfaces with TCP support to prevent waiting for the process' `TIME_WAIT` state to complete, which often takes 60 seconds after the server has terminated.
### Python
Benchmarks are available for both the Python "core" (synchronous) API and the "high-level" (asynchronous) API.
#### Synchronous
```python
# Thread progress without delayed notification NumPy transfer, 100 iterations
# of single buffer with 100 bytes
python -m ucxx.benchmarks.send_recv \
--backend ucxx-core \
--object_type numpy \
--n-iter 100 \
--n-bytes 100
# Blocking progress without delayed notification RMM transfer between GPUs 0
# and 3, 100 iterations of 2 buffers (using multi-buffer interface) each with
# 1 MiB
python -m ucxx.benchmarks.send_recv \
--backend ucxx-core \
--object_type rmm \
--server-dev 0 \
--client-dev 3 \
--n-iter 100 \
--n-bytes 100 \
--progress-mode blocking
```
#### Asynchronous
```python
# NumPy transfer, 100 iterations of 8 buffers (using multi-buffer interface)
# each with 100 bytes
python -m ucxx.benchmarks.send_recv \
--backend ucxx-async \
--object_type numpy \
--n-iter 100 \
--n-bytes 100 \
--n-buffers 8
# RMM transfer between GPUs 0 and 3, 100 iterations of 2 buffers (using
# multi-buffer interface) each with 1 MiB
python -m ucxx.benchmarks.send_recv \
--backend ucxx-async \
--object_type rmm \
--server-dev 0 \
--client-dev 3 \
--n-iter 100 \
--n-bytes 1MiB \
--n-buffers 2
# Polling progress mode without delayed notification NumPy transfer,
# 100 iterations of single buffer with 1 MiB
UCXPY_ENABLE_DELAYED_SUBMISSION=0 \
python -m ucxx.benchmarks.send_recv \
--backend ucxx-async \
--object_type numpy \
--n-iter 100 \
--n-bytes 1MiB \
--progress-mode polling
```
## Logging
Logging is independently available for both C++ and Python APIs. Since the Python interface uses the C++ backend, C++ logging can be enabled when running Python code as well.
### C++
The C++ interface reuses the UCX logger and provides the same log levels and can be enabled via the `UCXX_LOG_LEVEL` environment variable. However, it will not enable UCX logging, one must still set `UCX_LOG_LEVEL` for UCX logging. A few examples are below:
```
# Request trace log level
UCXX_LOG_LEVEL=TRACE_REQ
# Debug log level
UCXX_LOG_LEVEL=DEBUG
```
### Python
The UCXX Python interface uses the `logging` library included in Python. The only used levels currently are `INFO` and `DEBUG`, and can be enabled via the `UCXPY_LOG_LEVEL` environment variable. A few examples are below:
```
# Enable Python info log level
UCXPY_LOG_LEVEL=INFO
# Enable Python debug log level, UCXX request trace log level and UCX data log level
UCXPY_LOG_LEVEL=DEBUG UCXX_LOG_LEVEL=TRACE_REQ UCX_LOG_LEVEL=DATA
```
| 0 |
rapidsai_public_repos | rapidsai_public_repos/ucxx/build.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
# UCXX build script
# This script is used to build the component(s) in this repo from
# source, and can be called with various options to customize the
# build as needed (see the help output for details)
# Abort script on first error
set -e
NUMARGS=$#
ARGS=$*
# NOTE: ensure all dir changes are relative to the location of this
# script, and that this script resides in the repo dir!
REPODIR=$(cd $(dirname $0); pwd)
VALIDARGS="clean libucxx libucxx_python ucxx distributed_ucxx benchmarks tests examples -v -g -n -c --show_depr_warn -h"
HELP="$0 [clean] [libucxx] [libucxx_python] [ucxx] [distributed_ucxx] [benchmarks] [tests] [examples] [-vcgnh] [--cmake-args=\\\"<args>\\\"]
clean - remove all existing build artifacts and configuration (start
over)
libucxx - build the UCXX C++ module
libucxx_python - build the UCXX C++ Python support module
ucxx - build the ucxx Python package
distributed_ucxx - build the distributed_ucxx (Dask Distributed module) Python package
benchmarks - build benchmarks
tests - build tests
examples - build examples
-v - verbose build mode
-g - build for debug
-n - no install step
-c - create cpp/compile_commands.json
--show_depr_warn - show cmake deprecation warnings
--cmake-args=\\\"<args>\\\" - pass arbitrary list of CMake configuration options (escape all quotes in argument)
-h | --h[elp] - print this text
default action (no args) is to build and install 'libucxx' and 'libucxx_python', then 'ucxx' targets, and finally 'distributed_ucxx'
"
LIB_BUILD_DIR=${LIB_BUILD_DIR:=${REPODIR}/cpp/build}
UCXX_BUILD_DIR=${REPODIR}/python/build
BUILD_DIRS="${LIB_BUILD_DIR} ${UCXX_BUILD_DIR}"
# Set defaults for vars modified by flags to this script
VERBOSE_FLAG=""
BUILD_TYPE=Release
INSTALL_TARGET=install
BUILD_BENCHMARKS=OFF
BUILD_TESTS=OFF
BUILD_EXAMPLES=OFF
BUILD_DISABLE_DEPRECATION_WARNINGS=ON
BUILD_COMPILE_COMMANDS=OFF
UCXX_ENABLE_PYTHON=OFF
UCXX_ENABLE_RMM=OFF
# Set defaults for vars that may not have been defined externally
# FIXME: if INSTALL_PREFIX is not set, check PREFIX, then check
# CONDA_PREFIX, but there is no fallback from there!
INSTALL_PREFIX=${INSTALL_PREFIX:=${PREFIX:=${CONDA_PREFIX}}}
PARALLEL_LEVEL=${PARALLEL_LEVEL:=$(nproc)}
function hasArg {
    # Succeed when the token $1 was given on the command line, fail otherwise.
    # With no arguments at all there is nothing to match, so fail immediately.
    if (( NUMARGS == 0 )); then
        return 1
    fi
    echo " ${ARGS} " | grep -q " $1 "
}
function cmakeArgs {
    # Extract a single optional --cmake-args="..." option from ARGS.
    #
    # On success the quoted argument string (without the surrounding quotes) is
    # left in EXTRA_CMAKE_ARGS, and the full --cmake-args=... token is removed
    # from ARGS so that the later VALIDARGS validation loop does not reject it.
    # Supplying the option more than once is a fatal usage error.

    # Check for multiple cmake args options
    if [[ $(echo $ARGS | { grep -Eo "\-\-cmake\-args" || true; } | wc -l ) -gt 1 ]]; then
        echo "Multiple --cmake-args options were provided, please provide only one: ${ARGS}"
        exit 1
    fi

    # Check for cmake args option
    if [[ -n $(echo $ARGS | { grep -E "\-\-cmake\-args" || true; } ) ]]; then
        # There are possible weird edge cases that may cause this regex filter to output nothing and fail silently
        # the true pipe will catch any weird edge cases that may happen and will cause the program to fall back
        # on the invalid option error
        EXTRA_CMAKE_ARGS=$(echo $ARGS | { grep -Eo "\-\-cmake\-args=\".+\"" || true; })
        if [[ -n ${EXTRA_CMAKE_ARGS} ]]; then
            # Remove the full EXTRA_CMAKE_ARGS argument from list of args so that it passes validArgs function
            ARGS=${ARGS//$EXTRA_CMAKE_ARGS/}
            # Filter the full argument down to just the extra string that will be added to cmake call
            EXTRA_CMAKE_ARGS=$(echo $EXTRA_CMAKE_ARGS | grep -Eo "\".+\"" | sed -e 's/^"//' -e 's/"$//')
        fi
    fi
}
function buildAll {
    # Succeed when no build targets were requested, i.e. the command line is
    # empty or contains only flag-style (dash-prefixed) arguments.
    if (( NUMARGS == 0 )); then
        return 0
    fi
    ! (echo " ${ARGS} " | grep -q " [^-]\+ ")
}
if hasArg -h || hasArg --h || hasArg --help; then
echo "${HELP}"
exit 0
fi
# Check for valid usage
if (( ${NUMARGS} != 0 )); then
# Check for cmake args
cmakeArgs
for a in ${ARGS}; do
if ! (echo " ${VALIDARGS} " | grep -q " ${a} "); then
echo "Invalid option or formatting, check --help: ${a}"
exit 1
fi
done
fi
# Process flags
if hasArg -v; then
VERBOSE_FLAG="-v"
fi
if hasArg -g; then
BUILD_TYPE=Debug
fi
if hasArg -n; then
INSTALL_TARGET=""
LIBUCXX_BUILD_DIR=${LIB_BUILD_DIR}
else
LIBUCXX_BUILD_DIR=${CONDA_PREFIX}/lib
fi
if hasArg -c; then
BUILD_COMPILE_COMMANDS=ON
fi
if hasArg benchmarks; then
BUILD_BENCHMARKS=ON
fi
if hasArg tests; then
BUILD_TESTS=ON
fi
if hasArg examples; then
BUILD_EXAMPLES=ON
fi
if hasArg --show_depr_warn; then
BUILD_DISABLE_DEPRECATION_WARNINGS=OFF
fi
if buildAll || hasArg libucxx_python; then
UCXX_ENABLE_PYTHON=ON
UCXX_ENABLE_RMM=ON
fi
# If clean given, run it prior to any other steps
if hasArg clean; then
# If the dirs to clean are mounted dirs in a container, the
# contents should be removed but the mounted dirs will remain.
# The find removes all contents but leaves the dirs, the rmdir
# attempts to remove the dirs but can fail safely.
for bd in ${BUILD_DIRS}; do
if [ -d ${bd} ]; then
find ${bd} -mindepth 1 -delete
rmdir ${bd} || true
fi
done
# Cleaning up python artifacts
find ${REPODIR}/python/ | grep -E "(__pycache__|\.pyc|\.pyo|\.so|\_skbuild$)" | xargs rm -rf
fi
################################################################################
# Configure, build, and install libucxx
if buildAll || hasArg libucxx; then
CMAKE_GENERATOR="${CMAKE_GENERATOR:-Ninja}"
pwd
cmake -S $REPODIR/cpp -B ${LIB_BUILD_DIR} \
-G${CMAKE_GENERATOR} \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-DBUILD_BENCHMARKS=${BUILD_BENCHMARKS} \
-DBUILD_TESTS=${BUILD_TESTS} \
-DBUILD_EXAMPLES=${BUILD_EXAMPLES} \
-DDISABLE_DEPRECATION_WARNINGS=${BUILD_DISABLE_DEPRECATION_WARNINGS} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DCMAKE_EXPORT_COMPILE_COMMANDS=${BUILD_COMPILE_COMMANDS} \
-DUCXX_ENABLE_PYTHON=${UCXX_ENABLE_PYTHON} \
-DUCXX_ENABLE_RMM=${UCXX_ENABLE_RMM} \
${EXTRA_CMAKE_ARGS}
cd ${LIB_BUILD_DIR}
compile_start=$(date +%s)
cmake --build . -j${PARALLEL_LEVEL} ${VERBOSE_FLAG}
compile_end=$(date +%s)
compile_total=$(( compile_end - compile_start ))
if [[ ${BUILD_COMPILE_COMMANDS} == "ON" ]]; then
cp compile_commands.json ..
fi
if [[ ${INSTALL_TARGET} != "" ]]; then
cmake --build . -j${PARALLEL_LEVEL} --target install ${VERBOSE_FLAG}
if [[ ${UCXX_ENABLE_PYTHON} == "ON" ]]; then
cmake --install . --component python
fi
if [[ ${BUILD_BENCHMARKS} == "ON" ]]; then
cmake --install . --component benchmarks
fi
if [[ ${BUILD_EXAMPLES} == "ON" ]]; then
cmake --install . --component examples
fi
if [[ ${BUILD_TESTS} == "ON" ]]; then
cmake --install . --component testing
fi
fi
fi
# Append `-DFIND_UCXX_CPP=ON` to EXTRA_CMAKE_ARGS unless a user specified the option.
if [[ "${EXTRA_CMAKE_ARGS}" != *"DFIND_UCXX_CPP"* ]]; then
EXTRA_CMAKE_ARGS="${EXTRA_CMAKE_ARGS} -DFIND_UCXX_CPP=ON"
fi
# Build and install the UCXX Python package
if buildAll || hasArg ucxx; then
    cd ${REPODIR}/python/
    # NOTE(review): LIBCUDF_BUILD_DIR and CUDF_CMAKE_CUDA_ARCHITECTURES look
    # like leftovers copied from cudf's build.sh; neither is set anywhere in
    # this script, so the corresponding -D options expand to empty values.
    # Presumably LIBUCXX_BUILD_DIR (set above) was intended — confirm and fix.
    SKBUILD_CONFIGURE_OPTIONS="-DCMAKE_PREFIX_PATH=${INSTALL_PREFIX} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_LIBRARY_PATH=${LIBCUDF_BUILD_DIR} -DCMAKE_CUDA_ARCHITECTURES=${CUDF_CMAKE_CUDA_ARCHITECTURES} ${EXTRA_CMAKE_ARGS}" \
        SKBUILD_BUILD_OPTIONS="-j${PARALLEL_LEVEL:-1}" \
        python setup.py build_ext --inplace
    # Skip installation when -n (no install) was given.
    if [[ ${INSTALL_TARGET} != "" ]]; then
        python setup.py install --single-version-externally-managed --record=record.txt
    fi
fi
# Build and install the distributed_ucxx Python package
if buildAll || hasArg distributed_ucxx; then
cd ${REPODIR}/python/distributed-ucxx/
python setup.py build_ext --inplace
if [[ ${INSTALL_TARGET} != "" ]]; then
python setup.py install --single-version-externally-managed --record=record.txt
fi
fi
| 0 |
rapidsai_public_repos | rapidsai_public_repos/ucxx/dependencies.yaml | # Dependency list for https://github.com/rapidsai/dependency-file-generator
files:
all:
output: conda
matrix:
cuda: ["11.8", "12.0"]
arch: [x86_64]
includes:
- build_cpp
- build_python
- checks
- cudatoolkit
- dev
- py_version
- run_python
- test_cpp
- test_python
test_cpp:
output: none
includes:
- cudatoolkit
- test_cpp
test_python:
output: none
includes:
- cudatoolkit
- py_version
- test_python
checks:
output: none
includes:
- checks
- py_version
py_build:
output: pyproject
pyproject_dir: python
extras:
table: build-system
includes:
- build_python
py_run:
output: pyproject
pyproject_dir: python
extras:
table: project
includes:
- run_python
py_test:
output: pyproject
pyproject_dir: python
extras:
table: project.optional-dependencies
key: test
includes:
- test_python
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
build_cpp:
common:
- output_types: conda
packages:
- c-compiler
- cxx-compiler
- &cmake_ver cmake>=3.26.4
- fmt>=9.1.0,<10
- &gmock gmock>=1.13.0
- &gtest gtest>=1.13.0
- librmm==24.2.*
- ninja
- spdlog>=1.11.0,<1.12
build_python:
common:
- output_types: [conda, requirements, pyproject]
packages:
- *cmake_ver
- cython>=3.0.0
- ninja
- rmm==24.2.*
- scikit-build>=0.13.1
- setuptools
- tomli
- wheel
checks:
common:
- output_types: [conda, requirements]
packages:
- pre-commit
cudatoolkit:
specific:
- output_types: conda
matrices:
- matrix:
cuda: "11.2"
packages:
- cuda-version=11.2
- cudatoolkit
- matrix:
cuda: "11.4"
packages:
- cuda-version=11.4
- cudatoolkit
- matrix:
cuda: "11.5"
packages:
- cuda-version=11.5
- cudatoolkit
- matrix:
cuda: "11.8"
packages:
- cuda-version=11.8
- cudatoolkit
- matrix:
cuda: "12.0"
packages:
- cuda-version=12.0
- cuda-cudart-dev
dev:
common:
- output_types: [conda]
packages:
# These packages are useful for development but not otherwise required to build/run
# RAPIDS
- pip
- dask-cuda==24.2.*
- dask-cudf==24.2.*
# UCX Build
- libtool
- automake
- autoconf
# UCXX Build
- pkg-config
py_version:
specific:
- output_types: conda
matrices:
- matrix:
py: "3.9"
packages:
- python=3.9
- matrix:
py: "3.10"
packages:
- python=3.10
- matrix:
packages:
- python>=3.9,<3.11
run_python:
common:
- output_types: [conda, requirements, pyproject]
packages:
- numpy>=1.21
- pynvml>=11.4.1
- output_types: [conda]
packages:
- ucx
test_cpp:
common:
- output_types: conda
packages:
- *cmake_ver
- *gtest
- *gmock
test_python:
common:
- output_types: [conda, requirements, pyproject]
packages:
- cloudpickle
- cudf==24.2.*
- cupy
- dask
- distributed
- numba>=0.57.1
- pytest
- pytest-asyncio
- pytest-rerunfailures
| 0 |
rapidsai_public_repos | rapidsai_public_repos/ucxx/LICENSE | BSD 3-Clause License
Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 0 |
rapidsai_public_repos | rapidsai_public_repos/ucxx/build_and_run.sh | #!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
NUMARGS=$#
ARGS=$*
# NOTE: ensure all dir changes are relative to the location of this
# script, and that this script resides in the repo dir!
REPODIR=$(cd $(dirname $0); pwd)
VALIDARGS="cpp_tests py_tests cpp_examples py_async_tests py_bench py_async_bench -v -g -n -c --show_depr_warn -h"
HELP="$0 [cpp_tests] [cpp_bench] [cpp_examples] [py_tests] [py_async_tests] [py_bench] [py_async_bench]
cpp_tests - run all C++ tests
cpp_bench - run C++ benchmarks
cpp_example - run C++ example
py_tests - run all Python core tests
py_async_tests - run all Python async tests
py_bench - run Python core benchmarks
py_async_bench - run Python async benchmarks
clean - remove all existing build artifacts and configuration (start
over)
libucxx - build the UCXX C++ module
libucxx_python - build the UCXX C++ Python support module
ucxx - build the ucxx Python package
tests - build tests
-v - verbose build mode
-g - build for debug
-n - no install step
-c - create cpp/compile_commands.json
--show_depr_warn - show cmake deprecation warnings
--cmake-args=\\\"<args>\\\" - pass arbitrary list of CMake configuration options (escape all quotes in argument)
-h | --h[elp] - print this text
default action (no args) is to build (with command below) and run all tests and benchmarks.
./build.sh libucxx libucxx_python ucxx tests
"
BUILD_ARGS=""
RUN_CPP_TESTS=0
RUN_CPP_BENCH=0
RUN_CPP_EXAMPLE=0
RUN_PY_TESTS=0
RUN_PY_ASYNC_TESTS=0
RUN_PY_BENCH=0
RUN_PY_ASYNC_BENCH=0
BINARY_PATH=${CONDA_PREFIX}/bin
function hasArg {
    # Succeed when the token $1 appears among the script arguments.
    # No arguments at all means nothing can match, so fail right away.
    if (( NUMARGS == 0 )); then
        return 1
    fi
    echo " ${ARGS} " | grep -q " $1 "
}
function runAll {
    # Succeed when no specific run targets were requested, i.e. the command
    # line is empty or contains only flag-style (dash-prefixed) arguments.
    if (( NUMARGS == 0 )); then
        return 0
    fi
    ! (echo " ${ARGS} " | grep -q " [^-]\+ ")
}
if hasArg -h || hasArg --h || hasArg --help; then
echo "${HELP}"
exit 0
fi
if hasArg -v; then
BUILD_ARGS="${BUILD_ARGS} -v"
fi
if hasArg -g; then
BUILD_ARGS="${BUILD_ARGS} -g"
fi
if hasArg -n; then
BUILD_ARGS="${BUILD_ARGS} -n"
fi
if hasArg -c; then
BUILD_ARGS="${BUILD_ARGS} -c"
fi
if runAll || hasArg cpp_tests; then
RUN_CPP_TESTS=1
fi
if runAll || hasArg cpp_bench; then
RUN_CPP_BENCH=1
fi
if runAll || hasArg cpp_example; then
RUN_CPP_EXAMPLE=1
fi
if runAll || hasArg py_tests; then
RUN_PY_TESTS=1
fi
if runAll || hasArg py_async_tests; then
RUN_PY_ASYNC_TESTS=1
fi
if runAll || hasArg py_bench; then
RUN_PY_BENCH=1
fi
if runAll || hasArg py_async_bench; then
RUN_PY_ASYNC_BENCH=1
fi
# Exit if a building error occurs
set -e
(cd ${REPODIR}; ./build.sh ${BUILD_ARGS} libucxx libucxx_python ucxx benchmarks tests examples)
# Let all tests run even if they fail
set +e
run_cpp_benchmark() {
    # Run the C++ ucxx_perftest benchmark once: a background server plus a
    # foreground client on localhost, 8 MiB messages, 20 iterations.
    #
    # $1: progress mode passed to -m (e.g. polling, blocking, thread-polling).
    PROGRESS_MODE=$1

    # UCX_TCP_CM_REUSEADDR=y to be able to bind immediately to the same port before
    # `TIME_WAIT` timeout
    # NOTE: the CMD_LINE_* strings are only used for the log message below;
    # the actual commands are spelled out again afterwards.
    CMD_LINE_SERVER="UCX_TCP_CM_REUSEADDR=y ${BINARY_PATH}/benchmarks/libucxx/ucxx_perftest -s 8388608 -r -n 20 -m ${PROGRESS_MODE}"
    CMD_LINE_CLIENT="${BINARY_PATH}/benchmarks/libucxx/ucxx_perftest -s 8388608 -r -n 20 -m ${PROGRESS_MODE} 127.0.0.1"

    echo -e "\e[1mRunning: \n - ${CMD_LINE_SERVER}\n - ${CMD_LINE_CLIENT}\e[0m"
    UCX_TCP_CM_REUSEADDR=y ${BINARY_PATH}/benchmarks/libucxx/ucxx_perftest -s 8388608 -r -n 20 -m ${PROGRESS_MODE} &
    ${BINARY_PATH}/benchmarks/libucxx/ucxx_perftest -s 8388608 -r -n 20 -m ${PROGRESS_MODE} 127.0.0.1
}
run_cpp_example() {
    # Run the C++ basic example once with the given progress mode.
    #
    # $1: progress mode passed to -m (e.g. polling, blocking, thread-polling).
    PROGRESS_MODE=$1

    # UCX_TCP_CM_REUSEADDR=y to be able to bind immediately to the same port before
    # `TIME_WAIT` timeout
    # NOTE: CMD_LINE is only used for the log message; the actual command is
    # spelled out again below.
    CMD_LINE="UCX_TCP_CM_REUSEADDR=y ${BINARY_PATH}/examples/libucxx/ucxx_example_basic -m ${PROGRESS_MODE}"

    echo -e "\e[1mRunning: \n - ${CMD_LINE}\e[0m"
    UCX_TCP_CM_REUSEADDR=y ${BINARY_PATH}/examples/libucxx/ucxx_example_basic -m ${PROGRESS_MODE}
}
run_tests_async() {
    # Run the Python async test suite under one UCXX configuration.
    #
    # $1: progress mode (exported as UCXPY_PROGRESS_MODE)
    # $2: 0/1, delayed submission (UCXPY_ENABLE_DELAYED_SUBMISSION)
    # $3: 0/1, Python futures (UCXPY_ENABLE_PYTHON_FUTURE)
    # $4: non-zero skips this combination (unstable/unsupported), only
    #     printing what would have run
    PROGRESS_MODE=$1
    ENABLE_DELAYED_SUBMISSION=$2
    ENABLE_PYTHON_FUTURE=$3
    SKIP=$4

    # NOTE: CMD_LINE is only used for logging; the real invocation is below.
    CMD_LINE="UCXPY_PROGRESS_MODE=${PROGRESS_MODE} UCXPY_ENABLE_DELAYED_SUBMISSION=${ENABLE_DELAYED_SUBMISSION} UCXPY_ENABLE_PYTHON_FUTURE=${ENABLE_PYTHON_FUTURE} pytest -vs python/ucxx/_lib_async/tests/"

    if [ $SKIP -ne 0 ]; then
        echo -e "\e[31;1mSkipping unstable test: ${CMD_LINE}\e[0m"
    else
        echo -e "\e[1mRunning: ${CMD_LINE}\e[0m"
        UCXPY_PROGRESS_MODE=${PROGRESS_MODE} UCXPY_ENABLE_DELAYED_SUBMISSION=${ENABLE_DELAYED_SUBMISSION} UCXPY_ENABLE_PYTHON_FUTURE=${ENABLE_PYTHON_FUTURE} pytest -vs python/ucxx/_lib_async/tests/
    fi
}
run_py_benchmark() {
    # Run the Python send_recv benchmark with cupy buffers between devices 0
    # and 1, 8 MiB messages, reusing allocations.
    #
    # $1: backend (ucxx-core or ucxx-async)
    # $2: progress mode
    # $3: 0/1, pass --asyncio-wait to the benchmark
    # $4: 0/1, delayed submission (UCXPY_ENABLE_DELAYED_SUBMISSION)
    # $5: 0/1, Python futures (UCXPY_ENABLE_PYTHON_FUTURE)
    # $6: number of buffers (--n-buffers)
    # $7: non-zero prints a warning that this combination is known to be slow
    BACKEND=$1
    PROGRESS_MODE=$2
    ASYNCIO_WAIT=$3
    ENABLE_DELAYED_SUBMISSION=$4
    ENABLE_PYTHON_FUTURE=$5
    N_BUFFERS=$6
    SLOW=$7

    # Translate the 0/1 flag into the actual command-line option (or nothing).
    if [ $ASYNCIO_WAIT -ne 0 ]; then
        ASYNCIO_WAIT="--asyncio-wait"
    else
        ASYNCIO_WAIT=""
    fi

    # NOTE: CMD_LINE is only used for logging; the real invocation is below.
    CMD_LINE="UCXPY_ENABLE_DELAYED_SUBMISSION=${ENABLE_DELAYED_SUBMISSION} UCXPY_ENABLE_PYTHON_FUTURE=${ENABLE_PYTHON_FUTURE} python -m ucxx.benchmarks.send_recv --backend ${BACKEND} -o cupy --reuse-alloc -d 0 -e 1 -n 8MiB --n-buffers $N_BUFFERS --progress-mode ${PROGRESS_MODE} ${ASYNCIO_WAIT}"

    echo -e "\e[1mRunning: ${CMD_LINE}\e[0m"
    if [ $SLOW -ne 0 ]; then
        echo -e "\e[31;1mSLOW BENCHMARK: it may seem like a deadlock but will eventually complete.\e[0m"
    fi
    UCXPY_ENABLE_DELAYED_SUBMISSION=${ENABLE_DELAYED_SUBMISSION} UCXPY_ENABLE_PYTHON_FUTURE=${ENABLE_PYTHON_FUTURE} python -m ucxx.benchmarks.send_recv --backend ${BACKEND} -o cupy --reuse-alloc -d 0 -e 1 -n 8MiB --n-buffers $N_BUFFERS --progress-mode ${PROGRESS_MODE} ${ASYNCIO_WAIT}
}
if [[ $RUN_CPP_TESTS != 0 ]]; then
# UCX_TCP_CM_REUSEADDR=y to be able to bind immediately to the same port before
# `TIME_WAIT` timeout
UCX_TCP_CM_REUSEADDR=y ${BINARY_PATH}/gtests/libucxx/UCXX_TEST
fi
if [[ $RUN_CPP_BENCH != 0 ]]; then
# run_cpp_benchmark PROGRESS_MODE
run_cpp_benchmark polling
run_cpp_benchmark blocking
run_cpp_benchmark thread-polling
run_cpp_benchmark thread-blocking
run_cpp_benchmark wait
fi
if [[ $RUN_CPP_EXAMPLE != 0 ]]; then
# run_cpp_example PROGRESS_MODE
run_cpp_example polling
run_cpp_example blocking
run_cpp_example thread-polling
run_cpp_example thread-blocking
run_cpp_example wait
fi
if [[ $RUN_PY_TESTS != 0 ]]; then
echo -e "\e[1mRunning: pytest-vs python/ucxx/_lib/tests/\e[0m"
pytest -vs python/ucxx/_lib/tests/
fi
if [[ $RUN_PY_ASYNC_TESTS != 0 ]]; then
# run_tests_async PROGRESS_MODE ENABLE_DELAYED_SUBMISSION ENABLE_PYTHON_FUTURE SKIP
run_tests_async polling 0 0 0
run_tests_async polling 0 1 0
run_tests_async polling 1 0 1 # Delayed submission can't be used with polling
run_tests_async polling 1 1 1 # Delayed submission can't be used with polling
run_tests_async thread-polling 0 0 0
run_tests_async thread-polling 0 1 0
run_tests_async thread-polling 1 0 0
run_tests_async thread-polling 1 1 0
run_tests_async thread 0 0 0
run_tests_async thread 0 1 0
run_tests_async thread 1 0 0
run_tests_async thread 1 1 0
fi
if [[ $RUN_PY_BENCH != 0 ]]; then
# run_py_benchmark BACKEND PROGRESS_MODE ASYNCIO_WAIT ENABLE_DELAYED_SUBMISSION ENABLE_PYTHON_FUTURE NBUFFERS SLOW
run_py_benchmark ucxx-core blocking 0 0 0 1 0
run_py_benchmark ucxx-core polling 0 0 0 1 0
run_py_benchmark ucxx-core thread-polling 0 0 0 1 0
run_py_benchmark ucxx-core thread-polling 1 0 0 1 0
run_py_benchmark ucxx-core thread 0 0 0 1 0
run_py_benchmark ucxx-core thread 1 0 0 1 0
fi
if [[ $RUN_PY_ASYNC_BENCH != 0 ]]; then
for nbuf in 1 8; do
# run_py_benchmark BACKEND PROGRESS_MODE ASYNCIO_WAIT ENABLE_DELAYED_SUBMISSION ENABLE_PYTHON_FUTURE NBUFFERS SLOW
run_py_benchmark ucxx-async polling 0 0 0 ${nbuf} 0
run_py_benchmark ucxx-async polling 0 0 1 ${nbuf} 0
run_py_benchmark ucxx-async polling 0 1 0 ${nbuf} 0
run_py_benchmark ucxx-async polling 0 1 1 ${nbuf} 0
run_py_benchmark ucxx-async thread-polling 0 0 0 ${nbuf} 0
run_py_benchmark ucxx-async thread-polling 0 0 1 ${nbuf} 0
run_py_benchmark ucxx-async thread-polling 0 1 0 ${nbuf} 0
run_py_benchmark ucxx-async thread-polling 0 1 1 ${nbuf} 0
if [ ${nbuf} -eq 1 ]; then
run_py_benchmark ucxx-async thread 0 0 0 ${nbuf} 1
run_py_benchmark ucxx-async thread 0 0 1 ${nbuf} 1
run_py_benchmark ucxx-async thread 0 1 0 ${nbuf} 1
else
run_py_benchmark ucxx-async thread 0 0 0 ${nbuf} 0
run_py_benchmark ucxx-async thread 0 0 1 ${nbuf} 0
run_py_benchmark ucxx-async thread 0 1 0 ${nbuf} 0
fi
run_py_benchmark ucxx-async thread 0 1 1 ${nbuf} 0
done
fi
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/python/pyproject.toml | # SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
[build-system]
build-backend = "setuptools.build_meta"
requires = [
"cmake>=3.26.4",
"cython>=3.0.0",
"ninja",
"rmm==24.2.*",
"scikit-build>=0.13.1",
"setuptools",
"tomli",
"wheel",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
[project]
name = "ucxx"
version = "0.36.00"
description = "Python Bindings for the Unified Communication X library (UCX)"
readme = { file = "README.md", content-type = "text/markdown" }
authors = [
{ name = "NVIDIA Corporation" },
]
license = { text = "BSD-3-Clause" }
requires-python = ">=3.9"
dependencies = [
"numpy>=1.21",
"pynvml>=11.4.1",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
classifiers = [
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Hardware",
"Topic :: System :: Systems Administration",
"Programming Language :: Python :: 3",
]
[project.optional-dependencies]
test = [
"cloudpickle",
"cudf==24.2.*",
"cupy",
"dask",
"distributed",
"numba>=0.57.1",
"pytest",
"pytest-asyncio",
"pytest-rerunfailures",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
[project.urls]
Homepage = "https://github.com/rapidsai/ucxx"
[tool.isort]
line_length = 79
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
combine_as_imports = true
order_by_type = true
known_dask = [
"dask",
"distributed",
]
known_rapids = [
"rmm",
"cudf",
]
known_first_party = [
"ucp",
]
default_section = "THIRDPARTY"
sections = [
"FUTURE",
"STDLIB",
"THIRDPARTY",
"DASK",
"RAPIDS",
"FIRSTPARTY",
"LOCALFOLDER",
]
skip = [
".eggs",
".git",
".hg",
".mypy_cache",
".tox",
".venv",
"build",
"dist",
"__init__.py",
]
[tool.setuptools]
license-files = ["LICENSE"]
zip-safe = false
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/python/.flake8 | [flake8]
ignore = E901,E225,E226,E227,E999,E203,W503
exclude =
.eggs,
*.egg,
build,
ucxx/__init__.py
max-line-length = 88
# Ignore black/flake8-pyi conflicts
per-file-ignores =
*.pyi:E301 E302 E704
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/python/CMakeLists.txt | # =================================================================================
# SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD 3-Clause License
# =================================================================================
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
set(ucxx_version 0.36.00)
include(../fetch_rapids.cmake)
project(
ucxx-python
VERSION ${ucxx_version}
LANGUAGES # TODO: Building Python extension modules via the python_extension_module requires the C
# language to be enabled here. The test project that is built in scikit-build to verify
# various linking options for the python library is hardcoded to build with C, so until
# that is fixed we need to keep C.
C CXX
)
option(FIND_UCXX_CPP "Search for existing UCXX C++ installations before defaulting to local files"
OFF
)
# If the user requested it we attempt to find UCXX.
if(FIND_UCXX_CPP)
find_package(ucxx ${ucxx_version} REQUIRED COMPONENTS python)
else()
set(ucxx_FOUND OFF)
endif()
include(rapids-cython)
if(NOT ucxx_FOUND)
set(BUILD_TESTS OFF)
set(BUILD_BENCHMARKS OFF)
set(_exclude_from_all "")
add_subdirectory(../cpp ucxx-cpp ${_exclude_from_all})
# Since ucxx._lib requires access to libucxx, we place the library in the ucxx directory
# and modify the rpaths appropriately.
set(cython_lib_dir ucxx)
install(TARGETS ucxx DESTINATION ${cython_lib_dir})
endif()
rapids_cython_init()
add_subdirectory(ucxx/_lib)
if(DEFINED cython_lib_dir)
rapids_cython_add_rpath_entries(TARGET ucxx PATHS "${cython_lib_dir}")
endif()
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/python/.flake8.cython | #
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
[flake8]
filename = *.pyx, *.pxd
exclude = *.egg, build, docs, .git
ignore = E999, E225, E226, E227, W503, W504, E211
max-line-length = 88
# Rules ignored:
# E999: invalid syntax (works for Python, not Cython)
# E211: whitespace before '(' (used in multi-line imports)
# E225: Missing whitespace around operators (breaks cython casting syntax like <int>)
# E226: Missing whitespace around arithmetic operators (breaks cython pointer syntax like int*)
# E227: Missing whitespace around bitwise or shift operator (Can also break casting syntax)
# W503: line break before binary operator (breaks lines that start with a pointer)
# W504: line break after binary operator (breaks lines that end with a pointer)
| 0 |
rapidsai_public_repos/ucxx | rapidsai_public_repos/ucxx/python/setup.py | # SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
from setuptools import find_packages
from skbuild import setup
# Discover every ucxx package once, so that `packages` and `package_data`
# always stay in sync.
packages = find_packages(include=["ucxx*"])

setup(
    packages=packages,
    # Ship Cython declaration files (*.pxd) with every package so other
    # extensions can cimport from ucxx.
    package_data={pkg: ["*.pxd"] for pkg in packages},
    zip_safe=False,
)
| 0 |
rapidsai_public_repos/ucxx/python | rapidsai_public_repos/ucxx/python/distributed-ucxx/pyproject.toml | [build-system]
build-backend = "setuptools.build_meta"
requires = [
"setuptools>=64.0.0",
"tomli ; python_version < '3.11'",
]
[project]
name = "distributed-ucxx"
version = "0.36.00"
description = "UCX communication module for Dask Distributed"
readme = { file = "README.md", content-type = "text/markdown" }
authors = [
{ name = "NVIDIA Corporation" },
]
license = { text = "Apache-2.0" }
requires-python = ">=3.8"
dependencies = [
"dask >=2023.9.2",
"distributed >=2023.9.2",
"numba >=0.54",
]
classifiers = [
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
[project.optional-dependencies]
docs = [
"sphinx",
"sphinx-click>=2.7.1",
"sphinx-rtd-theme>=0.5.1",
]
test = [
"pytest",
"numpy",
]
[project.urls]
Homepage = "https://github.com/rapidsai/ucxx"
Documentation = "https://distributed.dask.org/"
Source = "https://github.com/rapidsai/ucxx"
[tool.coverage.run]
disable_warnings = [
"include-ignored",
]
include = [
"distributed_ucxx/*",
]
omit = [
"distributed_ucxx/tests/*",
]
[tool.isort]
line_length = 88
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
combine_as_imports = true
order_by_type = true
known_dask = [
"dask",
"distributed",
]
known_rapids = [
"rmm",
"cudf",
"ucxx",
]
known_first_party = [
"distributed_ucxx",
]
default_section = "THIRDPARTY"
sections = [
"FUTURE",
"STDLIB",
"THIRDPARTY",
"DASK",
"RAPIDS",
"FIRSTPARTY",
"LOCALFOLDER",
]
skip = [
".eggs",
".git",
".hg",
".mypy_cache",
".tox",
".venv",
"build",
"dist",
"__init__.py",
]
[tool.setuptools]
license-files = ["LICENSE"]
[tool.setuptools.packages.find]
exclude = [
"docs",
"tests",
"docs.*",
"tests.*",
]
[tool.versioneer]
VCS = "git"
style = "pep440"
versionfile_source = "distributed_ucxx/_version.py"
versionfile_build = "distributed_ucxx/_version.py"
tag_prefix = "v"
parentdir_prefix = "distributed_ucxx-"
| 0 |
rapidsai_public_repos/ucxx/python | rapidsai_public_repos/ucxx/python/distributed-ucxx/README.md | # UCX Communication Module for Distributed
This is the UCX communication module for the Distributed framework. It is required to enable UCX communications.
| 0 |
rapidsai_public_repos/ucxx/python | rapidsai_public_repos/ucxx/python/distributed-ucxx/setup.py | # SPDX-FileCopyrightText: Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: BSD-3-Clause
from setuptools import find_packages, setup
setup(
    # Register the UCXX comm backend with Dask Distributed via an entry
    # point, making it discoverable under "distributed.comm.backends".
    entry_points={
        "distributed.comm.backends": [
            "ucxx=distributed_ucxx:UCXXBackend",
        ],
    },
    packages=find_packages(exclude=["tests", "tests.*"]),
    include_package_data=True,
    zip_safe=False,
)
| 0 |
rapidsai_public_repos/ucxx/python | rapidsai_public_repos/ucxx/python/distributed-ucxx/LICENSE | BSD 3-Clause License
Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 0 |
rapidsai_public_repos/ucxx/python/distributed-ucxx | rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx/utils_test.py | from __future__ import annotations
import asyncio
import logging
import sys
import pytest
from distributed.utils_test import ( # noqa: F401
check_thread_leak,
cleanup,
gen_test as distributed_gen_test,
loop,
loop_in_thread,
)
import ucxx
try:
    from pytest_timeout import is_debugging
except ImportError:
    # Fallback when pytest-timeout is not installed: treat any active trace
    # function (i.e. an attached debugger) as "debugging".
    def is_debugging() -> bool:
        # The pytest_timeout logic is more sophisticated. Not only debuggers
        # attach a trace callback but vendoring the entire logic is not worth it
        return sys.gettrace() is not None
logger = logging.getLogger(__name__)
# Snapshot of every configured logger's level at import time so tests can later
# restore logging configuration. The comprehension variable was renamed from
# `logger` to `configured_logger`: it shadowed the module-level `logger`
# defined on the previous line, which was confusing to read.
logging_levels = {
    name: configured_logger.level
    for name, configured_logger in logging.root.manager.loggerDict.items()
    if isinstance(configured_logger, logging.Logger)
}
def ucxx_exception_handler(event_loop, context):
    """UCX exception handler for `ucxx_loop` during test.
    Prints the exception and its message.
    Parameters
    ----------
    event_loop: object
        Reference to the running event loop (unused; required by the
        ``loop.set_exception_handler`` callback signature).
    context: dict
        Dictionary containing exception details; ``"exception"`` is preferred,
        falling back to the mandatory ``"message"`` key.
    """
    # Docstring fix: the parameter was documented as ``loop`` but is named
    # ``event_loop``.
    msg = context.get("exception", context["message"])
    print(msg)
# Let's make sure that UCX gets time to cancel
# progress tasks before closing the event loop.
@pytest.fixture(scope="function")
def ucxx_loop():
    """Allows UCX to cancel progress tasks before closing event loop.
    When UCX tasks are not completed in time (e.g., by unexpected Endpoint
    closure), clean up tasks before closing the event loop to prevent unwanted
    errors from being raised.
    """
    event_loop = asyncio.new_event_loop()
    event_loop.set_exception_handler(ucxx_exception_handler)
    # Create and reset context before running. The first test that runs during the
    # `pytest` process lifetime creates a `_DummyThread` instance which violates
    # thread checking from `distributed.utils_test.check_thread_leak()`, if we
    # instantiate a and reset a context before `yield loop`, that doesn't fail
    # during the `check_thread_leak()` check below.
    ucxx.core._get_ctx()
    ucxx.reset()
    with check_thread_leak():
        # NOTE(review): this yields the imported `loop` fixture function rather
        # than the freshly created `event_loop` — confirm this is intentional.
        yield loop
        ucxx.reset()
        event_loop.close()
    # Reset also Distributed's UCX initialization, i.e., revert the effects of
    # `distributed.comm.ucx.init_once()`.
    import distributed_ucxx
    distributed_ucxx.ucxx = None
def gen_test(**kwargs):
    """Like distributed's ``gen_test`` but with thread-leak cleanup disabled.
    ``clean_kwargs`` is reserved by this wrapper and must not be supplied.
    """
    assert "clean_kwargs" not in kwargs
    return distributed_gen_test(clean_kwargs={"threads": False}, **kwargs)
| 0 |
rapidsai_public_repos/ucxx/python/distributed-ucxx | rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx/distributed_patches.py | from distributed import Scheduler, Worker
from distributed.utils import log_errors
import ucxx
# Keep references to the unpatched close methods so the wrappers below can
# delegate to the original behavior.
_scheduler_close = Scheduler.close
_worker_close = Worker.close
def _stop_notifier_thread_and_progress_tasks():
    # Stop the UCXX notifier thread and drop all registered progress tasks so
    # no UCXX background machinery outlives the scheduler/worker being closed.
    ucxx.stop_notifier_thread()
    ucxx.core._get_ctx().progress_tasks.clear()
async def _scheduler_close_ucxx(*args, **kwargs):
    """Close the scheduler, then tear down UCXX resources if it served ucxx.
    Wraps the original ``distributed.Scheduler.close`` (saved in
    ``_scheduler_close``); after the scheduler shuts down, stop the UCXX
    notifier thread and clear progress tasks so the process can exit cleanly.
    Fix: ``any()`` previously materialized a throwaway list; a generator
    expression short-circuits and avoids the allocation.
    """
    scheduler = args[0]  # args[0] == self
    await _scheduler_close(*args, **kwargs)
    # Only tear down UCXX when the scheduler actually listened on a ucxx address.
    is_ucxx = any(addr.startswith("ucxx") for addr in scheduler._start_address)
    if is_ucxx:
        _stop_notifier_thread_and_progress_tasks()
@log_errors
async def _worker_close_ucxx(*args, **kwargs):
    # Wrapper over the original `distributed.Worker.close` (saved in
    # `_worker_close`) that tears down UCXX machinery afterwards.
    #
    # This patch is insufficient for `dask worker` when `--nworkers=1` (default) or
    # `--no-nanny` is specified because there's no good way to detect that the
    # `distributed.Worker.close()` method should stop the notifier thread.
    worker = args[0]  # args[0] == self
    await _worker_close(*args, **kwargs)
    # Only nanny-managed ucxx workers can safely stop the notifier thread here.
    if worker._protocol.startswith("ucxx") and worker.nanny is not None:
        _stop_notifier_thread_and_progress_tasks()
# Apply the monkeypatches at import time (this module is imported for its
# side effects from distributed_ucxx/__init__.py).
Scheduler.close = _scheduler_close_ucxx
Worker.close = _worker_close_ucxx
| 0 |
rapidsai_public_repos/ucxx/python/distributed-ucxx | rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx/ucxx.py | """
:ref:`UCX`_ based communications for distributed.
See :ref:`communications` for more.
.. _UCX: https://github.com/openucx/ucx
"""
from __future__ import annotations
import functools
import logging
import os
import struct
import weakref
from collections.abc import Awaitable, Callable, Collection
from typing import TYPE_CHECKING, Any
from unittest.mock import patch
import dask
from dask.utils import parse_bytes
from distributed.comm.addressing import parse_host_port, unparse_host_port
from distributed.comm.core import Comm, CommClosedError, Connector, Listener
from distributed.comm.registry import Backend
from distributed.comm.utils import ensure_concrete_host, from_frames, to_frames
from distributed.diagnostics.nvml import (
CudaDeviceInfo,
get_device_index_and_uuid,
has_cuda_context,
)
from distributed.protocol.utils import host_array
from distributed.utils import ensure_ip, get_ip, get_ipv6, log_errors, nbytes
logger = logging.getLogger(__name__)
# In order to avoid double init when forking/spawning new processes (multiprocess),
# we make sure only to import and initialize UCXX once at first use. This is also
# required to ensure Dask configuration gets propagated to UCXX, which needs
# variables to be set before being imported.
if TYPE_CHECKING:
    try:
        import ucxx
    except ImportError:
        pass
else:
    # Populated lazily by init_once(); None means "not initialized yet".
    ucxx = None
# Allocator for receive buffers of CUDA frames; chosen in init_once()
# (RMM, then Numba, then an error-raising stub).
device_array = None
# CUDA-context bookkeeping filled in by init_once() for diagnostics.
pre_existing_cuda_context = False
cuda_context_created = False
# Whether to use UCXX multi-buffer transfers; read from dask config in init_once().
multi_buffer = None
_warning_suffix = (
    "This is often the result of a CUDA-enabled library calling a CUDA runtime "
    "function before Dask-CUDA can spawn worker processes. Please make sure any such "
    "function calls don't happen at import time or in the global scope of a program."
)
def _get_device_and_uuid_str(device_info: CudaDeviceInfo) -> str:
    """Format a CUDA device as ``"<index> (<uuid>)"`` for log messages."""
    return "{} ({})".format(device_info.device_index, str(device_info.uuid))
def _warn_existing_cuda_context(device_info: CudaDeviceInfo, pid: int) -> None:
    """Warn that process ``pid`` already had a CUDA context before init."""
    logger.warning(
        "A CUDA context for device %s already exists on process ID %s. %s"
        % (_get_device_and_uuid_str(device_info), pid, _warning_suffix)
    )
def _warn_cuda_context_wrong_device(
    device_info_expected: CudaDeviceInfo, device_info_actual: CudaDeviceInfo, pid: int
) -> None:
    """Warn that the CUDA context landed on a device other than the expected one."""
    expected = _get_device_and_uuid_str(device_info_expected)
    actual = _get_device_and_uuid_str(device_info_actual)
    logger.warning(
        "Worker with process ID %s should have a CUDA context assigned to device "
        "%s, but instead the CUDA context is on device %s. %s"
        % (pid, expected, actual, _warning_suffix)
    )
def synchronize_stream(stream=0):
    """Block until the given CUDA stream (default: stream 0) has drained."""
    import numba.cuda
    active_ctx = numba.cuda.current_context()
    stream_handle = numba.cuda.driver.drvapi.cu_stream(stream)
    numba.cuda.driver.Stream(active_ctx, stream_handle, None).synchronize()
def init_once():
    """Initialize UCXX exactly once per process.
    Idempotent: returns immediately if the module-level ``ucxx`` is already set.
    Translates dask config into UCX options, optionally creates a CUDA context
    first, imports and initializes ucxx, and selects the ``device_array``
    allocator (RMM, then Numba, then an error stub).
    """
    global ucxx, device_array
    global ucx_create_endpoint, ucx_create_listener
    global pre_existing_cuda_context, cuda_context_created
    global multi_buffer
    if ucxx is not None:
        return
    # remove/process dask.ucx flags for valid ucx options
    ucx_config, ucx_environment = _prepare_ucx_config()
    # We ensure the CUDA context is created before initializing UCX. This can't
    # be safely handled externally because communications in Dask start before
    # preload scripts run.
    # Precedence:
    # 1. external environment
    # 2. ucx_config (high level settings passed to ucxx.init)
    # 3. ucx_environment (low level settings equivalent to environment variables)
    ucx_tls = os.environ.get(
        "UCX_TLS",
        ucx_config.get("TLS", ucx_environment.get("UCX_TLS", "")),
    )
    if (
        dask.config.get("distributed.comm.ucx.create-cuda-context") is True
        # This is not foolproof, if UCX_TLS=all we might require CUDA
        # depending on configuration of UCX, but this is better than
        # nothing
        or ("cuda" in ucx_tls and "^cuda" not in ucx_tls)
    ):
        try:
            import numba.cuda
        except ImportError:
            raise ImportError(
                "CUDA support with UCX requires Numba for context management"
            )
        cuda_visible_device = get_device_index_and_uuid(
            os.environ.get("CUDA_VISIBLE_DEVICES", "0").split(",")[0]
        )
        pre_existing_cuda_context = has_cuda_context()
        if pre_existing_cuda_context.has_context:
            _warn_existing_cuda_context(
                pre_existing_cuda_context.device_info, os.getpid()
            )
        # Creating the context implicitly (and warning below if it ended up on
        # the wrong device).
        numba.cuda.current_context()
        cuda_context_created = has_cuda_context()
        if (
            cuda_context_created.has_context
            and cuda_context_created.device_info.uuid != cuda_visible_device.uuid
        ):
            _warn_cuda_context_wrong_device(
                cuda_visible_device, cuda_context_created.device_info, os.getpid()
            )
    multi_buffer = dask.config.get("distributed.comm.ucx.multi-buffer", default=False)
    import ucxx as _ucxx
    ucxx = _ucxx
    with patch.dict(os.environ, ucx_environment):
        # We carefully ensure that ucx_environment only contains things
        # that don't override ucx_config or existing slots in the
        # environment, so the user's external environment can safely
        # override things here.
        ucxx.init(options=ucx_config, env_takes_precedence=True)
    pool_size_str = dask.config.get("distributed.rmm.pool-size")
    # Find the function, `device_array()`, to use when allocating new CUDA arrays
    try:
        import rmm
        def device_array(n):
            return rmm.DeviceBuffer(size=n)
        if pool_size_str is not None:
            pool_size = parse_bytes(pool_size_str)
            rmm.reinitialize(
                pool_allocator=True, managed_memory=False, initial_pool_size=pool_size
            )
    except ImportError:
        try:
            import numba.cuda
            def numba_device_array(n):
                a = numba.cuda.device_array((n,), dtype="u1")
                # Keep the CUDA context alive as long as the array is.
                weakref.finalize(a, numba.cuda.current_context)
                return a
            device_array = numba_device_array
        except ImportError:
            def device_array(n):
                raise RuntimeError(
                    "In order to send/recv CUDA arrays, Numba or RMM is required"
                )
        if pool_size_str is not None:
            logger.warning(
                "Initial RMM pool size defined, but RMM is not available. "
                "Please consider installing RMM or removing the pool size option."
            )
def _close_comm(ref):
    """Mark a Dask UCX comm as closed when its UCX endpoint closes or errors.
    Parameters
    ----------
    ref: weak reference to a Dask UCX comm
    """
    maybe_comm = ref()
    if maybe_comm is None:
        # Comm was already garbage-collected; nothing to do.
        return
    maybe_comm._closed = True
class UCXX(Comm):
    """Comm object using UCXX.
    Parameters
    ----------
    ep : ucxx.Endpoint
        The UCXX endpoint.
    local_address : str
        The local address, prefixed with `ucxx://` to use.
    peer_address : str
        The address of the remote peer, prefixed with `ucxx://` to use.
    deserialize : bool, default True
        Whether to deserialize data in :meth:`distributed.protocol.loads`
    enable_close_callback: : bool, default True
        Enable close callback, required so that the endpoint object is notified
        when the remote endpoint closed or errored. This is required for proper
        lifetime handling of UCX-Py, but should be disabled for UCXX.
    Notes
    -----
    The read-write cycle uses the following pattern:
    Each msg is serialized into a number of "data" frames. We prepend these
    real frames with two additional frames
    1. is_gpus: Boolean indicator for whether the frame should be
       received into GPU memory. Packed in '?' format. Unpack with
       ``<n_frames>?`` format.
    2. frame_size : Unsigned int describing the size of frame (in bytes)
       to receive. Packed in 'Q' format, so a length-0 frame is equivalent
       to an unsized frame. Unpacked with ``<n_frames>Q``.
    The expected read cycle is
    1. Read the frame describing if connection is closing and number of frames
    2. Read the frame describing whether each data frame is gpu-bound
    3. Read the frame describing whether each data frame is sized
    4. Read all the data frames.
    """
    def __init__(  # type: ignore[no-untyped-def]
        self,
        ep,
        local_addr: str,
        peer_addr: str,
        deserialize: bool = True,
        enable_close_callback: bool = True,
    ):
        super().__init__(deserialize=deserialize)
        self._ep = ep
        # Raw UCX endpoint handle, kept for identity/debugging purposes.
        self._ep_handle = int(self._ep._ep.handle)
        if local_addr:
            assert local_addr.startswith("ucxx")
        assert peer_addr.startswith("ucxx")
        self._local_addr = local_addr
        self._peer_addr = peer_addr
        self.comm_flag = None
        if enable_close_callback:
            # When the UCX endpoint closes or errors the registered callback
            # is called.
            ref = weakref.ref(self)
            self._ep.set_close_callback(functools.partial(_close_comm, ref))
            self._closed = False
            self._has_close_callback = True
        else:
            self._has_close_callback = False
        logger.debug("UCX.__init__ %s", self)
    @property
    def local_address(self) -> str:
        return self._local_addr
    @property
    def peer_address(self) -> str:
        return self._peer_addr
    @property
    def same_host(self) -> bool:
        """Unlike in TCP, local_address can be blank"""
        return super().same_host if self._local_addr else False
    @log_errors
    async def write(
        self,
        msg: dict,
        serializers: Collection[str] | None = None,
        on_error: str = "message",
    ) -> int:
        # Serialize `msg` and send it with the framing protocol described in
        # the class docstring; returns the total number of payload bytes.
        if self.closed():
            raise CommClosedError("Endpoint is closed -- unable to send message")
        if serializers is None:
            serializers = ("cuda", "dask", "pickle", "error")
        # msg can also be a list of dicts when sending batched messages
        frames = await to_frames(
            msg,
            serializers=serializers,
            on_error=on_error,
            allow_offload=self.allow_offload,
        )
        sizes = tuple(nbytes(f) for f in frames)
        try:
            if multi_buffer is True:
                # Multi-buffer path: a single send carries the close flag
                # frame plus all data frames.
                if any(hasattr(f, "__cuda_array_interface__") for f in frames):
                    synchronize_stream(0)
                close = [struct.pack("?", False)]
                await self.ep.send_multi(close + frames)
            else:
                nframes = len(frames)
                cuda_frames = tuple(
                    hasattr(f, "__cuda_array_interface__") for f in frames
                )
                # Zero-length frames are skipped on the wire; the receiver
                # reconstructs them from the sizes header.
                cuda_send_frames, send_frames = zip(
                    *(
                        (is_cuda, each_frame)
                        for is_cuda, each_frame in zip(cuda_frames, frames)
                        if nbytes(each_frame) > 0
                    )
                )
                # Send meta data
                # Send close flag and number of frames (_Bool, int64)
                await self.ep.send(struct.pack("?Q", False, nframes))
                # Send which frames are CUDA (bool) and
                # how large each frame is (uint64)
                await self.ep.send(
                    struct.pack(nframes * "?" + nframes * "Q", *cuda_frames, *sizes)
                )
                # Send frames
                # It is necessary to first synchronize the default stream before start
                # sending We synchronize the default stream because UCX is not
                # stream-ordered and syncing the default stream will wait for other
                # non-blocking CUDA streams. Note this is only sufficient if the memory
                # being sent is not currently in use on non-blocking CUDA streams.
                if any(cuda_send_frames):
                    synchronize_stream(0)
                for each_frame in send_frames:
                    await self.ep.send(each_frame)
            return sum(sizes)
        except ucxx.exceptions.UCXError:
            self.abort()
            raise CommClosedError("While writing, the connection was closed")
    @log_errors
    async def read(self, deserializers=("cuda", "dask", "pickle", "error")):
        # Receive one message following the framing protocol in the class
        # docstring and deserialize it.
        if deserializers is None:
            deserializers = ("cuda", "dask", "pickle", "error")
        if multi_buffer is True:
            try:
                # TODO: We don't know if any frames are CUDA, investigate whether
                # we need to synchronize device here.
                frames = await self.ep.recv_multi()
                shutdown_frame = frames[0]
                frames = frames[1:]
                (shutdown,) = struct.unpack("?", shutdown_frame)
                if shutdown:  # The writer is closing the connection
                    raise CommClosedError("Connection closed by writer")
            except BaseException as e:
                # In addition to UCX exceptions, may be CancelledError or another
                # "low-level" exception. The only safe thing to do is to abort.
                # (See also https://github.com/dask/distributed/pull/6574).
                self.abort()
                raise CommClosedError(
                    f"Connection closed by writer.\nInner exception: {e!r}"
                )
        else:
            try:
                # Recv meta data
                # Recv close flag and number of frames (_Bool, int64)
                msg = host_array(struct.calcsize("?Q"))
                await self.ep.recv(msg)
                (shutdown, nframes) = struct.unpack("?Q", msg)
                if shutdown:  # The writer is closing the connection
                    raise CommClosedError("Connection closed by writer")
                # Recv which frames are CUDA (bool) and
                # how large each frame is (uint64)
                header_fmt = nframes * "?" + nframes * "Q"
                header = host_array(struct.calcsize(header_fmt))
                await self.ep.recv(header)
                header = struct.unpack(header_fmt, header)
                cuda_frames, sizes = header[:nframes], header[nframes:]
            except BaseException as e:
                # In addition to UCX exceptions, may be CancelledError or another
                # "low-level" exception. The only safe thing to do is to abort.
                # (See also https://github.com/dask/distributed/pull/6574).
                self.abort()
                raise CommClosedError(
                    f"Connection closed by writer.\nInner exception: {e!r}"
                )
            else:
                # Recv frames
                frames = [
                    device_array(each_size) if is_cuda else host_array(each_size)
                    for is_cuda, each_size in zip(cuda_frames, sizes)
                ]
                cuda_recv_frames, recv_frames = zip(
                    *(
                        (is_cuda, each_frame)
                        for is_cuda, each_frame in zip(cuda_frames, frames)
                        if nbytes(each_frame) > 0
                    )
                )
                # It is necessary to first populate `frames` with CUDA arrays and
                # synchronize the default stream before starting receiving to ensure
                # buffers have been allocated
                if any(cuda_recv_frames):
                    synchronize_stream(0)
                try:
                    for each_frame in recv_frames:
                        await self.ep.recv(each_frame)
                except BaseException as e:
                    # In addition to UCX exceptions, may be CancelledError or another
                    # "low-level" exception. The only safe thing to do is to abort.
                    # (See also https://github.com/dask/distributed/pull/6574).
                    self.abort()
                    raise CommClosedError(
                        f"Connection closed by writer.\nInner exception: {e!r}"
                    )
        try:
            return await from_frames(
                frames,
                deserialize=self.deserialize,
                deserializers=deserializers,
                allow_offload=self.allow_offload,
            )
        except EOFError:
            # Frames possibly garbled or truncated by communication error
            self.abort()
            raise CommClosedError("Aborted stream on truncated data")
    async def close(self):
        # Politely tell the peer we are closing (shutdown flag True), then
        # abort the endpoint.
        self._closed = True
        if self._ep is not None:
            try:
                if multi_buffer is True:
                    await self.ep.send_multi([struct.pack("?", True)])
                else:
                    await self.ep.send(struct.pack("?Q", True, 0))
            except (
                ucxx.exceptions.UCXError,
                ucxx.exceptions.UCXCloseError,
                ucxx.exceptions.UCXCanceledError,
                ucxx.exceptions.UCXConnectionResetError,
            ):
                # If the other end is in the process of closing,
                # UCX will sometimes raise a `Input/output` error,
                # which we can ignore.
                pass
            self.abort()
            self._ep = None
    def abort(self):
        # Immediately tear down the endpoint without notifying the peer.
        self._closed = True
        if self._ep is not None:
            self._ep.abort()
            self._ep = None
    def closed(self):
        if self._has_close_callback is True:
            return self._closed
        if self._ep is not None:
            # Even if the endpoint has not been closed or aborted by Dask, the lifetime
            # of the underlying endpoint may have changed if the remote endpoint has
            # disconnected. In that case there may still exist enqueued messages on its
            # buffer to be received, even though sending is not possible anymore.
            return not self._ep.is_alive()
        else:
            return True
    @property
    def ep(self):
        # Accessor that raises instead of returning a dead endpoint.
        if self._ep is not None:
            return self._ep
        else:
            raise CommClosedError("UCX Endpoint is closed")
class UCXXConnector(Connector):
    """Connector creating outgoing ``ucxx://`` connections."""
    prefix = "ucxx://"
    comm_class = UCXX
    encrypted = False
    async def connect(
        self, address: str, deserialize: bool = True, **connection_args: Any
    ) -> UCXX:
        """Open a UCXX endpoint to ``address`` (``host:port``) and wrap it in a comm.
        Raises ``CommClosedError`` if the peer goes away during the handshake.
        """
        logger.debug("UCXXConnector.connect: %s", address)
        ip, port = parse_host_port(address)
        # Lazy, process-wide UCXX initialization.
        init_once()
        try:
            ep = await ucxx.create_endpoint(ip, port)
        except (
            ucxx.exceptions.UCXCloseError,
            ucxx.exceptions.UCXCanceledError,
            ucxx.exceptions.UCXConnectionResetError,
            ucxx.exceptions.UCXNotConnectedError,
        ):
            raise CommClosedError("Connection closed before handshake completed")
        return self.comm_class(
            ep,
            local_addr="",
            peer_addr=self.prefix + address,
            deserialize=deserialize,
        )
class UCXXListener(Listener):
    """Listener accepting incoming ``ucxx://`` connections."""
    prefix = UCXXConnector.prefix
    comm_class = UCXXConnector.comm_class
    encrypted = UCXXConnector.encrypted
    def __init__(
        self,
        address: str,
        comm_handler: Callable[[UCXX], Awaitable[None]] | None = None,
        deserialize: bool = False,
        allow_offload: bool = True,
        **connection_args: Any,
    ):
        if not address.startswith(self.prefix):
            address = f"{self.prefix}{address}"
        # Port 0 lets UCXX pick a free port when the listener starts.
        self.ip, self._input_port = parse_host_port(address, default_port=0)
        self.comm_handler = comm_handler
        self.deserialize = deserialize
        self.allow_offload = allow_offload
        self._ep = None  # type: ucxx.Endpoint
        self.ucxx_server = None
        self.connection_args = connection_args
    @property
    def port(self):
        # The actual bound port; only valid after start().
        return self.ucxx_server.port
    @property
    def address(self):
        return f"{self.prefix}{self.ip}:{self.port}"
    async def start(self):
        async def serve_forever(client_ep):
            # Wrap every accepted endpoint in a comm and run the handshake
            # plus the user-supplied handler.
            ucx = self.comm_class(
                client_ep,
                local_addr=self.address,
                peer_addr=self.address,
                deserialize=self.deserialize,
            )
            ucx.allow_offload = self.allow_offload
            try:
                await self.on_connection(ucx)
            except CommClosedError:
                logger.debug("Connection closed before handshake completed")
                return
            if self.comm_handler:
                await self.comm_handler(ucx)
        init_once()
        self.ucxx_server = ucxx.create_listener(serve_forever, port=self._input_port)
    def stop(self):
        # Dropping the reference lets UCXX tear the server down.
        self.ucxx_server = None
    def get_host_port(self):
        # TODO: TCP raises if this hasn't started yet.
        return self.ip, self.port
    @property
    def listen_address(self):
        return self.prefix + unparse_host_port(*self.get_host_port())
    @property
    def contact_address(self):
        host, port = self.get_host_port()
        host = ensure_concrete_host(host)  # TODO: ensure_concrete_host
        return self.prefix + unparse_host_port(host, port)
    @property
    def bound_address(self):
        # TODO: Does this become part of the base API? Kinda hazy, since
        # we exclude in for inproc.
        return self.get_host_port()
class UCXXBackend(Backend):
    """Distributed communication backend for the ``ucxx://`` protocol."""
    # I / O
    def get_connector(self):
        """Return a fresh connector for outgoing UCXX connections."""
        return UCXXConnector()
    def get_listener(self, loc, handle_comm, deserialize, **connection_args):
        """Return a listener bound to ``loc`` that hands comms to ``handle_comm``."""
        return UCXXListener(loc, handle_comm, deserialize, **connection_args)
    # Address handling (mirrors BaseTCPBackend)
    def get_address_host(self, loc):
        host, _ = parse_host_port(loc)
        return host
    def get_address_host_port(self, loc):
        return parse_host_port(loc)
    def resolve_address(self, loc):
        host, port = parse_host_port(loc)
        return unparse_host_port(ensure_ip(host), port)
    def get_local_address_for(self, loc):
        host, _ = parse_host_port(loc)
        resolved = ensure_ip(host)
        # IPv6 addresses contain ':'; pick the matching local-address helper.
        picker = get_ipv6 if ":" in resolved else get_ip
        return unparse_host_port(picker(resolved), None)
def _prepare_ucx_config():
    """Translate dask config options to appropriate UCX config options
    Returns
    -------
    tuple
        Options suitable for passing to ``ucxx.init`` and additional
        UCX options that will be inserted directly into the environment
        while calling ``ucxx.init``.
    """
    # configuration of UCX can happen in two ways:
    # 1) high level on/off flags which correspond to UCX configuration
    # 2) explicitly defined UCX configuration flags in distributed.comm.ucx.environment
    # High-level settings in (1) are preferred to settings in (2)
    # Settings in the external environment override both
    high_level_options = {}
    # if any of the high level flags are set, as long as they are not Null/None,
    # we assume we should configure basic TLS settings for UCX, otherwise we
    # leave UCX to its default configuration
    if any(
        [
            dask.config.get("distributed.comm.ucx.tcp"),
            dask.config.get("distributed.comm.ucx.nvlink"),
            dask.config.get("distributed.comm.ucx.infiniband"),
        ]
    ):
        if dask.config.get("distributed.comm.ucx.rdmacm"):
            tls = "tcp"
            tls_priority = "rdmacm"
        else:
            tls = "tcp"
            tls_priority = "tcp"
        # CUDA COPY can optionally be used with ucx -- we rely on the user
        # to define when messages will include CUDA objects.  Note:
        # defining only the Infiniband flag will not enable cuda_copy
        if any(
            [
                dask.config.get("distributed.comm.ucx.nvlink"),
                dask.config.get("distributed.comm.ucx.cuda-copy"),
            ]
        ):
            tls = tls + ",cuda_copy"
        if dask.config.get("distributed.comm.ucx.infiniband"):
            tls = "rc," + tls
        if dask.config.get("distributed.comm.ucx.nvlink"):
            tls = tls + ",cuda_ipc"
        high_level_options = {"TLS": tls, "SOCKADDR_TLS_PRIORITY": tls_priority}
    # Pick up any other ucx environment settings
    environment_options = {}
    for k, v in dask.config.get("distributed.comm.ucx.environment", {}).items():
        # {"some-name": value} is translated to {"UCX_SOME_NAME": value}
        key = "_".join(map(str.upper, ("UCX", *k.split("-"))))
        # key[4:] strips the "UCX_" prefix to compare against high level keys.
        if (hl_key := key[4:]) in high_level_options:
            logger.warning(
                f"Ignoring {k}={v} ({key=}) in ucx.environment, "
                f"preferring {hl_key}={high_level_options[hl_key]} "
                "from high level options"
            )
        elif key in os.environ:
            # This is only info because setting UCX configuration via
            # environment variables is a reasonably common approach
            logger.info(
                f"Ignoring {k}={v} ({key=}) in ucx.environment, "
                f"preferring {key}={os.environ[key]} from external environment"
            )
        else:
            environment_options[key] = v
    return high_level_options, environment_options
| 0 |
rapidsai_public_repos/ucxx/python/distributed-ucxx | rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx/__init__.py | from .ucxx import UCXXBackend, UCXXConnector, UCXXListener # noqa: F401
from . import distributed_patches # noqa: F401
__version__ = "0.36.00"
| 0 |
rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx | rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx/tests/test_ucxx.py | from __future__ import annotations
import asyncio
import os
from unittest.mock import patch
import pytest
import dask
from distributed import Client, Scheduler, wait
from distributed.comm import connect, listen, parse_address
from distributed.comm.core import CommClosedError
from distributed.comm.registry import get_backend
from distributed.deploy.local import LocalCluster
from distributed.diagnostics.nvml import (
device_get_count,
get_device_index_and_uuid,
get_device_mig_mode,
has_cuda_context,
)
from distributed.protocol import to_serialize
from distributed.utils import wait_for
from distributed.utils_test import inc
import ucxx
import distributed_ucxx # noqa: E402
from distributed_ucxx.utils_test import gen_test
pytestmark = pytest.mark.gpu
# Prefer the UCX-reported address; fall back to localhost when UCX cannot
# determine one.
try:
    HOST = ucxx.get_address()
except Exception:
    HOST = "127.0.0.1"
def test_registered(ucxx_loop):
    # The setup.py entry point must expose the ucxx backend to distributed.
    backend = get_backend("ucxx")
    assert isinstance(backend, distributed_ucxx.UCXXBackend)
async def get_comm_pair(
    listen_addr=f"ucxx://{HOST}", listen_args=None, connect_args=None, **kwargs
):
    """Open a listener, connect a client to it, and return (client, server) comms."""
    listen_args = listen_args or {}
    connect_args = connect_args or {}
    q = asyncio.queues.Queue()
    async def handle_comm(comm):
        # Hand the server-side comm back to the caller through the queue.
        await q.put(comm)
    listener = listen(listen_addr, handle_comm, **listen_args, **kwargs)
    async with listener:
        comm = await connect(listener.contact_address, **connect_args, **kwargs)
        serv_comm = await q.get()
        return (comm, serv_comm)
# --- Basic comm round-trip tests --------------------------------------------
@gen_test()
async def test_ping_pong(ucxx_loop):
    com, serv_com = await get_comm_pair()
    msg = {"op": "ping"}
    await com.write(msg)
    result = await serv_com.read()
    assert result == msg
    result["op"] = "pong"
    await serv_com.write(result)
    result = await com.read()
    assert result == {"op": "pong"}
    await com.close()
    await serv_com.close()
@gen_test()
async def test_comm_objs(ucxx_loop):
    # Addresses of both ends carry the ucxx scheme and match each other.
    comm, serv_comm = await get_comm_pair()
    scheme, loc = parse_address(comm.peer_address)
    assert scheme == "ucxx"
    scheme, loc = parse_address(serv_comm.peer_address)
    assert scheme == "ucxx"
    assert comm.peer_address == serv_comm.local_address
@gen_test()
async def test_ucxx_specific(ucxx_loop):
    """
    Test concrete UCXX API.
    """
    # TODO:
    # 1. ensure exceptions in handle_comm fail the test
    # 2. Use dict in read / write, put serialization there.
    # 3. Test peer_address
    # 4. Test cleanup
    address = f"ucxx://{HOST}:{0}"
    async def handle_comm(comm):
        msg = await comm.read()
        msg["op"] = "pong"
        await comm.write(msg)
        await comm.read()
        await comm.close()
        assert comm.closed() is True
    listener = await distributed_ucxx.UCXXListener(address, handle_comm)
    host, port = listener.get_host_port()
    assert host.count(".") == 3
    assert port > 0
    keys = []
    async def client_communicate(key, delay=0):
        # addr = "%s:%d" % (host, port)
        comm = await connect(listener.contact_address)
        # TODO: peer_address
        # assert comm.peer_address == 'ucxx://' + addr
        assert comm.extra_info == {}
        msg = {"op": "ping", "data": key}
        await comm.write(msg)
        if delay:
            await asyncio.sleep(delay)
        msg = await comm.read()
        assert msg == {"op": "pong", "data": key}
        await comm.write({"op": "client closed"})
        keys.append(key)
        return comm
    await client_communicate(key=1234, delay=0.5)
    # Many clients at once
    N = 2
    futures = [client_communicate(key=i, delay=0.05) for i in range(N)]
    await asyncio.gather(*futures)
    assert set(keys) == {1234} | set(range(N))
    listener.stop()
@gen_test()
async def test_ping_pong_data(ucxx_loop):
    np = pytest.importorskip("numpy")
    data = np.ones((10, 10))
    com, serv_com = await get_comm_pair()
    msg = {"op": "ping", "data": to_serialize(data)}
    await com.write(msg)
    result = await serv_com.read()
    result["op"] = "pong"
    data2 = result.pop("data")
    np.testing.assert_array_equal(data2, data)
    await serv_com.write(result)
    result = await com.read()
    assert result == {"op": "pong"}
    await com.close()
    await serv_com.close()
@gen_test()
async def test_ucxx_deserialize(ucxx_loop):
    # Note we see this error on some systems with this test:
    # `socket.gaierror: [Errno -5] No address associated with hostname`
    # This may be due to a system configuration issue.
    # NOTE(review): this exercises check_deserialize over "tcp://", not
    # "ucxx://" — mirrors upstream distributed, but confirm intent.
    from distributed.comm.tests.test_comms import check_deserialize
    await check_deserialize("tcp://")
# --- GPU payload round-trip tests (cudf / cupy / numba) ----------------------
@pytest.mark.parametrize(
    "g",
    [
        lambda cudf: cudf.Series([1, 2, 3]),
        lambda cudf: cudf.Series([], dtype=object),
        lambda cudf: cudf.DataFrame([], dtype=object),
        lambda cudf: cudf.DataFrame([1]).head(0),
        lambda cudf: cudf.DataFrame([1.0]).head(0),
        lambda cudf: cudf.DataFrame({"a": []}),
        lambda cudf: cudf.DataFrame({"a": ["a"]}).head(0),
        lambda cudf: cudf.DataFrame({"a": [1.0]}).head(0),
        lambda cudf: cudf.DataFrame({"a": [1]}).head(0),
        lambda cudf: cudf.DataFrame({"a": [1, 2, None], "b": [1.0, 2.0, None]}),
        lambda cudf: cudf.DataFrame({"a": ["Check", "str"], "b": ["Sup", "port"]}),
    ],
)
@gen_test()
async def test_ping_pong_cudf(ucxx_loop, g):
    # if this test appears after cupy an import error arises
    # *** ImportError: /usr/lib/x86_64-linux-gnu/libstdc++.so.6: version `CXXABI_1.3.11'
    # not found (required by python3.7/site-packages/pyarrow/../../../libarrow.so.12)
    cudf = pytest.importorskip("cudf")
    from cudf.testing._utils import assert_eq
    cudf_obj = g(cudf)
    com, serv_com = await get_comm_pair()
    msg = {"op": "ping", "data": to_serialize(cudf_obj)}
    await com.write(msg)
    result = await serv_com.read()
    cudf_obj_2 = result.pop("data")
    assert result["op"] == "ping"
    assert_eq(cudf_obj, cudf_obj_2)
    await com.close()
    await serv_com.close()
@pytest.mark.parametrize("shape", [(100,), (10, 10), (4947,)])
@gen_test()
async def test_ping_pong_cupy(ucxx_loop, shape):
    cupy = pytest.importorskip("cupy")
    com, serv_com = await get_comm_pair()
    arr = cupy.random.random(shape)
    msg = {"op": "ping", "data": to_serialize(arr)}
    _, result = await asyncio.gather(com.write(msg), serv_com.read())
    data2 = result.pop("data")
    assert result["op"] == "ping"
    cupy.testing.assert_array_equal(arr, data2)
    await com.close()
    await serv_com.close()
@pytest.mark.slow
@pytest.mark.parametrize("n", [int(1e9), int(2.5e9)])
@gen_test()
async def test_large_cupy(ucxx_loop, n, cleanup):
    # Large transfers exercise the multi-message send path.
    cupy = pytest.importorskip("cupy")
    com, serv_com = await get_comm_pair()
    arr = cupy.ones(n, dtype="u1")
    msg = {"op": "ping", "data": to_serialize(arr)}
    _, result = await asyncio.gather(com.write(msg), serv_com.read())
    data2 = result.pop("data")
    assert result["op"] == "ping"
    cupy.testing.assert_array_equal(data2, arr)
    await com.close()
    await serv_com.close()
@gen_test()
async def test_ping_pong_numba(ucxx_loop):
    np = pytest.importorskip("numpy")
    numba = pytest.importorskip("numba")
    import numba.cuda
    arr = np.arange(10)
    arr = numba.cuda.to_device(arr)
    com, serv_com = await get_comm_pair()
    msg = {"op": "ping", "data": to_serialize(arr)}
    await com.write(msg)
    result = await serv_com.read()
    data2 = result.pop("data")
    np.testing.assert_array_equal(data2, arr)
    assert result["op"] == "ping"
@pytest.mark.parametrize("processes", [True, False])
@gen_test()
async def test_ucxx_localcluster(ucxx_loop, processes, cleanup):
async with LocalCluster(
protocol="ucxx",
host=HOST,
dashboard_address=":0",
n_workers=2,
threads_per_worker=1,
processes=processes,
asynchronous=True,
) as cluster:
async with Client(cluster, asynchronous=True) as client:
x = client.submit(inc, 1)
await x
assert x.key in cluster.scheduler.tasks
if not processes:
assert any(w.data == {x.key: 2} for w in cluster.workers.values())
assert len(cluster.scheduler.workers) == 2
@pytest.mark.slow
@gen_test(timeout=60)
async def test_stress(
ucxx_loop,
):
da = pytest.importorskip("dask.array")
chunksize = "10 MB"
async with LocalCluster(
protocol="ucxx",
dashboard_address=":0",
asynchronous=True,
host=HOST,
) as cluster:
async with Client(cluster, asynchronous=True):
rs = da.random.RandomState()
x = rs.random((10000, 10000), chunks=(-1, chunksize))
x = x.persist()
await wait(x)
for _ in range(10):
x = x.rechunk((chunksize, -1))
x = x.rechunk((-1, chunksize))
x = x.persist()
await wait(x)
@gen_test()
async def test_simple(
    ucxx_loop,
):
    """Sanity check: a ucxx-protocol cluster can run a trivial task."""
    async with LocalCluster(
        protocol="ucxx", n_workers=2, threads_per_worker=2, asynchronous=True
    ) as cluster:
        async with Client(cluster, asynchronous=True) as client:
            assert cluster.scheduler_address.startswith("ucxx://")
            result = await client.submit(lambda x: x + 1, 10)
            assert result == 11
@pytest.mark.xfail(reason="If running on Docker, requires --pid=host")
@gen_test()
async def test_cuda_context(
    ucxx_loop,
):
    """With ``distributed.comm.ucx.create-cuda-context`` enabled, both the
    client process and the worker must own a CUDA context on the selected
    device.
    """
    try:
        # Pick the first CUDA device NOT in MIG mode; if none exists the
        # `filter(...)` iterator is empty and `next` raises StopIteration.
        device_info = get_device_index_and_uuid(
            next(
                filter(
                    lambda i: get_device_mig_mode(i)[0] == 0, range(device_get_count())
                )
            )
        )
    except StopIteration:
        pytest.skip("No CUDA device in non-MIG mode available")

    # Pin the whole test to that one device via CUDA_VISIBLE_DEVICES so the
    # context checks below compare against a single, known device.
    with patch.dict(
        os.environ, {"CUDA_VISIBLE_DEVICES": device_info.uuid.decode("utf-8")}
    ):
        with dask.config.set({"distributed.comm.ucx.create-cuda-context": True}):
            async with LocalCluster(
                protocol="ucxx", n_workers=1, asynchronous=True
            ) as cluster:
                async with Client(cluster, asynchronous=True) as client:
                    assert cluster.scheduler_address.startswith("ucxx://")
                    # Client-side process has a context on the chosen device.
                    ctx = has_cuda_context()
                    assert ctx.has_context and ctx.device_info == device_info
                    # The single worker must report one as well.
                    worker_cuda_context = await client.run(has_cuda_context)
                    assert len(worker_cuda_context) == 1
                    worker_cuda_context = list(worker_cuda_context.values())
                    assert (
                        worker_cuda_context[0].has_context
                        and worker_cuda_context[0].device_info == device_info
                    )
@gen_test()
async def test_transpose(
    ucxx_loop,
):
    """Exercise worker-to-worker transfers with a transpose-and-sum graph."""
    da = pytest.importorskip("dask.array")

    async with LocalCluster(
        protocol="ucxx", n_workers=2, threads_per_worker=2, asynchronous=True
    ) as cluster:
        async with Client(cluster, asynchronous=True):
            assert cluster.scheduler_address.startswith("ucxx://")
            matrix = da.ones((10000, 10000), chunks=(1000, 1000)).persist()
            await matrix
            total = (matrix + matrix.T).sum()
            await total
@pytest.mark.parametrize("port", [0, 1234])
@gen_test()
async def test_ucxx_protocol(ucxx_loop, cleanup, port):
    """A scheduler started with the ``ucxx`` protocol (on either an
    ephemeral or a fixed port) advertises a ``ucxx://`` address."""
    async with Scheduler(protocol="ucxx", port=port, dashboard_address=":0") as sched:
        assert sched.address.startswith("ucxx://")
@gen_test()
@pytest.mark.skipif(
    int(os.environ.get("UCXPY_ENABLE_PYTHON_FUTURE", "1")) != 0,
    reason="Segfaults when Python futures are enabled",
)
async def test_ucxx_unreachable(
    ucxx_loop,
):
    """Connecting to an address nobody listens on raises a timeout OSError."""
    unreachable = "ucxx://255.255.255.255:12345"
    with pytest.raises(OSError, match="Timed out trying to connect to"):
        await Client(unreachable, timeout=1, asynchronous=True)
@gen_test()
async def test_comm_closed_on_read_error(ucxx_loop):
    """A read that fails (or times out) must leave the comm marked closed.

    Now takes the ``ucxx_loop`` fixture like every other test in this
    module so the UCXX loop is set up/torn down around the test; it was
    previously omitted here only.
    """
    reader, writer = await get_comm_pair()

    # Depending on the UCP protocol selected, it may raise either
    # `asyncio.TimeoutError` or `CommClosedError`, so validate either one.
    with pytest.raises((asyncio.TimeoutError, CommClosedError)):
        await wait_for(reader.read(), 0.01)

    assert reader.closed()

    # Release the peer endpoint too; only the reader is expected to have
    # been closed by the failed read. Previously `writer` was leaked.
    await writer.close()
@gen_test()
async def test_embedded_cupy_array(
    ucxx_loop,
):
    """A dask array backed by CuPy survives a ``client.compute`` round trip."""
    cupy = pytest.importorskip("cupy")
    da = pytest.importorskip("dask.array")

    async with LocalCluster(
        protocol="ucxx", n_workers=1, threads_per_worker=1, asynchronous=True
    ) as cluster:
        async with Client(cluster, asynchronous=True) as client:
            assert cluster.scheduler_address.startswith("ucxx://")
            source = cupy.arange(10000)
            wrapped = da.from_array(source, chunks=(10000,))
            roundtripped = await client.compute(wrapped)
            cupy.testing.assert_array_equal(source, roundtripped)
| 0 |
rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx | rapidsai_public_repos/ucxx/python/distributed-ucxx/distributed_ucxx/tests/test_comms.py | import asyncio
import pytest
from distributed.comm import connect, listen, parse_address
from distributed.comm.registry import backends
from distributed_ucxx.utils_test import gen_test
#
# Test communications through the abstract API
#
async def check_client_server(
    addr,
    check_listen_addr=None,
    check_contact_addr=None,
    listen_args=None,
    connect_args=None,
):
    """
    Abstract client / server test.

    Starts a listener on ``addr``, validates its listen and contact
    addresses (optionally via the ``check_*_addr`` callbacks, which
    receive the location part of each address), then runs one client
    followed by 20 concurrent clients through a ping/pong exchange.
    """

    async def handle_comm(comm):
        # Server side: answer one "ping" with a "pong", then expect a
        # final "foobar" message; always close the comm on exit.
        try:
            scheme, loc = parse_address(comm.peer_address)
            # Closes over `bound_scheme`, assigned below before any
            # connection can arrive.
            assert scheme == bound_scheme
            msg = await comm.read()
            assert msg["op"] == "ping"
            msg["op"] = "pong"
            await comm.write(msg)
            msg = await comm.read()
            assert msg["op"] == "foobar"
        finally:
            await comm.close()

    # Arbitrary connection args should be ignored
    listen_args = listen_args or {"xxx": "bar"}
    connect_args = connect_args or {"xxx": "foo"}

    listener = await listen(addr, handle_comm, **listen_args)

    # Check listener properties
    bound_addr = listener.listen_address
    bound_scheme, bound_loc = parse_address(bound_addr)
    assert bound_scheme in backends
    assert bound_scheme == parse_address(addr)[0]
    if check_listen_addr is not None:
        check_listen_addr(bound_loc)

    contact_addr = listener.contact_address
    contact_scheme, contact_loc = parse_address(contact_addr)
    assert contact_scheme == bound_scheme
    if check_contact_addr is not None:
        check_contact_addr(contact_loc)
    else:
        assert contact_addr == bound_addr

    # Check client <-> server comms
    keys = []  # keys successfully echoed back by the server

    async def client_communicate(key, delay=0):
        # One full client exchange; records `key` only on success.
        comm = await connect(listener.contact_address, **connect_args)
        try:
            assert comm.peer_address == listener.contact_address
            await comm.write({"op": "ping", "data": key})
            await comm.write({"op": "foobar"})
            if delay:
                await asyncio.sleep(delay)
            msg = await comm.read()
            assert msg == {"op": "pong", "data": key}
            keys.append(key)
        finally:
            await comm.close()

    await client_communicate(key=1234)

    # Many clients at once
    futures = [client_communicate(key=i, delay=0.05) for i in range(20)]
    await asyncio.gather(*futures)
    assert set(keys) == {1234} | set(range(20))

    listener.stop()
@gen_test()
async def test_ucx_client_server(ucxx_loop):
    """End-to-end client/server exchange over the local ucxx address."""
    pytest.importorskip("distributed.comm.ucx")
    ucxx = pytest.importorskip("ucxx")
    address = ucxx.get_address()
    await check_client_server(f"ucxx://{address}")
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.