repo_id
stringlengths
21
96
file_path
stringlengths
31
155
content
stringlengths
1
92.9M
__index_level_0__
int64
0
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/cluster/linkage.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // XXX: We allow the instantiation of masked_l2_nn here: // raft::linkage::FixConnectivitiesRedOp<value_idx, value_t> red_op(params.n_row); // raft::linkage::cross_component_nn<value_idx, value_t>( // handle, out_edges, data.data(), colors.data(), params.n_row, params.n_col, red_op); // // TODO: consider adding this to libraft.so or creating an instance in a // separate translation unit for this test. #undef RAFT_EXPLICIT_INSTANTIATE_ONLY #include "../test_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <raft/distance/distance_types.hpp> #include <raft/linalg/transpose.cuh> #include <raft/sparse/coo.hpp> #include <raft/core/device_mdspan.hpp> #include <raft/sparse/hierarchy/single_linkage.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <gtest/gtest.h> #include <vector> namespace raft { using namespace std; template <typename T, typename IdxT> struct LinkageInputs { IdxT n_row; IdxT n_col; std::vector<T> data; std::vector<IdxT> expected_labels; int n_clusters; bool use_knn; int c; }; /** * @brief kernel to calculate the values of a and b * @param firstClusterArray: the array of classes of type T * @param secondClusterArray: the array of classes of type T * @param size: the size of the data points * @param a: number of pairs of points that both the clusters have classified the same * @param b: number of pairs of points that both 
the clusters have classified differently */ template <typename T, int BLOCK_DIM_X, int BLOCK_DIM_Y> RAFT_KERNEL computeTheNumerator( const T* firstClusterArray, const T* secondClusterArray, uint64_t size, uint64_t* a, uint64_t* b) { // calculating the indices of pairs of datapoints compared by the current thread uint64_t j = threadIdx.x + blockIdx.x * blockDim.x; uint64_t i = threadIdx.y + blockIdx.y * blockDim.y; // thread-local variables to count a and b uint64_t myA = 0, myB = 0; if (i < size && j < size && j < i) { // checking if the pair have been classified the same by both the clusters if (firstClusterArray[i] == firstClusterArray[j] && secondClusterArray[i] == secondClusterArray[j]) { ++myA; } // checking if the pair have been classified differently by both the clusters else if (firstClusterArray[i] != firstClusterArray[j] && secondClusterArray[i] != secondClusterArray[j]) { ++myB; } } // specialize blockReduce for a 2D block of 1024 threads of type uint64_t typedef cub::BlockReduce<uint64_t, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y> BlockReduce; // Allocate shared memory for blockReduce __shared__ typename BlockReduce::TempStorage temp_storage; // summing up thread-local counts specific to a block myA = BlockReduce(temp_storage).Sum(myA); __syncthreads(); myB = BlockReduce(temp_storage).Sum(myB); __syncthreads(); // executed once per block if (threadIdx.x == 0 && threadIdx.y == 0) { raft::myAtomicAdd<unsigned long long int>((unsigned long long int*)a, myA); raft::myAtomicAdd<unsigned long long int>((unsigned long long int*)b, myB); } } /** * @brief Function to calculate RandIndex * <a href="https://en.wikipedia.org/wiki/Rand_index">more info on rand index</a> * @param firstClusterArray: the array of classes of type T * @param secondClusterArray: the array of classes of type T * @param size: the size of the data points of type uint64_t * @param stream: the cudaStream object */ template <typename T> double compute_rand_index(T* 
firstClusterArray, T* secondClusterArray, uint64_t size, cudaStream_t stream) { // rand index for size less than 2 is not defined ASSERT(size >= 2, "Rand Index for size less than 2 not defined!"); // allocating and initializing memory for a and b in the GPU rmm::device_uvector<uint64_t> arr_buf(2, stream); RAFT_CUDA_TRY(cudaMemsetAsync(arr_buf.data(), 0, 2 * sizeof(uint64_t), stream)); // kernel configuration static const int BLOCK_DIM_Y = 16, BLOCK_DIM_X = 16; dim3 numThreadsPerBlock(BLOCK_DIM_X, BLOCK_DIM_Y); dim3 numBlocks(raft::ceildiv<int>(size, numThreadsPerBlock.x), raft::ceildiv<int>(size, numThreadsPerBlock.y)); // calling the kernel computeTheNumerator<T, BLOCK_DIM_X, BLOCK_DIM_Y><<<numBlocks, numThreadsPerBlock, 0, stream>>>( firstClusterArray, secondClusterArray, size, arr_buf.data(), arr_buf.data() + 1); // synchronizing and updating the calculated values of a and b from device to host uint64_t ab_host[2] = {0}; raft::update_host(ab_host, arr_buf.data(), 2, stream); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); // error handling RAFT_CUDA_TRY(cudaGetLastError()); // denominator uint64_t nChooseTwo = size * (size - 1) / 2; // calculating the rand_index return (double)(((double)(ab_host[0] + ab_host[1])) / (double)nChooseTwo); } template <typename T, typename IdxT> ::std::ostream& operator<<(::std::ostream& os, const LinkageInputs<T, IdxT>& dims) { return os; } template <typename T, typename IdxT> class LinkageTest : public ::testing::TestWithParam<LinkageInputs<T, IdxT>> { public: LinkageTest() : params(::testing::TestWithParam<LinkageInputs<T, IdxT>>::GetParam()), labels(0, resource::get_cuda_stream(handle)), labels_ref(0, resource::get_cuda_stream(handle)) { } protected: void basicTest() { auto stream = resource::get_cuda_stream(handle); labels.resize(params.n_row, stream); labels_ref.resize(params.n_row, stream); rmm::device_uvector<T> data(params.n_row * params.n_col, stream); raft::copy(data.data(), params.data.data(), data.size(), stream); 
raft::copy(labels_ref.data(), params.expected_labels.data(), params.n_row, stream); rmm::device_uvector<IdxT> out_children(params.n_row * 2, stream); auto data_view = raft::make_device_matrix_view<const T, IdxT, row_major>( data.data(), params.n_row, params.n_col); auto dendrogram_view = raft::make_device_matrix_view<IdxT, IdxT, row_major>(out_children.data(), params.n_row, 2); auto labels_view = raft::make_device_vector_view<IdxT, IdxT>(labels.data(), params.n_row); if (params.use_knn) { raft::cluster::hierarchy:: single_linkage<T, IdxT, raft::cluster::hierarchy::LinkageDistance::KNN_GRAPH>( handle, data_view, dendrogram_view, labels_view, raft::distance::DistanceType::L2SqrtExpanded, params.n_clusters, std::make_optional<int>(params.c)); } else { raft::cluster::hierarchy:: single_linkage<T, IdxT, raft::cluster::hierarchy::LinkageDistance::PAIRWISE>( handle, data_view, dendrogram_view, labels_view, raft::distance::DistanceType::L2SqrtExpanded, params.n_clusters, std::make_optional<int>(params.c)); } resource::sync_stream(handle, stream); score = compute_rand_index(labels.data(), labels_ref.data(), params.n_row, stream); } void SetUp() override { basicTest(); } protected: raft::resources handle; LinkageInputs<T, IdxT> params; rmm::device_uvector<IdxT> labels, labels_ref; double score; }; const std::vector<LinkageInputs<float, int>> linkage_inputsf2 = { // Test n_clusters == n_points {10, 5, {0.21390334, 0.50261639, 0.91036676, 0.59166485, 0.71162682, 0.10248392, 0.77782677, 0.43772379, 0.4035871, 0.3282796, 0.47544681, 0.59862974, 0.12319357, 0.06239463, 0.28200272, 0.1345717, 0.50498218, 0.5113505, 0.16233086, 0.62165332, 0.42281548, 0.933117, 0.41386077, 0.23264562, 0.73325968, 0.37537541, 0.70719873, 0.14522645, 0.73279625, 0.9126674, 0.84854131, 0.28890216, 0.85267903, 0.74703138, 0.83842071, 0.34942792, 0.27864171, 0.70911132, 0.21338564, 0.32035554, 0.73788331, 0.46926692, 0.57570162, 0.42559178, 0.87120209, 0.22734951, 0.01847905, 0.75549396, 0.76166195, 
0.66613745}, {9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, 10, true, -1}, // // Test outlier points {9, 2, {-1, -50, 3, 4, 5000, 10000, 1, 3, 4, 5, 0.000005, 0.00002, 2000000, 500000, 10, 50, 30, 5}, {6, 0, 5, 0, 0, 4, 3, 2, 1}, 7, true, -1}, // Test n_clusters == (n_points / 2) {10, 5, {0.21390334, 0.50261639, 0.91036676, 0.59166485, 0.71162682, 0.10248392, 0.77782677, 0.43772379, 0.4035871, 0.3282796, 0.47544681, 0.59862974, 0.12319357, 0.06239463, 0.28200272, 0.1345717, 0.50498218, 0.5113505, 0.16233086, 0.62165332, 0.42281548, 0.933117, 0.41386077, 0.23264562, 0.73325968, 0.37537541, 0.70719873, 0.14522645, 0.73279625, 0.9126674, 0.84854131, 0.28890216, 0.85267903, 0.74703138, 0.83842071, 0.34942792, 0.27864171, 0.70911132, 0.21338564, 0.32035554, 0.73788331, 0.46926692, 0.57570162, 0.42559178, 0.87120209, 0.22734951, 0.01847905, 0.75549396, 0.76166195, 0.66613745}, {1, 0, 4, 0, 0, 3, 2, 0, 2, 1}, 5, true, -1}, // Test n_points == 100 {100, 10, {6.26168372e-01, 9.30437651e-01, 6.02450208e-01, 2.73025296e-01, 9.53050619e-01, 3.32164396e-01, 6.88942598e-01, 5.79163537e-01, 6.70341547e-01, 2.70140602e-02, 9.30429671e-01, 7.17721157e-01, 9.89948537e-01, 7.75253347e-01, 1.34491522e-02, 2.48522428e-02, 3.51413378e-01, 7.64405834e-01, 7.86373507e-01, 7.18748577e-01, 8.66998621e-01, 6.80316582e-01, 2.51288712e-01, 4.91078420e-01, 3.76246281e-01, 4.86828710e-01, 5.67464772e-01, 5.30734742e-01, 8.99478296e-01, 7.66699088e-01, 9.49339111e-01, 3.55248484e-01, 9.06046929e-01, 4.48407772e-01, 6.96395305e-01, 2.44277335e-01, 7.74840000e-01, 5.21046603e-01, 4.66423971e-02, 5.12019638e-02, 8.95019614e-01, 5.28956953e-01, 4.31536306e-01, 5.83857744e-01, 4.41787364e-01, 4.68656523e-01, 5.73971433e-01, 6.79989654e-01, 3.19650588e-01, 6.12579596e-01, 6.49126442e-02, 8.39131142e-01, 2.85252117e-01, 5.84848929e-01, 9.46507115e-01, 8.58440748e-01, 3.61528940e-01, 2.44215959e-01, 3.80101125e-01, 4.57128957e-02, 8.82216988e-01, 8.31498633e-01, 7.23474381e-01, 7.75788607e-01, 1.40864146e-01, 
6.62092382e-01, 5.13985168e-01, 3.00686418e-01, 8.70109949e-01, 2.43187753e-01, 2.89391938e-01, 2.84214238e-01, 8.70985521e-01, 8.77491176e-01, 6.72537226e-01, 3.30929686e-01, 1.85934324e-01, 9.16222614e-01, 6.18239142e-01, 2.64768597e-01, 5.76145451e-01, 8.62961369e-01, 6.84757925e-01, 7.60549082e-01, 1.27645356e-01, 4.51004673e-01, 3.92292980e-01, 4.63170803e-01, 4.35449330e-02, 2.17583404e-01, 5.71832605e-02, 2.06763039e-01, 3.70116249e-01, 2.09750028e-01, 6.17283019e-01, 8.62549231e-01, 9.84156240e-02, 2.66249156e-01, 3.87635103e-01, 2.85591012e-02, 4.24826068e-01, 4.45795088e-01, 6.86227676e-01, 1.08848960e-01, 5.96731841e-02, 3.71770228e-01, 1.91548833e-01, 6.95136078e-01, 9.00700636e-01, 8.76363105e-01, 2.67334632e-01, 1.80619709e-01, 7.94060419e-01, 1.42854171e-02, 1.09372387e-01, 8.74028108e-01, 6.46403232e-01, 4.86588834e-01, 5.93446175e-02, 6.11886291e-01, 8.83865057e-01, 3.15879821e-01, 2.27043992e-01, 9.76764951e-01, 6.15620336e-01, 9.76199360e-01, 2.40548962e-01, 3.21795663e-01, 8.75087904e-02, 8.11234663e-01, 6.96070480e-01, 8.12062321e-01, 1.21958818e-01, 3.44348628e-02, 8.72630414e-01, 3.06162776e-01, 1.76043529e-02, 9.45894971e-01, 5.33896401e-01, 6.21642973e-01, 4.93062535e-01, 4.48984262e-01, 2.24560379e-01, 4.24052195e-02, 4.43447610e-01, 8.95646149e-01, 6.05220676e-01, 1.81840491e-01, 9.70831206e-01, 2.12563586e-02, 6.92582693e-01, 7.55946922e-01, 7.95086143e-01, 6.05328941e-01, 3.99350764e-01, 4.32846636e-01, 9.81114529e-01, 4.98266428e-01, 6.37127930e-03, 1.59085889e-01, 6.34682067e-05, 5.59429440e-01, 7.38827633e-01, 8.93214770e-01, 2.16494306e-01, 9.35430573e-02, 4.75665868e-02, 7.80503518e-01, 7.86240041e-01, 7.06854594e-01, 2.13725879e-02, 7.68246091e-01, 4.50234808e-01, 5.21231104e-01, 5.01989826e-03, 4.22081572e-02, 1.65337732e-01, 8.54134740e-01, 4.99430262e-01, 8.94525601e-01, 1.14028379e-01, 3.69739861e-01, 1.32955599e-01, 2.65563824e-01, 2.52811151e-01, 1.44792843e-01, 6.88449594e-01, 4.44921417e-01, 8.23296587e-01, 1.93266317e-01, 
1.19033309e-01, 1.36368966e-01, 3.42600285e-01, 5.64505195e-01, 5.57594559e-01, 7.44257892e-01, 8.38231569e-02, 4.11548847e-01, 3.21010077e-01, 8.55081359e-01, 4.30105779e-01, 1.16229135e-01, 9.87731964e-02, 3.14712335e-01, 4.50880592e-01, 2.72289598e-01, 6.31615256e-01, 8.97432958e-01, 4.44764250e-01, 8.03776440e-01, 2.68767748e-02, 2.43374608e-01, 4.02141103e-01, 4.98881209e-01, 5.33173003e-01, 8.82890436e-01, 7.16149148e-01, 4.19664401e-01, 2.29335357e-01, 2.88637806e-01, 3.44696803e-01, 6.78171906e-01, 5.69849716e-01, 5.86454477e-01, 3.54474989e-01, 9.03876540e-01, 6.45980000e-01, 6.34887593e-01, 7.88039746e-02, 2.04814126e-01, 7.82251754e-01, 2.43147074e-01, 7.50951808e-01, 1.72799092e-02, 2.95349590e-01, 6.57991826e-01, 8.81214312e-01, 5.73970708e-01, 2.77610881e-01, 1.82155097e-01, 7.69797417e-02, 6.44792402e-01, 9.46950998e-01, 7.73064845e-01, 6.04733624e-01, 5.80094567e-01, 1.67498426e-01, 2.66514296e-01, 6.50140368e-01, 1.91170299e-01, 2.08752199e-01, 3.01664091e-01, 9.85033484e-01, 2.92909152e-01, 8.65816607e-01, 1.85222119e-01, 2.28814559e-01, 1.34286382e-02, 2.89234322e-01, 8.18668708e-01, 4.71706924e-01, 9.23199803e-01, 2.80879188e-01, 1.47319284e-01, 4.13915748e-01, 9.31274932e-02, 6.66322195e-01, 9.66953974e-01, 3.19405786e-01, 6.69486551e-01, 5.03096313e-02, 6.95225201e-01, 5.78469859e-01, 6.29481655e-01, 1.39252534e-01, 1.22564968e-01, 6.80663678e-01, 6.34607157e-01, 6.42765834e-01, 1.57127410e-02, 2.92132086e-01, 5.24423878e-01, 4.68676824e-01, 2.86003928e-01, 7.18608322e-01, 8.95617933e-01, 5.48844309e-01, 1.74517278e-01, 5.24379196e-01, 2.13526524e-01, 5.88375435e-01, 9.88560185e-01, 4.17435771e-01, 6.14438688e-01, 9.53760881e-01, 5.27151288e-01, 7.03017278e-01, 3.44448559e-01, 4.47059676e-01, 2.83414901e-01, 1.98979011e-01, 4.24917361e-01, 5.73172761e-01, 2.32398853e-02, 1.65887230e-01, 4.05552785e-01, 9.29665524e-01, 2.26135696e-01, 9.20563384e-01, 7.65259963e-01, 4.54820075e-01, 8.97710267e-01, 3.78559302e-03, 9.15219382e-01, 3.55705698e-01, 
6.94905124e-01, 8.58540202e-01, 3.89790666e-01, 2.49478206e-01, 7.93679304e-01, 4.75830027e-01, 4.40425353e-01, 3.70579459e-01, 1.40578049e-01, 1.70386675e-01, 7.04056121e-01, 4.85963102e-01, 9.68450060e-01, 6.77178001e-01, 2.65934654e-01, 2.58915007e-01, 6.70052890e-01, 2.61945109e-01, 8.46207759e-01, 1.01928951e-01, 2.85611334e-01, 2.45776933e-01, 2.66658783e-01, 3.71724077e-01, 4.34319025e-01, 4.24407347e-01, 7.15417683e-01, 8.07997684e-01, 1.64296275e-01, 6.01638065e-01, 8.60606804e-02, 2.68719187e-01, 5.11764101e-01, 9.75844338e-01, 7.81226782e-01, 2.20925515e-01, 7.18135040e-01, 9.82395577e-01, 8.39160243e-01, 9.08058083e-01, 6.88010677e-01, 8.14271847e-01, 5.12460821e-01, 1.17311345e-01, 5.96075228e-01, 9.17455497e-01, 2.12052706e-01, 7.04074603e-01, 8.72872565e-02, 8.76047818e-01, 6.96235046e-01, 8.54801557e-01, 2.49729159e-01, 9.76594604e-01, 2.87386363e-01, 2.36461559e-02, 9.94075254e-01, 4.25193986e-01, 7.61869994e-01, 5.13334255e-01, 6.44711165e-02, 8.92156689e-01, 3.55235167e-01, 1.08154647e-01, 8.78446825e-01, 2.43833016e-01, 9.23071293e-01, 2.72724115e-01, 9.46631338e-01, 3.74510294e-01, 4.08451278e-02, 9.78392777e-01, 3.65079221e-01, 6.37199516e-01, 5.51144906e-01, 5.25978080e-01, 1.42803678e-01, 4.05451674e-01, 7.79788219e-01, 6.26009784e-01, 3.35249497e-01, 1.43159543e-02, 1.80363779e-01, 5.05096904e-01, 2.82619947e-01, 5.83561392e-01, 3.10951324e-01, 8.73223968e-01, 4.38545619e-01, 4.81348800e-01, 6.68497085e-01, 3.79345401e-01, 9.58832501e-01, 1.89869550e-01, 2.34083070e-01, 2.94066207e-01, 5.74892667e-02, 6.92106828e-02, 9.61127686e-02, 6.72650672e-02, 8.47345378e-01, 2.80916761e-01, 7.32177357e-03, 9.80785961e-01, 5.73192225e-02, 8.48781331e-01, 8.83225408e-01, 7.34398275e-01, 7.70381941e-01, 6.20778343e-01, 8.96822048e-01, 5.40732486e-01, 3.69704071e-01, 5.77305837e-01, 2.08221827e-01, 7.34275341e-01, 1.06110900e-01, 3.49496706e-01, 8.34948910e-01, 1.56403291e-02, 6.78576376e-01, 8.96141268e-01, 5.94835119e-01, 1.43943153e-01, 3.49618530e-01, 
2.10440392e-01, 3.46585620e-01, 1.05153093e-01, 3.45446174e-01, 2.72177079e-01, 7.07946300e-01, 4.33717726e-02, 3.31232203e-01, 3.91874320e-01, 4.76338141e-01, 6.22777789e-01, 2.95989228e-02, 4.32855769e-01, 7.61049310e-01, 3.63279149e-01, 9.47210350e-01, 6.43721247e-01, 6.58025802e-01, 1.05247633e-02, 5.29974442e-01, 7.30675767e-01, 4.30041079e-01, 6.62634841e-01, 8.25936616e-01, 9.91253704e-01, 6.79399281e-01, 5.44177006e-01, 7.52876048e-01, 3.32139049e-01, 7.98732398e-01, 7.38865223e-01, 9.16055132e-01, 6.11736493e-01, 9.63672879e-01, 1.83778839e-01, 7.27558919e-02, 5.91602822e-01, 3.25235484e-01, 2.34741217e-01, 9.52346277e-01, 9.18556407e-01, 9.35373324e-01, 6.89209070e-01, 2.56049054e-01, 6.17975395e-01, 7.82285691e-01, 9.84983432e-01, 6.62322741e-01, 2.04144457e-01, 3.98446577e-01, 1.38918297e-01, 3.05919921e-01, 3.14043787e-01, 5.91072666e-01, 7.44703771e-01, 8.92272567e-01, 9.78017873e-01, 9.01203161e-01, 1.41526372e-01, 4.14878484e-01, 6.80683651e-01, 5.01733152e-02, 8.14635389e-01, 2.27926375e-01, 9.03269815e-01, 8.68443745e-01, 9.86939190e-01, 7.40779486e-01, 2.61005311e-01, 3.19276232e-01, 9.69509248e-01, 1.11908818e-01, 4.49198556e-01, 1.27056715e-01, 3.84064823e-01, 5.14591811e-01, 2.10747488e-01, 9.53884090e-01, 8.43167950e-01, 4.51187972e-01, 3.75331782e-01, 6.23566461e-01, 3.55290379e-01, 2.95705968e-01, 1.69622690e-01, 1.42981830e-01, 2.72180991e-01, 9.46468040e-01, 3.70932500e-01, 9.94292830e-01, 4.62587505e-01, 7.14817405e-01, 2.45370540e-02, 3.00906377e-01, 5.75768304e-01, 9.71448393e-01, 6.95574827e-02, 3.93693854e-01, 5.29306116e-01, 5.04694554e-01, 6.73797120e-02, 6.76596969e-01, 5.50948898e-01, 3.24909641e-01, 7.70337719e-01, 6.51842631e-03, 3.03264879e-01, 7.61037886e-03, 2.72289601e-01, 1.50502041e-01, 6.71103888e-02, 7.41503703e-01, 1.92088941e-01, 2.19043977e-01, 9.09320161e-01, 2.37993569e-01, 6.18107973e-02, 8.31447852e-01, 2.23355609e-01, 1.84789435e-01, 4.16104518e-01, 4.21573859e-01, 8.72446305e-02, 2.97294197e-01, 4.50328256e-01, 
8.72199917e-01, 2.51279916e-01, 4.86219272e-01, 7.57071329e-01, 4.85655942e-01, 1.06187277e-01, 4.92341327e-01, 1.46017513e-01, 5.25421017e-01, 4.22637906e-01, 2.24685018e-01, 8.72648431e-01, 5.54051490e-01, 1.80745062e-01, 2.12756336e-01, 5.20883169e-01, 7.60363654e-01, 8.30254678e-01, 5.00003328e-01, 4.69017439e-01, 6.38105527e-01, 3.50638261e-02, 5.22217353e-02, 9.06516882e-02, 8.52975842e-01, 1.19985883e-01, 3.74926753e-01, 6.50302066e-01, 1.98875727e-01, 6.28362507e-02, 4.32693501e-01, 3.10500685e-01, 6.20732833e-01, 4.58503272e-01, 3.20790034e-01, 7.91284868e-01, 7.93054570e-01, 2.93406765e-01, 8.95399023e-01, 1.06441034e-01, 7.53085241e-02, 8.67523104e-01, 1.47963482e-01, 1.25584706e-01, 3.81545040e-02, 6.34338619e-01, 1.76368938e-02, 5.75553531e-02, 5.31607516e-01, 2.63869588e-01, 9.41945823e-01, 9.24028838e-02, 5.21496463e-01, 7.74866558e-01, 5.65210610e-01, 7.28015327e-02, 6.51963790e-01, 8.94727453e-01, 4.49571590e-01, 1.29932405e-01, 8.64026259e-01, 9.92599934e-01, 7.43721560e-01, 8.87300215e-01, 1.06369925e-01, 8.11335531e-01, 7.87734900e-01, 9.87344678e-01, 5.32502820e-01, 4.42612382e-01, 9.64041183e-01, 1.66085871e-01, 1.12937664e-01, 5.24423470e-01, 6.54689333e-01, 4.59119726e-01, 5.22774091e-01, 3.08722276e-02, 6.26979315e-01, 4.49754105e-01, 8.07495757e-01, 2.34199499e-01, 1.67765675e-01, 9.22168418e-01, 3.73210378e-01, 8.04432575e-01, 5.61890354e-01, 4.47025593e-01, 6.43155678e-01, 2.40407640e-01, 5.91631279e-01, 1.59369206e-01, 7.75799090e-01, 8.32067212e-01, 5.59791576e-02, 6.39105224e-01, 4.85274738e-01, 2.12630838e-01, 2.81431312e-02, 7.16205363e-01, 6.83885011e-01, 5.23869697e-01, 9.99418314e-01, 8.35331599e-01, 4.69877463e-02, 6.74712562e-01, 7.99273684e-01, 2.77001890e-02, 5.75809742e-01, 2.78513031e-01, 8.36209905e-01, 7.25472379e-01, 4.87173943e-01, 7.88311357e-01, 9.64676177e-01, 1.75752651e-01, 4.98112580e-01, 8.08850418e-02, 6.40981131e-01, 4.06647450e-01, 8.46539387e-01, 2.12620694e-01, 9.11012851e-01, 8.25041445e-01, 8.90065575e-01, 
9.63626055e-01, 5.96689242e-01, 1.63372670e-01, 4.51640148e-01, 3.43026542e-01, 5.80658851e-01, 2.82327625e-01, 4.75535418e-01, 6.27760926e-01, 8.46314115e-01, 9.61961932e-01, 3.19806094e-01, 5.05508062e-01, 5.28102944e-01, 6.13045057e-01, 7.44714938e-01, 1.50586073e-01, 7.91878033e-01, 4.89839179e-01, 3.10496849e-01, 8.82309038e-01, 2.86922314e-01, 4.84687559e-01, 5.20838630e-01, 4.62955493e-01, 2.38185305e-01, 5.47259907e-02, 7.10916137e-01, 7.31887202e-01, 6.25602317e-01, 8.77741168e-01, 4.19881322e-01, 4.81222328e-01, 1.28224501e-01, 2.46034010e-01, 3.34971854e-01, 7.37216484e-01, 5.62134821e-02, 7.14089724e-01, 9.85549393e-01, 4.66295827e-01, 3.08722434e-03, 4.70237690e-01, 2.66524167e-01, 7.93875484e-01, 4.54795911e-02, 8.09702944e-01, 1.47709735e-02, 1.70082405e-01, 6.35905179e-01, 3.75379109e-01, 4.30315011e-01, 3.15788760e-01, 5.58065230e-01, 2.24643800e-01, 2.42142981e-01, 6.57283636e-01, 3.34921891e-01, 1.26588975e-01, 7.68064155e-01, 9.43856291e-01, 4.47518596e-01, 5.44453573e-01, 9.95764932e-01, 7.16444391e-01, 8.51019765e-01, 1.01179183e-01, 4.45473958e-01, 4.60327322e-01, 4.96895844e-02, 4.72907738e-01, 5.58987444e-01, 3.41027487e-01, 1.56175026e-01, 7.58283148e-01, 6.83600909e-01, 2.14623396e-01, 3.27348880e-01, 3.92517893e-01, 6.70418431e-01, 5.16440832e-01, 8.63140348e-01, 5.73277464e-01, 3.46608058e-01, 7.39396341e-01, 7.20852434e-01, 2.35653246e-02, 3.89935659e-01, 7.53783745e-01, 6.34563528e-01, 8.79339335e-01, 7.41599159e-02, 5.62433904e-01, 6.15553852e-01, 4.56956324e-01, 5.20047447e-01, 5.26845015e-02, 5.58471266e-01, 1.63632233e-01, 5.38936665e-02, 6.49593683e-01, 2.56838748e-01, 8.99035326e-01, 7.20847756e-01, 5.68954684e-01, 7.43684755e-01, 5.70924238e-01, 3.82318724e-01, 4.89328290e-01, 5.62208561e-01, 4.97540804e-02, 4.18011085e-01, 6.88041565e-01, 2.16234653e-01, 7.89548214e-01, 8.46136387e-01, 8.46816189e-01, 1.73842353e-01, 6.11627842e-02, 8.44440559e-01, 4.50646654e-01, 3.74785037e-01, 4.87196697e-01, 4.56276448e-01, 9.13284391e-01, 
4.15715464e-01, 7.13597697e-01, 1.23641270e-02, 5.10031271e-01, 4.74601930e-02, 2.55731159e-01, 3.22090006e-01, 1.91165703e-01, 4.51170940e-01, 7.50843157e-01, 4.42420576e-01, 4.25380660e-01, 4.50667257e-01, 6.55689206e-01, 9.68257670e-02, 1.96528793e-01, 8.97343028e-01, 4.99940904e-01, 6.65504083e-01, 9.41828079e-01, 4.54397338e-01, 5.61893331e-01, 5.09839880e-01, 4.53117514e-01, 8.96804127e-02, 1.74888861e-01, 6.65641378e-01, 2.81668336e-01, 1.89532742e-01, 5.61668382e-01, 8.68330157e-02, 8.25092797e-01, 5.18106324e-01, 1.71904024e-01, 3.68385523e-01, 1.62005436e-01, 7.48507399e-01, 9.30274827e-01, 2.38198517e-01, 9.52222901e-01, 5.23587800e-01, 6.94384557e-01, 1.09338652e-01, 4.83356794e-01, 2.73050402e-01, 3.68027050e-01, 5.92366466e-01, 1.83192289e-01, 8.60376029e-01, 7.13926203e-01, 8.16750052e-01, 1.57890291e-01, 6.25691951e-01, 5.24831646e-01, 1.73873797e-01, 1.02429784e-01, 9.17488471e-01, 4.03584434e-01, 9.31170884e-01, 2.79386137e-01, 8.77745206e-01, 2.45200576e-01, 1.28896951e-01, 3.15713052e-01, 5.27874291e-01, 2.16444335e-01, 7.03883817e-01, 7.74738919e-02, 8.42422142e-01, 3.75598924e-01, 3.51002411e-01, 6.22752776e-01, 4.82407943e-01, 7.43107867e-01, 9.46182666e-01, 9.44344819e-01, 3.28124763e-01, 1.06147431e-01, 1.65102684e-01, 3.84060507e-01, 2.91057722e-01, 7.68173662e-02, 1.03543651e-01, 6.76698940e-01, 1.43141994e-01, 7.21342202e-01, 6.69471294e-03, 9.07298311e-01, 5.57080171e-01, 8.10954489e-01, 4.11120526e-01, 2.06407453e-01, 2.59590556e-01, 7.58512718e-01, 5.79873897e-01, 2.92875650e-01, 2.83686529e-01, 2.42829343e-01, 9.19323719e-01, 3.46832864e-01, 3.58238858e-01, 7.42827585e-01, 2.05760059e-01, 9.58438860e-01, 5.66326411e-01, 6.60292846e-01, 5.61095078e-02, 6.79465531e-01, 7.05118513e-01, 4.44713264e-01, 2.09732933e-01, 5.22732436e-01, 1.74396512e-01, 5.29356748e-01, 4.38475687e-01, 4.94036404e-01, 4.09785794e-01, 6.40025507e-01, 5.79371821e-01, 1.57726118e-01, 6.04572263e-01, 5.41072639e-01, 5.18847173e-01, 1.97093284e-01, 8.91767002e-01, 
4.29050835e-01, 8.25490570e-01, 3.87699807e-01, 4.50705808e-01, 2.49371643e-01, 3.36074898e-01, 9.29925118e-01, 6.65393649e-01, 9.07275994e-01, 3.73075859e-01, 4.14044139e-03, 2.37463702e-01, 2.25893784e-01, 2.46900245e-01, 4.50350196e-01, 3.48618117e-01, 5.07193932e-01, 5.23435142e-01, 8.13611417e-01, 8.92715622e-01, 1.02623450e-01, 3.06088345e-01, 7.80461650e-01, 2.21453645e-01, 2.01419652e-01, 2.84254457e-01, 3.68286735e-01, 7.39358243e-01, 8.97879394e-01, 9.81599566e-01, 7.56526442e-01, 7.37645545e-01, 4.23976657e-02, 8.25922012e-01, 2.60956996e-01, 2.90702065e-01, 8.98388344e-01, 3.03733299e-01, 8.49071471e-01, 3.45835425e-01, 7.65458276e-01, 5.68094872e-01, 8.93770930e-01, 9.93161641e-01, 5.63368667e-02, 4.26548945e-01, 5.46745780e-01, 5.75674571e-01, 7.94599487e-01, 7.18935553e-02, 4.46492976e-01, 6.40240123e-01, 2.73246969e-01, 2.00465968e-01, 1.30718835e-01, 1.92492005e-01, 1.96617189e-01, 6.61271644e-01, 8.12687657e-01, 8.66342445e-01 }, {0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, true, -4}, {10, 5, {0.21390334, 0.50261639, 0.91036676, 0.59166485, 0.71162682, 0.10248392, 0.77782677, 0.43772379, 0.4035871, 0.3282796, 0.47544681, 0.59862974, 0.12319357, 0.06239463, 0.28200272, 0.1345717, 0.50498218, 0.5113505, 0.16233086, 0.62165332, 0.42281548, 0.933117, 0.41386077, 0.23264562, 0.73325968, 0.37537541, 0.70719873, 0.14522645, 0.73279625, 0.9126674, 0.84854131, 0.28890216, 0.85267903, 0.74703138, 0.83842071, 0.34942792, 0.27864171, 0.70911132, 0.21338564, 0.32035554, 0.73788331, 0.46926692, 0.57570162, 0.42559178, 0.87120209, 0.22734951, 0.01847905, 0.75549396, 0.76166195, 0.66613745}, {9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, 10, false, 5}, // Test outlier points {9, 2, {-1, -50, 3, 4, 5000, 10000, 1, 3, 4, 5, 
0.000005, 0.00002, 2000000, 500000, 10, 50, 30, 5}, {6, 0, 5, 0, 0, 4, 3, 2, 1}, 7, false, 5}, // Test n_clusters == (n_points / 2) {10, 5, {0.21390334, 0.50261639, 0.91036676, 0.59166485, 0.71162682, 0.10248392, 0.77782677, 0.43772379, 0.4035871, 0.3282796, 0.47544681, 0.59862974, 0.12319357, 0.06239463, 0.28200272, 0.1345717, 0.50498218, 0.5113505, 0.16233086, 0.62165332, 0.42281548, 0.933117, 0.41386077, 0.23264562, 0.73325968, 0.37537541, 0.70719873, 0.14522645, 0.73279625, 0.9126674, 0.84854131, 0.28890216, 0.85267903, 0.74703138, 0.83842071, 0.34942792, 0.27864171, 0.70911132, 0.21338564, 0.32035554, 0.73788331, 0.46926692, 0.57570162, 0.42559178, 0.87120209, 0.22734951, 0.01847905, 0.75549396, 0.76166195, 0.66613745}, {1, 0, 4, 0, 0, 3, 2, 0, 2, 1}, 5, false, 10}, // Test n_points == 100 {100, 10, {6.26168372e-01, 9.30437651e-01, 6.02450208e-01, 2.73025296e-01, 9.53050619e-01, 3.32164396e-01, 6.88942598e-01, 5.79163537e-01, 6.70341547e-01, 2.70140602e-02, 9.30429671e-01, 7.17721157e-01, 9.89948537e-01, 7.75253347e-01, 1.34491522e-02, 2.48522428e-02, 3.51413378e-01, 7.64405834e-01, 7.86373507e-01, 7.18748577e-01, 8.66998621e-01, 6.80316582e-01, 2.51288712e-01, 4.91078420e-01, 3.76246281e-01, 4.86828710e-01, 5.67464772e-01, 5.30734742e-01, 8.99478296e-01, 7.66699088e-01, 9.49339111e-01, 3.55248484e-01, 9.06046929e-01, 4.48407772e-01, 6.96395305e-01, 2.44277335e-01, 7.74840000e-01, 5.21046603e-01, 4.66423971e-02, 5.12019638e-02, 8.95019614e-01, 5.28956953e-01, 4.31536306e-01, 5.83857744e-01, 4.41787364e-01, 4.68656523e-01, 5.73971433e-01, 6.79989654e-01, 3.19650588e-01, 6.12579596e-01, 6.49126442e-02, 8.39131142e-01, 2.85252117e-01, 5.84848929e-01, 9.46507115e-01, 8.58440748e-01, 3.61528940e-01, 2.44215959e-01, 3.80101125e-01, 4.57128957e-02, 8.82216988e-01, 8.31498633e-01, 7.23474381e-01, 7.75788607e-01, 1.40864146e-01, 6.62092382e-01, 5.13985168e-01, 3.00686418e-01, 8.70109949e-01, 2.43187753e-01, 2.89391938e-01, 2.84214238e-01, 8.70985521e-01, 
8.77491176e-01, 6.72537226e-01, 3.30929686e-01, 1.85934324e-01, 9.16222614e-01, 6.18239142e-01, 2.64768597e-01, 5.76145451e-01, 8.62961369e-01, 6.84757925e-01, 7.60549082e-01, 1.27645356e-01, 4.51004673e-01, 3.92292980e-01, 4.63170803e-01, 4.35449330e-02, 2.17583404e-01, 5.71832605e-02, 2.06763039e-01, 3.70116249e-01, 2.09750028e-01, 6.17283019e-01, 8.62549231e-01, 9.84156240e-02, 2.66249156e-01, 3.87635103e-01, 2.85591012e-02, 4.24826068e-01, 4.45795088e-01, 6.86227676e-01, 1.08848960e-01, 5.96731841e-02, 3.71770228e-01, 1.91548833e-01, 6.95136078e-01, 9.00700636e-01, 8.76363105e-01, 2.67334632e-01, 1.80619709e-01, 7.94060419e-01, 1.42854171e-02, 1.09372387e-01, 8.74028108e-01, 6.46403232e-01, 4.86588834e-01, 5.93446175e-02, 6.11886291e-01, 8.83865057e-01, 3.15879821e-01, 2.27043992e-01, 9.76764951e-01, 6.15620336e-01, 9.76199360e-01, 2.40548962e-01, 3.21795663e-01, 8.75087904e-02, 8.11234663e-01, 6.96070480e-01, 8.12062321e-01, 1.21958818e-01, 3.44348628e-02, 8.72630414e-01, 3.06162776e-01, 1.76043529e-02, 9.45894971e-01, 5.33896401e-01, 6.21642973e-01, 4.93062535e-01, 4.48984262e-01, 2.24560379e-01, 4.24052195e-02, 4.43447610e-01, 8.95646149e-01, 6.05220676e-01, 1.81840491e-01, 9.70831206e-01, 2.12563586e-02, 6.92582693e-01, 7.55946922e-01, 7.95086143e-01, 6.05328941e-01, 3.99350764e-01, 4.32846636e-01, 9.81114529e-01, 4.98266428e-01, 6.37127930e-03, 1.59085889e-01, 6.34682067e-05, 5.59429440e-01, 7.38827633e-01, 8.93214770e-01, 2.16494306e-01, 9.35430573e-02, 4.75665868e-02, 7.80503518e-01, 7.86240041e-01, 7.06854594e-01, 2.13725879e-02, 7.68246091e-01, 4.50234808e-01, 5.21231104e-01, 5.01989826e-03, 4.22081572e-02, 1.65337732e-01, 8.54134740e-01, 4.99430262e-01, 8.94525601e-01, 1.14028379e-01, 3.69739861e-01, 1.32955599e-01, 2.65563824e-01, 2.52811151e-01, 1.44792843e-01, 6.88449594e-01, 4.44921417e-01, 8.23296587e-01, 1.93266317e-01, 1.19033309e-01, 1.36368966e-01, 3.42600285e-01, 5.64505195e-01, 5.57594559e-01, 7.44257892e-01, 8.38231569e-02, 4.11548847e-01, 
3.21010077e-01, 8.55081359e-01, 4.30105779e-01, 1.16229135e-01, 9.87731964e-02, 3.14712335e-01, 4.50880592e-01, 2.72289598e-01, 6.31615256e-01, 8.97432958e-01, 4.44764250e-01, 8.03776440e-01, 2.68767748e-02, 2.43374608e-01, 4.02141103e-01, 4.98881209e-01, 5.33173003e-01, 8.82890436e-01, 7.16149148e-01, 4.19664401e-01, 2.29335357e-01, 2.88637806e-01, 3.44696803e-01, 6.78171906e-01, 5.69849716e-01, 5.86454477e-01, 3.54474989e-01, 9.03876540e-01, 6.45980000e-01, 6.34887593e-01, 7.88039746e-02, 2.04814126e-01, 7.82251754e-01, 2.43147074e-01, 7.50951808e-01, 1.72799092e-02, 2.95349590e-01, 6.57991826e-01, 8.81214312e-01, 5.73970708e-01, 2.77610881e-01, 1.82155097e-01, 7.69797417e-02, 6.44792402e-01, 9.46950998e-01, 7.73064845e-01, 6.04733624e-01, 5.80094567e-01, 1.67498426e-01, 2.66514296e-01, 6.50140368e-01, 1.91170299e-01, 2.08752199e-01, 3.01664091e-01, 9.85033484e-01, 2.92909152e-01, 8.65816607e-01, 1.85222119e-01, 2.28814559e-01, 1.34286382e-02, 2.89234322e-01, 8.18668708e-01, 4.71706924e-01, 9.23199803e-01, 2.80879188e-01, 1.47319284e-01, 4.13915748e-01, 9.31274932e-02, 6.66322195e-01, 9.66953974e-01, 3.19405786e-01, 6.69486551e-01, 5.03096313e-02, 6.95225201e-01, 5.78469859e-01, 6.29481655e-01, 1.39252534e-01, 1.22564968e-01, 6.80663678e-01, 6.34607157e-01, 6.42765834e-01, 1.57127410e-02, 2.92132086e-01, 5.24423878e-01, 4.68676824e-01, 2.86003928e-01, 7.18608322e-01, 8.95617933e-01, 5.48844309e-01, 1.74517278e-01, 5.24379196e-01, 2.13526524e-01, 5.88375435e-01, 9.88560185e-01, 4.17435771e-01, 6.14438688e-01, 9.53760881e-01, 5.27151288e-01, 7.03017278e-01, 3.44448559e-01, 4.47059676e-01, 2.83414901e-01, 1.98979011e-01, 4.24917361e-01, 5.73172761e-01, 2.32398853e-02, 1.65887230e-01, 4.05552785e-01, 9.29665524e-01, 2.26135696e-01, 9.20563384e-01, 7.65259963e-01, 4.54820075e-01, 8.97710267e-01, 3.78559302e-03, 9.15219382e-01, 3.55705698e-01, 6.94905124e-01, 8.58540202e-01, 3.89790666e-01, 2.49478206e-01, 7.93679304e-01, 4.75830027e-01, 4.40425353e-01, 3.70579459e-01, 
1.40578049e-01, 1.70386675e-01, 7.04056121e-01, 4.85963102e-01, 9.68450060e-01, 6.77178001e-01, 2.65934654e-01, 2.58915007e-01, 6.70052890e-01, 2.61945109e-01, 8.46207759e-01, 1.01928951e-01, 2.85611334e-01, 2.45776933e-01, 2.66658783e-01, 3.71724077e-01, 4.34319025e-01, 4.24407347e-01, 7.15417683e-01, 8.07997684e-01, 1.64296275e-01, 6.01638065e-01, 8.60606804e-02, 2.68719187e-01, 5.11764101e-01, 9.75844338e-01, 7.81226782e-01, 2.20925515e-01, 7.18135040e-01, 9.82395577e-01, 8.39160243e-01, 9.08058083e-01, 6.88010677e-01, 8.14271847e-01, 5.12460821e-01, 1.17311345e-01, 5.96075228e-01, 9.17455497e-01, 2.12052706e-01, 7.04074603e-01, 8.72872565e-02, 8.76047818e-01, 6.96235046e-01, 8.54801557e-01, 2.49729159e-01, 9.76594604e-01, 2.87386363e-01, 2.36461559e-02, 9.94075254e-01, 4.25193986e-01, 7.61869994e-01, 5.13334255e-01, 6.44711165e-02, 8.92156689e-01, 3.55235167e-01, 1.08154647e-01, 8.78446825e-01, 2.43833016e-01, 9.23071293e-01, 2.72724115e-01, 9.46631338e-01, 3.74510294e-01, 4.08451278e-02, 9.78392777e-01, 3.65079221e-01, 6.37199516e-01, 5.51144906e-01, 5.25978080e-01, 1.42803678e-01, 4.05451674e-01, 7.79788219e-01, 6.26009784e-01, 3.35249497e-01, 1.43159543e-02, 1.80363779e-01, 5.05096904e-01, 2.82619947e-01, 5.83561392e-01, 3.10951324e-01, 8.73223968e-01, 4.38545619e-01, 4.81348800e-01, 6.68497085e-01, 3.79345401e-01, 9.58832501e-01, 1.89869550e-01, 2.34083070e-01, 2.94066207e-01, 5.74892667e-02, 6.92106828e-02, 9.61127686e-02, 6.72650672e-02, 8.47345378e-01, 2.80916761e-01, 7.32177357e-03, 9.80785961e-01, 5.73192225e-02, 8.48781331e-01, 8.83225408e-01, 7.34398275e-01, 7.70381941e-01, 6.20778343e-01, 8.96822048e-01, 5.40732486e-01, 3.69704071e-01, 5.77305837e-01, 2.08221827e-01, 7.34275341e-01, 1.06110900e-01, 3.49496706e-01, 8.34948910e-01, 1.56403291e-02, 6.78576376e-01, 8.96141268e-01, 5.94835119e-01, 1.43943153e-01, 3.49618530e-01, 2.10440392e-01, 3.46585620e-01, 1.05153093e-01, 3.45446174e-01, 2.72177079e-01, 7.07946300e-01, 4.33717726e-02, 3.31232203e-01, 
3.91874320e-01, 4.76338141e-01, 6.22777789e-01, 2.95989228e-02, 4.32855769e-01, 7.61049310e-01, 3.63279149e-01, 9.47210350e-01, 6.43721247e-01, 6.58025802e-01, 1.05247633e-02, 5.29974442e-01, 7.30675767e-01, 4.30041079e-01, 6.62634841e-01, 8.25936616e-01, 9.91253704e-01, 6.79399281e-01, 5.44177006e-01, 7.52876048e-01, 3.32139049e-01, 7.98732398e-01, 7.38865223e-01, 9.16055132e-01, 6.11736493e-01, 9.63672879e-01, 1.83778839e-01, 7.27558919e-02, 5.91602822e-01, 3.25235484e-01, 2.34741217e-01, 9.52346277e-01, 9.18556407e-01, 9.35373324e-01, 6.89209070e-01, 2.56049054e-01, 6.17975395e-01, 7.82285691e-01, 9.84983432e-01, 6.62322741e-01, 2.04144457e-01, 3.98446577e-01, 1.38918297e-01, 3.05919921e-01, 3.14043787e-01, 5.91072666e-01, 7.44703771e-01, 8.92272567e-01, 9.78017873e-01, 9.01203161e-01, 1.41526372e-01, 4.14878484e-01, 6.80683651e-01, 5.01733152e-02, 8.14635389e-01, 2.27926375e-01, 9.03269815e-01, 8.68443745e-01, 9.86939190e-01, 7.40779486e-01, 2.61005311e-01, 3.19276232e-01, 9.69509248e-01, 1.11908818e-01, 4.49198556e-01, 1.27056715e-01, 3.84064823e-01, 5.14591811e-01, 2.10747488e-01, 9.53884090e-01, 8.43167950e-01, 4.51187972e-01, 3.75331782e-01, 6.23566461e-01, 3.55290379e-01, 2.95705968e-01, 1.69622690e-01, 1.42981830e-01, 2.72180991e-01, 9.46468040e-01, 3.70932500e-01, 9.94292830e-01, 4.62587505e-01, 7.14817405e-01, 2.45370540e-02, 3.00906377e-01, 5.75768304e-01, 9.71448393e-01, 6.95574827e-02, 3.93693854e-01, 5.29306116e-01, 5.04694554e-01, 6.73797120e-02, 6.76596969e-01, 5.50948898e-01, 3.24909641e-01, 7.70337719e-01, 6.51842631e-03, 3.03264879e-01, 7.61037886e-03, 2.72289601e-01, 1.50502041e-01, 6.71103888e-02, 7.41503703e-01, 1.92088941e-01, 2.19043977e-01, 9.09320161e-01, 2.37993569e-01, 6.18107973e-02, 8.31447852e-01, 2.23355609e-01, 1.84789435e-01, 4.16104518e-01, 4.21573859e-01, 8.72446305e-02, 2.97294197e-01, 4.50328256e-01, 8.72199917e-01, 2.51279916e-01, 4.86219272e-01, 7.57071329e-01, 4.85655942e-01, 1.06187277e-01, 4.92341327e-01, 1.46017513e-01, 
5.25421017e-01, 4.22637906e-01, 2.24685018e-01, 8.72648431e-01, 5.54051490e-01, 1.80745062e-01, 2.12756336e-01, 5.20883169e-01, 7.60363654e-01, 8.30254678e-01, 5.00003328e-01, 4.69017439e-01, 6.38105527e-01, 3.50638261e-02, 5.22217353e-02, 9.06516882e-02, 8.52975842e-01, 1.19985883e-01, 3.74926753e-01, 6.50302066e-01, 1.98875727e-01, 6.28362507e-02, 4.32693501e-01, 3.10500685e-01, 6.20732833e-01, 4.58503272e-01, 3.20790034e-01, 7.91284868e-01, 7.93054570e-01, 2.93406765e-01, 8.95399023e-01, 1.06441034e-01, 7.53085241e-02, 8.67523104e-01, 1.47963482e-01, 1.25584706e-01, 3.81545040e-02, 6.34338619e-01, 1.76368938e-02, 5.75553531e-02, 5.31607516e-01, 2.63869588e-01, 9.41945823e-01, 9.24028838e-02, 5.21496463e-01, 7.74866558e-01, 5.65210610e-01, 7.28015327e-02, 6.51963790e-01, 8.94727453e-01, 4.49571590e-01, 1.29932405e-01, 8.64026259e-01, 9.92599934e-01, 7.43721560e-01, 8.87300215e-01, 1.06369925e-01, 8.11335531e-01, 7.87734900e-01, 9.87344678e-01, 5.32502820e-01, 4.42612382e-01, 9.64041183e-01, 1.66085871e-01, 1.12937664e-01, 5.24423470e-01, 6.54689333e-01, 4.59119726e-01, 5.22774091e-01, 3.08722276e-02, 6.26979315e-01, 4.49754105e-01, 8.07495757e-01, 2.34199499e-01, 1.67765675e-01, 9.22168418e-01, 3.73210378e-01, 8.04432575e-01, 5.61890354e-01, 4.47025593e-01, 6.43155678e-01, 2.40407640e-01, 5.91631279e-01, 1.59369206e-01, 7.75799090e-01, 8.32067212e-01, 5.59791576e-02, 6.39105224e-01, 4.85274738e-01, 2.12630838e-01, 2.81431312e-02, 7.16205363e-01, 6.83885011e-01, 5.23869697e-01, 9.99418314e-01, 8.35331599e-01, 4.69877463e-02, 6.74712562e-01, 7.99273684e-01, 2.77001890e-02, 5.75809742e-01, 2.78513031e-01, 8.36209905e-01, 7.25472379e-01, 4.87173943e-01, 7.88311357e-01, 9.64676177e-01, 1.75752651e-01, 4.98112580e-01, 8.08850418e-02, 6.40981131e-01, 4.06647450e-01, 8.46539387e-01, 2.12620694e-01, 9.11012851e-01, 8.25041445e-01, 8.90065575e-01, 9.63626055e-01, 5.96689242e-01, 1.63372670e-01, 4.51640148e-01, 3.43026542e-01, 5.80658851e-01, 2.82327625e-01, 4.75535418e-01, 
6.27760926e-01, 8.46314115e-01, 9.61961932e-01, 3.19806094e-01, 5.05508062e-01, 5.28102944e-01, 6.13045057e-01, 7.44714938e-01, 1.50586073e-01, 7.91878033e-01, 4.89839179e-01, 3.10496849e-01, 8.82309038e-01, 2.86922314e-01, 4.84687559e-01, 5.20838630e-01, 4.62955493e-01, 2.38185305e-01, 5.47259907e-02, 7.10916137e-01, 7.31887202e-01, 6.25602317e-01, 8.77741168e-01, 4.19881322e-01, 4.81222328e-01, 1.28224501e-01, 2.46034010e-01, 3.34971854e-01, 7.37216484e-01, 5.62134821e-02, 7.14089724e-01, 9.85549393e-01, 4.66295827e-01, 3.08722434e-03, 4.70237690e-01, 2.66524167e-01, 7.93875484e-01, 4.54795911e-02, 8.09702944e-01, 1.47709735e-02, 1.70082405e-01, 6.35905179e-01, 3.75379109e-01, 4.30315011e-01, 3.15788760e-01, 5.58065230e-01, 2.24643800e-01, 2.42142981e-01, 6.57283636e-01, 3.34921891e-01, 1.26588975e-01, 7.68064155e-01, 9.43856291e-01, 4.47518596e-01, 5.44453573e-01, 9.95764932e-01, 7.16444391e-01, 8.51019765e-01, 1.01179183e-01, 4.45473958e-01, 4.60327322e-01, 4.96895844e-02, 4.72907738e-01, 5.58987444e-01, 3.41027487e-01, 1.56175026e-01, 7.58283148e-01, 6.83600909e-01, 2.14623396e-01, 3.27348880e-01, 3.92517893e-01, 6.70418431e-01, 5.16440832e-01, 8.63140348e-01, 5.73277464e-01, 3.46608058e-01, 7.39396341e-01, 7.20852434e-01, 2.35653246e-02, 3.89935659e-01, 7.53783745e-01, 6.34563528e-01, 8.79339335e-01, 7.41599159e-02, 5.62433904e-01, 6.15553852e-01, 4.56956324e-01, 5.20047447e-01, 5.26845015e-02, 5.58471266e-01, 1.63632233e-01, 5.38936665e-02, 6.49593683e-01, 2.56838748e-01, 8.99035326e-01, 7.20847756e-01, 5.68954684e-01, 7.43684755e-01, 5.70924238e-01, 3.82318724e-01, 4.89328290e-01, 5.62208561e-01, 4.97540804e-02, 4.18011085e-01, 6.88041565e-01, 2.16234653e-01, 7.89548214e-01, 8.46136387e-01, 8.46816189e-01, 1.73842353e-01, 6.11627842e-02, 8.44440559e-01, 4.50646654e-01, 3.74785037e-01, 4.87196697e-01, 4.56276448e-01, 9.13284391e-01, 4.15715464e-01, 7.13597697e-01, 1.23641270e-02, 5.10031271e-01, 4.74601930e-02, 2.55731159e-01, 3.22090006e-01, 1.91165703e-01, 
4.51170940e-01, 7.50843157e-01, 4.42420576e-01, 4.25380660e-01, 4.50667257e-01, 6.55689206e-01, 9.68257670e-02, 1.96528793e-01, 8.97343028e-01, 4.99940904e-01, 6.65504083e-01, 9.41828079e-01, 4.54397338e-01, 5.61893331e-01, 5.09839880e-01, 4.53117514e-01, 8.96804127e-02, 1.74888861e-01, 6.65641378e-01, 2.81668336e-01, 1.89532742e-01, 5.61668382e-01, 8.68330157e-02, 8.25092797e-01, 5.18106324e-01, 1.71904024e-01, 3.68385523e-01, 1.62005436e-01, 7.48507399e-01, 9.30274827e-01, 2.38198517e-01, 9.52222901e-01, 5.23587800e-01, 6.94384557e-01, 1.09338652e-01, 4.83356794e-01, 2.73050402e-01, 3.68027050e-01, 5.92366466e-01, 1.83192289e-01, 8.60376029e-01, 7.13926203e-01, 8.16750052e-01, 1.57890291e-01, 6.25691951e-01, 5.24831646e-01, 1.73873797e-01, 1.02429784e-01, 9.17488471e-01, 4.03584434e-01, 9.31170884e-01, 2.79386137e-01, 8.77745206e-01, 2.45200576e-01, 1.28896951e-01, 3.15713052e-01, 5.27874291e-01, 2.16444335e-01, 7.03883817e-01, 7.74738919e-02, 8.42422142e-01, 3.75598924e-01, 3.51002411e-01, 6.22752776e-01, 4.82407943e-01, 7.43107867e-01, 9.46182666e-01, 9.44344819e-01, 3.28124763e-01, 1.06147431e-01, 1.65102684e-01, 3.84060507e-01, 2.91057722e-01, 7.68173662e-02, 1.03543651e-01, 6.76698940e-01, 1.43141994e-01, 7.21342202e-01, 6.69471294e-03, 9.07298311e-01, 5.57080171e-01, 8.10954489e-01, 4.11120526e-01, 2.06407453e-01, 2.59590556e-01, 7.58512718e-01, 5.79873897e-01, 2.92875650e-01, 2.83686529e-01, 2.42829343e-01, 9.19323719e-01, 3.46832864e-01, 3.58238858e-01, 7.42827585e-01, 2.05760059e-01, 9.58438860e-01, 5.66326411e-01, 6.60292846e-01, 5.61095078e-02, 6.79465531e-01, 7.05118513e-01, 4.44713264e-01, 2.09732933e-01, 5.22732436e-01, 1.74396512e-01, 5.29356748e-01, 4.38475687e-01, 4.94036404e-01, 4.09785794e-01, 6.40025507e-01, 5.79371821e-01, 1.57726118e-01, 6.04572263e-01, 5.41072639e-01, 5.18847173e-01, 1.97093284e-01, 8.91767002e-01, 4.29050835e-01, 8.25490570e-01, 3.87699807e-01, 4.50705808e-01, 2.49371643e-01, 3.36074898e-01, 9.29925118e-01, 6.65393649e-01, 
9.07275994e-01, 3.73075859e-01, 4.14044139e-03, 2.37463702e-01, 2.25893784e-01, 2.46900245e-01, 4.50350196e-01, 3.48618117e-01, 5.07193932e-01, 5.23435142e-01, 8.13611417e-01, 8.92715622e-01, 1.02623450e-01, 3.06088345e-01, 7.80461650e-01, 2.21453645e-01, 2.01419652e-01, 2.84254457e-01, 3.68286735e-01, 7.39358243e-01, 8.97879394e-01, 9.81599566e-01, 7.56526442e-01, 7.37645545e-01, 4.23976657e-02, 8.25922012e-01, 2.60956996e-01, 2.90702065e-01, 8.98388344e-01, 3.03733299e-01, 8.49071471e-01, 3.45835425e-01, 7.65458276e-01, 5.68094872e-01, 8.93770930e-01, 9.93161641e-01, 5.63368667e-02, 4.26548945e-01, 5.46745780e-01, 5.75674571e-01, 7.94599487e-01, 7.18935553e-02, 4.46492976e-01, 6.40240123e-01, 2.73246969e-01, 2.00465968e-01, 1.30718835e-01, 1.92492005e-01, 1.96617189e-01, 6.61271644e-01, 8.12687657e-01, 8.66342445e-01 }, {0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, false, 5}}; typedef LinkageTest<float, int> LinkageTestF_Int; TEST_P(LinkageTestF_Int, Result) { EXPECT_TRUE(score == 1.0); } INSTANTIATE_TEST_CASE_P(LinkageTest, LinkageTestF_Int, ::testing::ValuesIn(linkage_inputsf2)); } // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/cluster/cluster_solvers_deprecated.cu
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <gtest/gtest.h>
#include <iostream>
#include <memory>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/spectral/cluster_solvers_deprecated.cuh>

namespace raft {
namespace spectral {

// Sanity check for the deprecated k-means cluster solver: feeding it null
// input/output buffers must be rejected with an exception rather than
// silently accepted.
TEST(Raft, ClusterSolvers)
{
  using namespace matrix;
  using index_type = int;
  using value_type = double;

  raft::resources handle;

  // Solver configuration knobs.
  index_type max_iters{100};
  value_type tolerance{1.0e-10};
  unsigned long long rng_seed{100110021003};

  auto stream = resource::get_cuda_stream(handle);

  // Problem shape: n points of dimension d, grouped into k clusters.
  index_type n{100};
  index_type d{10};
  index_type k{5};

  // nullptr buffers are expected to trigger exceptions inside solve():
  value_type* eigvecs{nullptr};
  index_type* codes{nullptr};

  cluster_solver_config_deprecated_t<index_type, value_type> config{
    k, max_iters, tolerance, rng_seed};
  kmeans_solver_deprecated_t<index_type, value_type> solver{config};

  EXPECT_ANY_THROW(solver.solve(handle, n, d, eigvecs, codes));
}

}  // namespace spectral
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/cluster/kmeans.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <gtest/gtest.h>
#include <optional>
#include <raft/core/resource/cuda_stream.hpp>
#include <vector>

#include <raft/cluster/kmeans.cuh>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/make_blobs.cuh>
#include <raft/stats/adjusted_rand_index.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <thrust/fill.h>

namespace raft {

/**
 * Test parameters: dataset shape (n_row x n_col), target cluster count,
 * convergence tolerance, and whether a (uniform) sample-weight vector is
 * passed to the fit.
 */
template <typename T>
struct KmeansInputs {
  int n_row;
  int n_col;
  int n_clusters;
  T tol;
  bool weighted;
};

/**
 * Thin wrapper over kmeans::cluster_cost: reduces the per-sample
 * min-cluster-distance vector into a single scalar cost using a plain sum.
 */
template <typename DataT, typename IndexT>
void run_cluster_cost(const raft::resources& handle,
                      raft::device_vector_view<DataT, IndexT> minClusterDistance,
                      rmm::device_uvector<char>& workspace,
                      raft::device_scalar_view<DataT> clusterCost)
{
  raft::cluster::kmeans::cluster_cost(
    handle, minClusterDistance, workspace, clusterCost, raft::add_op{});
}

template <typename T>
class KmeansTest : public ::testing::TestWithParam<KmeansInputs<T>> {
 protected:
  KmeansTest()
    : d_labels(0, resource::get_cuda_stream(handle)),
      d_labels_ref(0, resource::get_cuda_stream(handle)),
      d_centroids(0, resource::get_cuda_stream(handle)),
      d_sample_weight(0, resource::get_cuda_stream(handle))
  {
  }

  /**
   * Exercises the low-level k-means building blocks directly:
   * shuffle_and_gather + init_plus_plus for initialization, then one
   * fit_main, asserting that (a) the total cluster cost decreased and
   * (b) two independent ways of counting samples per cluster agree.
   */
  void apiTest()
  {
    testparams  = ::testing::TestWithParam<KmeansInputs<T>>::GetParam();
    auto stream = resource::get_cuda_stream(handle);

    int n_samples              = testparams.n_row;
    int n_features             = testparams.n_col;
    params.n_clusters          = testparams.n_clusters;
    params.tol                 = testparams.tol;
    params.n_init              = 1;
    params.rng_state.seed      = 1;
    params.oversampling_factor = 0;

    // Generate a labeled blob dataset; the reference labels come from
    // make_blobs itself.
    auto X      = raft::make_device_matrix<T, int>(handle, n_samples, n_features);
    auto labels = raft::make_device_vector<int, int>(handle, n_samples);
    raft::random::make_blobs<T, int>(X.data_handle(),
                                     labels.data_handle(),
                                     n_samples,
                                     n_features,
                                     params.n_clusters,
                                     stream,
                                     true,
                                     nullptr,
                                     nullptr,
                                     T(1.0),
                                     false,
                                     (T)-10.0f,
                                     (T)10.0f,
                                     (uint64_t)1234);

    d_labels.resize(n_samples, stream);
    d_labels_ref.resize(n_samples, stream);
    d_centroids.resize(params.n_clusters * n_features, stream);
    raft::copy(d_labels_ref.data(), labels.data_handle(), n_samples, stream);

    // Local uniform sample weights; named d_weights to avoid shadowing the
    // d_sample_weight member used by basicTest().
    rmm::device_uvector<T> d_weights(n_samples, stream);
    thrust::fill(
      thrust::cuda::par.on(stream), d_weights.data(), d_weights.data() + n_samples, 1);
    auto weight_view =
      raft::make_device_vector_view<const T, int>(d_weights.data(), n_samples);

    T inertia  = 0;
    int n_iter = 0;
    rmm::device_uvector<char> workspace(0, stream);
    rmm::device_uvector<T> L2NormBuf_OR_DistBuf(0, stream);

    auto X_view = raft::make_const_mdspan(X.view());
    auto centroids_view =
      raft::make_device_matrix_view<T, int>(d_centroids.data(), params.n_clusters, n_features);
    auto miniX = raft::make_device_matrix<T, int>(handle, n_samples / 4, n_features);

    // Initialize kmeans on a portion of X
    raft::cluster::kmeans::shuffle_and_gather(
      handle,
      X_view,
      raft::make_device_matrix_view<T, int>(miniX.data_handle(), miniX.extent(0), miniX.extent(1)),
      miniX.extent(0),
      params.rng_state.seed);

    raft::cluster::kmeans::init_plus_plus(
      handle, params, raft::make_const_mdspan(miniX.view()), centroids_view, workspace);

    auto minClusterDistance = raft::make_device_vector<T, int>(handle, n_samples);
    auto minClusterAndDistance =
      raft::make_device_vector<raft::KeyValuePair<int, T>, int>(handle, n_samples);
    auto L2NormX           = raft::make_device_vector<T, int>(handle, n_samples);
    auto clusterCostBefore = raft::make_device_scalar<T>(handle, 0);
    auto clusterCostAfter  = raft::make_device_scalar<T>(handle, 0);

    raft::linalg::rowNorm(L2NormX.data_handle(),
                          X.data_handle(),
                          X.extent(1),
                          X.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);

    raft::cluster::kmeans::min_cluster_distance(handle,
                                                X_view,
                                                centroids_view,
                                                minClusterDistance.view(),
                                                L2NormX.view(),
                                                L2NormBuf_OR_DistBuf,
                                                params.metric,
                                                params.batch_samples,
                                                params.batch_centroids,
                                                workspace);

    run_cluster_cost(handle, minClusterDistance.view(), workspace, clusterCostBefore.view());

    // Run a fit of kmeans
    raft::cluster::kmeans::fit_main(handle,
                                    params,
                                    X_view,
                                    weight_view,
                                    centroids_view,
                                    raft::make_host_scalar_view(&inertia),
                                    raft::make_host_scalar_view(&n_iter),
                                    workspace);

    // Check that the cluster cost decreased
    raft::cluster::kmeans::min_cluster_distance(handle,
                                                X_view,
                                                centroids_view,
                                                minClusterDistance.view(),
                                                L2NormX.view(),
                                                L2NormBuf_OR_DistBuf,
                                                params.metric,
                                                params.batch_samples,
                                                params.batch_centroids,
                                                workspace);
    run_cluster_cost(handle, minClusterDistance.view(), workspace, clusterCostAfter.view());

    T h_clusterCostBefore = T(0);
    T h_clusterCostAfter  = T(0);
    raft::update_host(&h_clusterCostBefore, clusterCostBefore.data_handle(), 1, stream);
    raft::update_host(&h_clusterCostAfter, clusterCostAfter.data_handle(), 1, stream);
    // update_host issues stream-ordered async copies; synchronize before
    // reading the host-side values, otherwise the comparison races the copy.
    resource::sync_stream(handle, stream);
    ASSERT_TRUE(h_clusterCostAfter < h_clusterCostBefore);

    // Count samples in clusters using 2 methods and compare them
    // Fill minClusterAndDistance
    raft::cluster::kmeans::min_cluster_and_distance(
      handle,
      X_view,
      raft::make_device_matrix_view<const T, int>(
        d_centroids.data(), params.n_clusters, n_features),
      minClusterAndDistance.view(),
      L2NormX.view(),
      L2NormBuf_OR_DistBuf,
      params.metric,
      params.batch_samples,
      params.batch_centroids,
      workspace);

    // Iterator that maps each (cluster, distance) pair to its cluster index.
    raft::cluster::kmeans::KeyValueIndexOp<int, T> conversion_op;
    cub::TransformInputIterator<int,
                                raft::cluster::kmeans::KeyValueIndexOp<int, T>,
                                raft::KeyValuePair<int, T>*>
      itr(minClusterAndDistance.data_handle(), conversion_op);

    auto sampleCountInCluster = raft::make_device_vector<T, int>(handle, params.n_clusters);
    auto weightInCluster      = raft::make_device_vector<T, int>(handle, params.n_clusters);
    auto newCentroids = raft::make_device_matrix<T, int>(handle, params.n_clusters, n_features);

    // Method 1: accumulated per-cluster weights from update_centroids.
    raft::cluster::kmeans::update_centroids(handle,
                                            X_view,
                                            weight_view,
                                            raft::make_device_matrix_view<const T, int>(
                                              d_centroids.data(), params.n_clusters, n_features),
                                            itr,
                                            weightInCluster.view(),
                                            newCentroids.view());

    // Method 2: direct per-cluster sample counting.
    raft::cluster::kmeans::count_samples_in_cluster(handle,
                                                    params,
                                                    X_view,
                                                    L2NormX.view(),
                                                    newCentroids.view(),
                                                    workspace,
                                                    sampleCountInCluster.view());

    // With unit weights both methods must agree (up to tolerance).
    ASSERT_TRUE(devArrMatch(sampleCountInCluster.data_handle(),
                            weightInCluster.data_handle(),
                            params.n_clusters,
                            CompareApprox<T>(params.tol)));
  }

  /**
   * End-to-end test of the public kmeans_fit_predict API on a blob dataset,
   * scoring the predicted labels against the generating labels with the
   * adjusted Rand index (perfect recovery expected: score == 1.0).
   */
  void basicTest()
  {
    testparams = ::testing::TestWithParam<KmeansInputs<T>>::GetParam();

    int n_samples              = testparams.n_row;
    int n_features             = testparams.n_col;
    params.n_clusters          = testparams.n_clusters;
    params.tol                 = testparams.tol;
    params.n_init              = 5;
    params.rng_state.seed      = 1;
    params.oversampling_factor = 0;

    auto X      = raft::make_device_matrix<T, int>(handle, n_samples, n_features);
    auto labels = raft::make_device_vector<int, int>(handle, n_samples);
    auto stream = resource::get_cuda_stream(handle);

    raft::random::make_blobs<T, int>(X.data_handle(),
                                     labels.data_handle(),
                                     n_samples,
                                     n_features,
                                     params.n_clusters,
                                     stream,
                                     true,
                                     nullptr,
                                     nullptr,
                                     T(1.0),
                                     false,
                                     (T)-10.0f,
                                     (T)10.0f,
                                     (uint64_t)1234);

    d_labels.resize(n_samples, stream);
    d_labels_ref.resize(n_samples, stream);
    d_centroids.resize(params.n_clusters * n_features, stream);

    // Optional sample weights: only populated for the weighted variants.
    std::optional<raft::device_vector_view<const T>> d_sw = std::nullopt;
    auto d_centroids_view =
      raft::make_device_matrix_view<T, int>(d_centroids.data(), params.n_clusters, n_features);
    if (testparams.weighted) {
      d_sample_weight.resize(n_samples, stream);
      d_sw = std::make_optional(
        raft::make_device_vector_view<const T, int>(d_sample_weight.data(), n_samples));
      thrust::fill(thrust::cuda::par.on(stream),
                   d_sample_weight.data(),
                   d_sample_weight.data() + n_samples,
                   1);
    }

    raft::copy(d_labels_ref.data(), labels.data_handle(), n_samples, stream);

    T inertia   = 0;
    int n_iter  = 0;
    auto X_view = raft::make_const_mdspan(X.view());

    raft::cluster::kmeans_fit_predict<T, int>(
      handle,
      params,
      X_view,
      d_sw,
      d_centroids_view,
      raft::make_device_vector_view<int, int>(d_labels.data(), n_samples),
      raft::make_host_scalar_view<T>(&inertia),
      raft::make_host_scalar_view<int>(&n_iter));

    resource::sync_stream(handle, stream);

    score = raft::stats::adjusted_rand_index(
      d_labels_ref.data(), d_labels.data(), n_samples, resource::get_cuda_stream(handle));

    // On failure, dump a prefix of expected vs. actual labels for debugging.
    if (score < 1.0) {
      std::stringstream ss;
      ss << "Expected: " << raft::arr2Str(d_labels_ref.data(), 25, "d_labels_ref", stream);
      std::cout << (ss.str().c_str()) << '\n';
      ss.str(std::string());
      ss << "Actual: " << raft::arr2Str(d_labels.data(), 25, "d_labels", stream);
      std::cout << (ss.str().c_str()) << '\n';
      std::cout << "Score = " << score << '\n';
    }
  }

  void SetUp() override
  {
    basicTest();
    apiTest();
  }

 protected:
  raft::resources handle;
  KmeansInputs<T> testparams;
  rmm::device_uvector<int> d_labels;      // predicted labels
  rmm::device_uvector<int> d_labels_ref;  // generating (reference) labels
  rmm::device_uvector<T> d_centroids;
  rmm::device_uvector<T> d_sample_weight;
  double score;  // adjusted Rand index of predicted vs. reference labels
  raft::cluster::KMeansParams params;
};

const std::vector<KmeansInputs<float>> inputsf2 = {{1000, 32, 5, 0.0001f, true},
                                                   {1000, 32, 5, 0.0001f, false},
                                                   {1000, 100, 20, 0.0001f, true},
                                                   {1000, 100, 20, 0.0001f, false},
                                                   {10000, 32, 10, 0.0001f, true},
                                                   {10000, 32, 10, 0.0001f, false},
                                                   {10000, 100, 50, 0.0001f, true},
                                                   {10000, 100, 50, 0.0001f, false},
                                                   {10000, 500, 100, 0.0001f, true},
                                                   {10000, 500, 100, 0.0001f, false}};

const std::vector<KmeansInputs<double>> inputsd2 = {{1000, 32, 5, 0.0001, true},
                                                    {1000, 32, 5, 0.0001, false},
                                                    {1000, 100, 20, 0.0001, true},
                                                    {1000, 100, 20, 0.0001, false},
                                                    {10000, 32, 10, 0.0001, true},
                                                    {10000, 32, 10, 0.0001, false},
                                                    {10000, 100, 50, 0.0001, true},
                                                    {10000, 100, 50, 0.0001, false},
                                                    {10000, 500, 100, 0.0001, true},
                                                    {10000, 500, 100, 0.0001, false}};

typedef KmeansTest<float> KmeansTestF;
TEST_P(KmeansTestF, Result) { ASSERT_TRUE(score == 1.0); }

typedef KmeansTest<double> KmeansTestD;
TEST_P(KmeansTestD, Result) { ASSERT_TRUE(score == 1.0); }

INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestD, ::testing::ValuesIn(inputsd2));

}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/cluster/cluster_solvers.cu
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <gtest/gtest.h>
#include <iostream>
#include <memory>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/device_id.hpp>
#include <raft/core/resources.hpp>
#include <raft/spectral/cluster_solvers.cuh>
#include <raft/spectral/modularity_maximization.cuh>

namespace raft {
namespace spectral {

// The k-means cluster solver must reject nullptr input/output buffers with
// an exception instead of silently proceeding.
TEST(Raft, ClusterSolvers)
{
  using namespace matrix;
  using index_type = int;
  using value_type = double;

  raft::resources handle;

  // Solver configuration knobs.
  index_type max_iters{100};
  value_type tolerance{1.0e-10};
  unsigned long long rng_seed{100110021003};

  auto stream = resource::get_cuda_stream(handle);

  // Problem shape: n points of dimension d, grouped into k clusters.
  index_type n{100};
  index_type d{10};
  index_type k{5};

  // nullptr expected to trigger exceptions:
  value_type* eigvecs{nullptr};
  index_type* codes{nullptr};

  cluster_solver_config_t<index_type, value_type> config{k, max_iters, tolerance, rng_seed};
  kmeans_solver_t<index_type, value_type> solver{config};

  EXPECT_ANY_THROW(solver.solve(handle, n, d, eigvecs, codes));
}

// Modularity maximization and its analysis entry point must both throw when
// handed an empty sparse matrix and nullptr output buffers.
TEST(Raft, ModularitySolvers)
{
  using namespace matrix;
  using index_type = int;
  using value_type = double;

  raft::resources handle;
  ASSERT_EQ(0, resource::get_device_id(handle));

  // Eigen-solver configuration.
  index_type neigvs{10};
  index_type max_iters{100};
  index_type restart_iter{10};
  value_type tolerance{1.0e-10};
  bool reorthogonalize{true};

  // nullptr expected to trigger exceptions:
  index_type* clusters{nullptr};
  value_type* eigvals{nullptr};
  value_type* eigvecs{nullptr};

  unsigned long long rng_seed{100110021003};
  eigen_solver_config_t<index_type, value_type> eigen_cfg{
    neigvs, max_iters, restart_iter, tolerance, reorthogonalize, rng_seed};
  lanczos_solver_t<index_type, value_type> eigen_solver{eigen_cfg};

  index_type k{5};
  cluster_solver_config_t<index_type, value_type> cluster_cfg{k, max_iters, tolerance, rng_seed};
  kmeans_solver_t<index_type, value_type> cluster_solver{cluster_cfg};

  auto stream = resource::get_cuda_stream(handle);

  // An empty graph (all-null CSR arrays, zero rows/nnz).
  sparse_matrix_t<index_type, value_type> empty_graph{handle, nullptr, nullptr, nullptr, 0, 0};

  EXPECT_ANY_THROW(spectral::modularity_maximization(
    handle, empty_graph, eigen_solver, cluster_solver, clusters, eigvals, eigvecs));

  value_type modularity{0};
  EXPECT_ANY_THROW(spectral::analyzeModularity(handle, empty_graph, k, clusters, modularity));
}

}  // namespace spectral
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/cluster/kmeans_balanced.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.h"

#include <gtest/gtest.h>
#include <optional>
#include <raft/core/resource/cuda_stream.hpp>
#include <vector>

#include <raft/cluster/kmeans_balanced.cuh>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/handle.hpp>
#include <raft/core/operators.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/random/make_blobs.cuh>
#include <raft/stats/adjusted_rand_index.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <thrust/fill.h>

/* This test takes advantage of the fact that make_blobs generates balanced clusters.
 * It doesn't currently test whether the algorithm can make balanced clusters with an imbalanced
 * dataset.
 */
namespace raft {

// Test parameters: dataset shape, cluster count, the balanced-kmeans
// hyper-parameters, and a comparison tolerance.
template <typename MathT, typename IdxT>
struct KmeansBalancedInputs {
  IdxT n_rows;
  IdxT n_cols;
  IdxT n_clusters;
  raft::cluster::kmeans_balanced_params kb_params;
  MathT tol;
};

// Pretty-printer used by gtest to label parameterized test cases.
template <typename MathT, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const KmeansBalancedInputs<MathT, IdxT>& p)
{
  os << "{ " << p.n_rows << ", " << p.n_cols << ", " << p.n_clusters << ", " << p.kb_params.n_iters
     << static_cast<int>(p.kb_params.metric) << '}' << std::endl;
  return os;
}

// Fixture for balanced k-means: generates blobs in MathT, optionally converts
// them to DataT via MappingOpT, runs fit_predict and scores the predicted
// labels against the generating labels with the adjusted Rand index.
template <typename DataT, typename MathT, typename LabelT, typename IdxT, typename MappingOpT>
class KmeansBalancedTest : public ::testing::TestWithParam<KmeansBalancedInputs<MathT, IdxT>> {
 protected:
  KmeansBalancedTest()
    : stream(resource::get_cuda_stream(handle)),
      d_labels(0, stream),
      d_labels_ref(0, stream),
      d_centroids(0, stream)
  {
  }

  void basicTest()
  {
    MappingOpT op{};
    auto p = ::testing::TestWithParam<KmeansBalancedInputs<MathT, IdxT>>::GetParam();

    auto X           = raft::make_device_matrix<DataT, IdxT>(handle, p.n_rows, p.n_cols);
    auto blob_labels = raft::make_device_vector<IdxT, IdxT>(handle, p.n_rows);

    // When DataT differs from MathT, blobs are generated into a staging
    // buffer and converted afterwards; otherwise they are written into X
    // directly.
    MathT* blobs_ptr;
    rmm::device_uvector<MathT> blobs(0, stream);
    if constexpr (!std::is_same_v<DataT, MathT>) {
      blobs.resize(p.n_rows * p.n_cols, stream);
      blobs_ptr = blobs.data();
    } else {
      blobs_ptr = X.data_handle();
    }

    raft::random::make_blobs<MathT, IdxT>(blobs_ptr,
                                          blob_labels.data_handle(),
                                          p.n_rows,
                                          p.n_cols,
                                          p.n_clusters,
                                          stream,
                                          true,
                                          nullptr,
                                          nullptr,
                                          MathT{0.1},
                                          true,
                                          MathT{-1},
                                          MathT{1},
                                          (uint64_t)1234);

    // Convert blobs dataset to DataT if necessary
    if constexpr (!std::is_same_v<DataT, MathT>) {
      raft::linalg::unaryOp(
        X.data_handle(), blobs.data(), p.n_rows * p.n_cols, op.reverse_op, stream);
    }

    d_labels.resize(p.n_rows, stream);
    d_labels_ref.resize(p.n_rows, stream);
    d_centroids.resize(p.n_clusters * p.n_cols, stream);

    // Reference labels: the generating blob labels cast to LabelT.
    raft::linalg::unaryOp(
      d_labels_ref.data(), blob_labels.data_handle(), p.n_rows, raft::cast_op<LabelT>(), stream);

    auto X_view =
      raft::make_device_matrix_view<const DataT, IdxT>(X.data_handle(), X.extent(0), X.extent(1));
    auto d_centroids_view =
      raft::make_device_matrix_view<MathT, IdxT>(d_centroids.data(), p.n_clusters, p.n_cols);
    auto d_labels_view = raft::make_device_vector_view<LabelT, IdxT>(d_labels.data(), p.n_rows);

    raft::cluster::kmeans_balanced::fit_predict(
      handle, p.kb_params, X_view, d_centroids_view, d_labels_view, op);

    resource::sync_stream(handle, stream);

    score = raft::stats::adjusted_rand_index(
      d_labels_ref.data(), d_labels.data(), p.n_rows, resource::get_cuda_stream(handle));

    // On failure, dump a prefix of expected vs. actual labels for debugging.
    if (score < 1.0) {
      std::stringstream ss;
      ss << "Expected: " << raft::arr2Str(d_labels_ref.data(), 25, "d_labels_ref", stream);
      std::cout << (ss.str().c_str()) << '\n';
      ss.str(std::string());
      ss << "Actual: " << raft::arr2Str(d_labels.data(), 25, "d_labels", stream);
      std::cout << (ss.str().c_str()) << '\n';
      std::cout << "Score = " << score << '\n';
    }
  }

  void SetUp() override { basicTest(); }

 protected:
  raft::handle_t handle;
  cudaStream_t stream;
  rmm::device_uvector<LabelT> d_labels;      // predicted labels
  rmm::device_uvector<LabelT> d_labels_ref;  // generating (reference) labels
  rmm::device_uvector<MathT> d_centroids;
  double score;  // adjusted Rand index of predicted vs. reference labels
};

// Builds the test-case grid: fixed hyper-parameters crossed with a set of
// (rows, cols, clusters) shapes.
template <typename MathT, typename IdxT>
std::vector<KmeansBalancedInputs<MathT, IdxT>> get_kmeans_balanced_inputs()
{
  std::vector<KmeansBalancedInputs<MathT, IdxT>> out;
  KmeansBalancedInputs<MathT, IdxT> p;
  p.kb_params.n_iters = 20;
  p.kb_params.metric  = raft::distance::DistanceType::L2Expanded;
  p.tol               = MathT{0.0001};
  std::vector<std::tuple<size_t, size_t, size_t>> row_cols_k = {{1000, 32, 5},
                                                                {1000, 100, 20},
                                                                {10000, 32, 10},
                                                                {10000, 100, 50},
                                                                {10000, 500, 100},
                                                                {1000000, 128, 10}};
  for (auto& rck : row_cols_k) {
    p.n_rows     = static_cast<IdxT>(std::get<0>(rck));
    p.n_cols     = static_cast<IdxT>(std::get<1>(rck));
    p.n_clusters = static_cast<IdxT>(std::get<2>(rck));
    out.push_back(p);
  }
  return out;
}

const auto inputsf_i32 = get_kmeans_balanced_inputs<float, int>();
const auto inputsd_i32 = get_kmeans_balanced_inputs<double, int>();
const auto inputsf_i64 = get_kmeans_balanced_inputs<float, int64_t>(); const auto inputsd_i64 = get_kmeans_balanced_inputs<double, int64_t>(); #define KB_TEST(test_type, test_name, test_inputs) \ typedef RAFT_DEPAREN(test_type) test_name; \ TEST_P(test_name, Result) { ASSERT_TRUE(score == 1.0); } \ INSTANTIATE_TEST_CASE_P(KmeansBalancedTests, test_name, ::testing::ValuesIn(test_inputs)) /* * First set of tests: no conversion */ KB_TEST((KmeansBalancedTest<float, float, uint32_t, int, raft::identity_op>), KmeansBalancedTestFFU32I32, inputsf_i32); KB_TEST((KmeansBalancedTest<double, double, uint32_t, int, raft::identity_op>), KmeansBalancedTestDDU32I32, inputsd_i32); KB_TEST((KmeansBalancedTest<float, float, uint32_t, int64_t, raft::identity_op>), KmeansBalancedTestFFU32I64, inputsf_i64); KB_TEST((KmeansBalancedTest<double, double, uint32_t, int64_t, raft::identity_op>), KmeansBalancedTestDDU32I64, inputsd_i64); KB_TEST((KmeansBalancedTest<float, float, int, int, raft::identity_op>), KmeansBalancedTestFFI32I32, inputsf_i32); KB_TEST((KmeansBalancedTest<float, float, int, int64_t, raft::identity_op>), KmeansBalancedTestFFI32I64, inputsf_i64); KB_TEST((KmeansBalancedTest<float, float, int64_t, int, raft::identity_op>), KmeansBalancedTestFFI64I32, inputsf_i32); KB_TEST((KmeansBalancedTest<float, float, int64_t, int64_t, raft::identity_op>), KmeansBalancedTestFFI64I64, inputsf_i64); /* * Second set of tests: integer dataset with conversion */ template <typename DataT, typename MathT> struct i2f_scaler { // Note: with a scaling factor of 42, and generating blobs with centers between -1 and 1 with a // standard deviation of 0.1, it's statistically very unlikely that we'd overflow const raft::compose_op<raft::div_const_op<MathT>, raft::cast_op<MathT>> op{ raft::div_const_op<MathT>{42}, raft::cast_op<MathT>{}}; const raft::compose_op<raft::cast_op<DataT>, raft::mul_const_op<MathT>> reverse_op{ raft::cast_op<DataT>{}, raft::mul_const_op<MathT>{42}}; RAFT_INLINE_FUNCTION auto 
operator()(const DataT& x) const { return op(x); }; }; KB_TEST((KmeansBalancedTest<int8_t, float, uint32_t, int, i2f_scaler<int8_t, float>>), KmeansBalancedTestFI8U32I32, inputsf_i32); KB_TEST((KmeansBalancedTest<int8_t, double, uint32_t, int, i2f_scaler<int8_t, double>>), KmeansBalancedTestDI8U32I32, inputsd_i32); } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/label/label.cu
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <raft/label/classlabels.cuh>

#include "../test_utils.cuh"
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

#include <iostream>
#include <vector>

namespace raft {
namespace label {

class labelTest : public ::testing::Test {
 protected:
  void SetUp() override {}
  void TearDown() override {}
};

typedef labelTest MakeMonotonicTest;

// Checks that make_monotonic relabels arbitrary float labels into the
// contiguous, order-preserving sequence 1..n_unique.
TEST_F(MakeMonotonicTest, Result)
{
  cudaStream_t stream;
  RAFT_CUDA_TRY(cudaStreamCreate(&stream));

  int m = 12;

  rmm::device_uvector<float> data(m, stream);
  rmm::device_uvector<float> actual(m, stream);
  rmm::device_uvector<float> expected(m, stream);

  // Host fixtures. std::vector replaces the original raw `new float[m]` that
  // was paired with a scalar `delete` (undefined behavior) and leaked on
  // assertion failure.
  std::vector<float> data_h{
    1.0f, 2.0f, 2.0f, 2.0f, 2.0f, 3.0f, 8.0f, 7.0f, 8.0f, 8.0f, 25.0f, 80.0f};
  std::vector<float> expected_h{
    1.0f, 2.0f, 2.0f, 2.0f, 2.0f, 3.0f, 5.0f, 4.0f, 5.0f, 5.0f, 6.0f, 7.0f};

  raft::update_device(data.data(), data_h.data(), m, stream);
  raft::update_device(expected.data(), expected_h.data(), m, stream);

  make_monotonic(actual.data(), data.data(), m, stream);

  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));

  // Compare element-wise as float. The original passed raft::Compare<bool>,
  // which converts both operands to bool and therefore treats any two
  // non-zero values as equal, making the check vacuous.
  ASSERT_TRUE(devArrMatch(actual.data(), expected.data(), m, raft::Compare<float>(), stream));

  RAFT_CUDA_TRY(cudaStreamDestroy(stream));
}

// Checks getUniquelabels (sorted unique extraction) and getOvrlabels
// (one-vs-rest relabeling against a chosen positive class).
TEST(labelTest, Classlabels)
{
  cudaStream_t stream;
  RAFT_CUDA_TRY(cudaStreamCreate(&stream));

  int n_rows = 6;
  rmm::device_uvector<float> y_d(n_rows, stream);
  float y_h[] = {2, -1, 1, 2, 1, 1};
  raft::update_device(y_d.data(), y_h, n_rows, stream);

  rmm::device_uvector<float> y_unique_d(0, stream);
  int n_classes = getUniquelabels(y_unique_d, y_d.data(), n_rows, stream);

  ASSERT_EQ(n_classes, 3);

  float y_unique_exp[] = {-1, 1, 2};
  EXPECT_TRUE(
    devArrMatchHost(y_unique_exp, y_unique_d.data(), n_classes, raft::Compare<float>(), stream));

  rmm::device_uvector<float> y_relabeled_d(n_rows, stream);

  // Positive class = 2: rows labeled 2 become +1, everything else -1.
  getOvrlabels(y_d.data(), n_rows, y_unique_d.data(), n_classes, y_relabeled_d.data(), 2, stream);

  float y_relabeled_exp[] = {1, -1, -1, 1, -1, -1};
  EXPECT_TRUE(
    devArrMatchHost(y_relabeled_exp, y_relabeled_d.data(), n_rows, raft::Compare<float>(), stream));

  RAFT_CUDA_TRY(cudaStreamDestroy(stream));
}
}  // namespace label
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/label/merge_labels.cu
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/label/merge_labels.cuh>

#include "../test_utils.cuh"
#include <raft/core/resources.hpp>
#include <raft/util/cudart_utils.hpp>

#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>

#include <vector>

namespace raft {
namespace label {

/** One merge_labels test case: two labelings, a mask, and the expected merge. */
template <typename Index_>
struct MergeLabelsInputs {
  Index_ N;                      // number of points
  std::vector<Index_> labels_a;  // first labeling (merged in place)
  std::vector<Index_> labels_b;  // second labeling
  std::vector<uint8_t> mask;     // to avoid std::vector<bool> optimization
  std::vector<Index_> expected;  // expected labels_a after the merge
};

template <typename Index_>
class MergeLabelsTest : public ::testing::TestWithParam<MergeLabelsInputs<Index_>> {
 protected:
  MergeLabelsTest()
    : params(::testing::TestWithParam<MergeLabelsInputs<Index_>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      labels_a(params.N, stream),
      labels_b(params.N, stream),
      expected(params.N, stream),
      R(params.N, stream),
      mask(params.N, stream),
      m(stream)
  {
  }

  // Uploads the fixtures, runs merge_labels and checks labels_a against the
  // expected result.
  void Run()
  {
    raft::update_device(labels_a.data(), params.labels_a.data(), params.N, stream);
    raft::update_device(labels_b.data(), params.labels_b.data(), params.N, stream);
    raft::update_device(expected.data(), params.expected.data(), params.N, stream);
    // The host mask is stored as uint8_t to dodge the std::vector<bool>
    // bit-packing; reinterpreting it as bool* is only valid because both
    // types occupy a single byte.
    static_assert(sizeof(bool) == sizeof(uint8_t), "mask copy relies on sizeof(bool) == 1");
    raft::update_device(mask.data(), reinterpret_cast<bool*>(params.mask.data()), params.N, stream);

    merge_labels(labels_a.data(), labels_b.data(), mask.data(), R.data(), m.data(), params.N, stream);

    // Fix: the synchronize result was previously ignored; check it like every
    // other CUDA call in this file.
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));

    ASSERT_TRUE(raft::devArrMatch<Index_>(
      expected.data(), labels_a.data(), params.N, raft::Compare<Index_>(), stream));
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  MergeLabelsInputs<Index_> params;
  rmm::device_uvector<Index_> labels_a, labels_b, expected, R;
  rmm::device_uvector<bool> mask;
  rmm::device_scalar<bool> m;  // "modified" flag used internally by merge_labels
};

using MergeLabelsTestI = MergeLabelsTest<int>;
TEST_P(MergeLabelsTestI, Result) { Run(); }

using MergeLabelsTestL = MergeLabelsTest<int64_t>;
TEST_P(MergeLabelsTestL, Result) { Run(); }

// MAX values play the role of "unlabeled" sentinels in the cases below.
constexpr int MAX32     = std::numeric_limits<int>::max();
constexpr int64_t MAX64 = std::numeric_limits<int64_t>::max();

const std::vector<MergeLabelsInputs<int>> merge_inputs_32 = {
  {4, {1, 1, 3, MAX32}, {1, 3, 3, 1}, {1, 0, 1, 0}, {1, 1, 3, 1}},
  {5, {1, 2, 2, 2, 1}, {4, 2, 4, 4, 4}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
  {6, {1, 2, 1, 4, 5, MAX32}, {1, 2, MAX32, 4, 5, 4}, {1, 1, 0, 1, 1, 0}, {1, 2, 1, 4, 5, 4}},
  {6, {1, 2, 2, 2, 2, 6}, {1, 1, 1, 5, 5, 5}, {1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1}},
  {8,
   {1, 1, 3, 3, MAX32, 1, 3, MAX32},
   {1, 2, 3, 2, MAX32, 2, 2, 2},
   {1, 1, 1, 1, 0, 1, 1, 0},
   {1, 1, 1, 1, MAX32, 1, 1, 1}},
  {8,
   {1, 1, 1, 4, 4, 7, 7, 8},
   {1, 2, 2, 2, 2, 7, 7, 7},
   {1, 1, 1, 1, 0, 0, 1, 1},
   {1, 1, 1, 1, 1, 7, 7, 7}},
};

const std::vector<MergeLabelsInputs<int64_t>> merge_inputs_64 = {
  {4, {1, 1, 3, MAX64}, {1, 3, 3, 1}, {1, 0, 1, 0}, {1, 1, 3, 1}},
  {5, {1, 2, 2, 2, 1}, {4, 2, 4, 4, 4}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}},
  {6, {1, 2, 1, 4, 5, MAX64}, {1, 2, MAX64, 4, 5, 4}, {1, 1, 0, 1, 1, 0}, {1, 2, 1, 4, 5, 4}},
  {6, {1, 2, 2, 2, 2, 6}, {1, 1, 1, 5, 5, 5}, {1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1}},
  {8,
   {1, 1, 3, 3, MAX64, 1, 3, MAX64},
   {1, 2, 3, 2, MAX64, 2, 2, 2},
   {1, 1, 1, 1, 0, 1, 1, 0},
   {1, 1, 1, 1, MAX64, 1, 1, 1}},
  {8,
   {1, 1, 1, 4, 4, 7, 7, 8},
   {1, 2, 2, 2, 2, 7, 7, 7},
   {1, 1, 1, 1, 0, 0, 1, 1},
   {1, 1, 1, 1, 1, 7, 7, 7}},
};

INSTANTIATE_TEST_CASE_P(MergeLabelsTests, MergeLabelsTestI, ::testing::ValuesIn(merge_inputs_32));
INSTANTIATE_TEST_CASE_P(MergeLabelsTests, MergeLabelsTestL, ::testing::ValuesIn(merge_inputs_64));

}  // namespace label
}  // namespace raft
0
rapidsai_public_repos/raft/cpp
rapidsai_public_repos/raft/cpp/internal/CMakeLists.txt
# =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================

# raft_internal is a header-only (INTERFACE) helper target consumed by the test
# and benchmark executables; it is only defined when one of those is enabled.
if(BUILD_TESTS OR BUILD_PRIMS_BENCH)
  add_library(raft_internal INTERFACE)
  # Headers live under cpp/internal; BUILD_INTERFACE keeps the path out of any
  # installed/exported usage requirements.
  target_include_directories(
    raft_internal INTERFACE "$<BUILD_INTERFACE:${RAFT_SOURCE_DIR}/internal>"
  )
  # Require C++17, and CUDA C++17 for CUDA consumers at build time.
  target_compile_features(raft_internal INTERFACE cxx_std_17 $<BUILD_INTERFACE:cuda_std_17>)
endif()
0
rapidsai_public_repos/raft/cpp/internal/raft_internal
rapidsai_public_repos/raft/cpp/internal/raft_internal/neighbors/naive_knn.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <raft/distance/distance_types.hpp>
#include <raft/matrix/detail/select_k.cuh>
#include <raft/spatial/knn/detail/ann_utils.cuh>
#include <raft/util/cuda_utils.cuh>

#include <raft/core/resource/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>

namespace raft::neighbors {

/**
 * Computes the full m x n (row-major) pairwise distance matrix between two
 * row-major datasets x (m x k) and y (n x k), accumulating in EvalT.
 *
 * Thread layout: one thread per row of x along blockIdx.x/threadIdx.x (excess
 * threads exit early); the y rows are covered with a grid-stride loop over the
 * y dimension of the launch grid. Only the inner-product and L2 metric
 * families are handled; any other metric yields zeros (default: break).
 */
template <typename EvalT, typename DataT, typename IdxT>
RAFT_KERNEL naive_distance_kernel(EvalT* dist,
                                  const DataT* x,
                                  const DataT* y,
                                  IdxT m,
                                  IdxT n,
                                  IdxT k,
                                  raft::distance::DistanceType metric)
{
  IdxT midx = IdxT(threadIdx.x) + IdxT(blockIdx.x) * IdxT(blockDim.x);
  if (midx >= m) return;
  IdxT grid_size = IdxT(blockDim.y) * IdxT(gridDim.y);
  for (IdxT nidx = threadIdx.y + blockIdx.y * blockDim.y; nidx < n; nidx += grid_size) {
    EvalT acc = EvalT(0);
    for (IdxT i = 0; i < k; ++i) {
      IdxT xidx = i + midx * k;
      IdxT yidx = i + nidx * k;
      auto xv   = EvalT(x[xidx]);
      auto yv   = EvalT(y[yidx]);
      switch (metric) {
        case raft::distance::DistanceType::InnerProduct: {
          acc += xv * yv;
        } break;
        case raft::distance::DistanceType::L2SqrtExpanded:
        case raft::distance::DistanceType::L2SqrtUnexpanded:
        case raft::distance::DistanceType::L2Expanded:
        case raft::distance::DistanceType::L2Unexpanded: {
          // All four L2 variants accumulate squared differences; the sqrt
          // for the two "Sqrt" variants is applied once after the loop.
          auto diff = xv - yv;
          acc += diff * diff;
        } break;
        default: break;
      }
    }
    switch (metric) {
      case raft::distance::DistanceType::L2SqrtExpanded:
      case raft::distance::DistanceType::L2SqrtUnexpanded: {
        acc = raft::sqrt(acc);
      } break;
      default: break;
    }
    dist[midx * n + nidx] = acc;
  }
}

/**
 * Naive, but flexible bruteforce KNN search.
 *
 * Computes distances in batches of query rows (to bound memory use) and runs
 * select_k on each batch; for inner product the selection keeps the largest
 * values, otherwise the smallest. Synchronizes the stream before returning.
 *
 * TODO: either replace this with brute_force_knn or with distance+select_k
 * when either distance or brute_force_knn support 8-bit int inputs.
 */
template <typename EvalT, typename DataT, typename IdxT>
void naive_knn(raft::resources const& handle,
               EvalT* dist_topk,      // [n_inputs, k] output distances
               IdxT* indices_topk,    // [n_inputs, k] output indices into y
               const DataT* x,        // [n_inputs, dim] queries
               const DataT* y,        // [input_len, dim] database
               size_t n_inputs,
               size_t input_len,
               size_t dim,
               uint32_t k,
               raft::distance::DistanceType type)
{
  rmm::mr::device_memory_resource* mr = nullptr;
  auto pool_guard                     = raft::get_pool_memory_resource(mr, 1024 * 1024);
  auto stream                         = raft::resource::get_cuda_stream(handle);

  dim3 block_dim(16, 32, 1);
  // maximum reasonable grid size in `y` direction
  auto grid_y =
    static_cast<uint16_t>(std::min<size_t>(raft::ceildiv<size_t>(input_len, block_dim.y), 32768));

  // bound the memory used by this function
  size_t max_batch_size =
    std::min<size_t>(n_inputs, raft::ceildiv<size_t>(size_t(1) << size_t(27), input_len));
  rmm::device_uvector<EvalT> dist(max_batch_size * input_len, stream, mr);

  for (size_t offset = 0; offset < n_inputs; offset += max_batch_size) {
    size_t batch_size = std::min(max_batch_size, n_inputs - offset);
    dim3 grid_dim(raft::ceildiv<size_t>(batch_size, block_dim.x), grid_y, 1);

    naive_distance_kernel<EvalT, DataT, IdxT><<<grid_dim, block_dim, 0, stream>>>(
      dist.data(), x + offset * dim, y, batch_size, input_len, dim, type);

    // select_min == true for distance-like metrics; false (select max) for
    // inner product, where larger is better.
    matrix::detail::select_k<EvalT, IdxT>(handle,
                                          dist.data(),
                                          nullptr,
                                          batch_size,
                                          input_len,
                                          static_cast<int>(k),
                                          dist_topk + offset * k,
                                          indices_topk + offset * k,
                                          type != raft::distance::DistanceType::InnerProduct,
                                          mr);
  }
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
}

}  // namespace raft::neighbors
0
rapidsai_public_repos/raft/cpp/internal/raft_internal
rapidsai_public_repos/raft/cpp/internal/raft_internal/neighbors/refine_helper.cuh
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <raft/core/resource/cuda_stream.hpp>
#include <raft_internal/neighbors/naive_knn.cuh>

#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/random/rng.cuh>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>

namespace raft::neighbors {

/** Parameters of one refinement test / benchmark case. */
template <typename IdxT>
struct RefineInputs {
  IdxT n_queries;
  IdxT n_rows;
  IdxT dim;
  IdxT k;   // after refinement
  IdxT k0;  // initial k before refinement (k0 >= k).
  raft::distance::DistanceType metric;
  bool host_data;  // if true, also mirror dataset/queries/candidates in host memory
};

/** Helper class to allocate arrays and generate input data for refinement test and benchmark. */
template <typename DataT, typename DistanceT, typename IdxT>
class RefineHelper {
 public:
  // The constructor does all the work:
  //  1. generates a random dataset and query set (uniform floats, or uniform
  //     ints in [1, 20] for integer DataT);
  //  2. produces k0 neighbor candidates per query with naive_knn;
  //  3. optionally mirrors dataset/queries/candidates to host buffers;
  //  4. computes the exact top-k ground truth for later comparison.
  RefineHelper(const raft::resources& handle, RefineInputs<IdxT> params)
    : handle_(handle),
      stream_(resource::get_cuda_stream(handle)),
      p(params),
      dataset(handle),
      queries(handle),
      refined_distances(handle),
      refined_indices(handle),
      candidates(handle),
      dataset_host(handle),
      queries_host(handle),
      candidates_host(handle),
      refined_distances_host(handle),
      refined_indices_host(handle)
  {
    raft::random::RngState rng(1234ULL);

    dataset = raft::make_device_matrix<DataT, IdxT>(handle_, p.n_rows, p.dim);
    queries = raft::make_device_matrix<DataT, IdxT>(handle_, p.n_queries, p.dim);
    if constexpr (std::is_same<DataT, float>{}) {
      raft::random::uniform(
        handle, rng, dataset.data_handle(), dataset.size(), DataT(-10.0), DataT(10.0));
      raft::random::uniform(
        handle, rng, queries.data_handle(), queries.size(), DataT(-10.0), DataT(10.0));
    } else {
      raft::random::uniformInt(
        handle, rng, dataset.data_handle(), dataset.size(), DataT(1), DataT(20));
      raft::random::uniformInt(
        handle, rng, queries.data_handle(), queries.size(), DataT(1), DataT(20));
    }

    refined_distances = raft::make_device_matrix<DistanceT, IdxT>(handle_, p.n_queries, p.k);
    refined_indices   = raft::make_device_matrix<IdxT, IdxT>(handle_, p.n_queries, p.k);

    // Generate candidate vectors
    {
      candidates = raft::make_device_matrix<IdxT, IdxT>(handle_, p.n_queries, p.k0);
      rmm::device_uvector<DistanceT> distances_tmp(p.n_queries * p.k0, stream_);
      // Only the candidate indices are kept; the distances are scratch.
      naive_knn<DistanceT, DataT, IdxT>(handle_,
                                        distances_tmp.data(),
                                        candidates.data_handle(),
                                        queries.data_handle(),
                                        dataset.data_handle(),
                                        p.n_queries,
                                        p.n_rows,
                                        p.dim,
                                        p.k0,
                                        p.metric);
      resource::sync_stream(handle_, stream_);
    }

    if (p.host_data) {
      dataset_host    = raft::make_host_matrix<DataT, IdxT>(p.n_rows, p.dim);
      queries_host    = raft::make_host_matrix<DataT, IdxT>(p.n_queries, p.dim);
      candidates_host = raft::make_host_matrix<IdxT, IdxT>(p.n_queries, p.k0);

      raft::copy(dataset_host.data_handle(), dataset.data_handle(), dataset.size(), stream_);
      raft::copy(queries_host.data_handle(), queries.data_handle(), queries.size(), stream_);
      raft::copy(
        candidates_host.data_handle(), candidates.data_handle(), candidates.size(), stream_);

      refined_distances_host = raft::make_host_matrix<DistanceT, IdxT>(p.n_queries, p.k);
      refined_indices_host   = raft::make_host_matrix<IdxT, IdxT>(p.n_queries, p.k);
      resource::sync_stream(handle_, stream_);
    }

    // Generate ground truth for testing.
    {
      rmm::device_uvector<DistanceT> distances_dev(p.n_queries * p.k, stream_);
      rmm::device_uvector<IdxT> indices_dev(p.n_queries * p.k, stream_);
      naive_knn<DistanceT, DataT, IdxT>(handle_,
                                        distances_dev.data(),
                                        indices_dev.data(),
                                        queries.data_handle(),
                                        dataset.data_handle(),
                                        p.n_queries,
                                        p.n_rows,
                                        p.dim,
                                        p.k,
                                        p.metric);
      true_refined_distances_host.resize(p.n_queries * p.k);
      true_refined_indices_host.resize(p.n_queries * p.k);
      raft::copy(true_refined_indices_host.data(), indices_dev.data(), indices_dev.size(), stream_);
      raft::copy(
        true_refined_distances_host.data(), distances_dev.data(), distances_dev.size(), stream_);
      resource::sync_stream(handle_, stream_);
    }
  }

 public:
  RefineInputs<IdxT> p;
  const raft::resources& handle_;
  rmm::cuda_stream_view stream_;

  raft::device_matrix<DataT, IdxT, row_major> dataset;
  raft::device_matrix<DataT, IdxT, row_major> queries;
  raft::device_matrix<IdxT, IdxT, row_major> candidates;  // Neighbor candidate indices
  raft::device_matrix<IdxT, IdxT, row_major> refined_indices;
  raft::device_matrix<DistanceT, IdxT, row_major> refined_distances;

  // Host mirrors; only populated when p.host_data is true.
  raft::host_matrix<DataT, IdxT, row_major> dataset_host;
  raft::host_matrix<DataT, IdxT, row_major> queries_host;
  raft::host_matrix<IdxT, IdxT, row_major> candidates_host;
  raft::host_matrix<IdxT, IdxT, row_major> refined_indices_host;
  raft::host_matrix<DistanceT, IdxT, row_major> refined_distances_host;

  // Exact top-k reference computed with naive_knn, always on the host.
  std::vector<IdxT> true_refined_indices_host;
  std::vector<DistanceT> true_refined_distances_host;
};
}  // namespace raft::neighbors
0
rapidsai_public_repos/raft/cpp/internal/raft_internal
rapidsai_public_repos/raft/cpp/internal/raft_internal/matrix/select_k.cuh
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/matrix/detail/select_radix.cuh>
#include <raft/matrix/detail/select_warpsort.cuh>
#include <raft/matrix/select_k.cuh>

#include <raft/neighbors/detail/selection_faiss.cuh>

namespace raft::matrix::select {

/** Shared select_k test/benchmark case description. */
struct params {
  size_t batch_size;  // number of independent rows
  size_t len;         // length of each row
  int k;              // number of elements to select per row
  bool select_min;    // true: select smallest; false: select largest
  bool use_index_input        = true;   // pass an explicit input-index array
  bool use_same_leading_bits  = false;  // generate keys sharing leading bits (radix stress)
  bool use_memory_pool        = true;
  double frac_infinities      = 0.0;    // fraction of inputs replaced with +/-inf
};

// Compact one-line printer for googletest / benchmark case names.
inline auto operator<<(std::ostream& os, const params& ss) -> std::ostream&
{
  os << "params{batch_size: " << ss.batch_size;
  os << ", len: " << ss.len;
  os << ", k: " << ss.k;
  os << (ss.select_min ? ", asc" : ", dsc");
  if (!ss.use_index_input) { os << ", no-input-index"; }
  if (ss.use_same_leading_bits) { os << ", same-leading-bits"; }
  if (ss.frac_infinities > 0) { os << ", infs: " << ss.frac_infinities; }
  os << "}";
  return os;
}

/** Every select_k implementation exercised by the tests/benchmarks. */
enum class Algo {
  kPublicApi,
  kRadix8bits,
  kRadix11bits,
  kRadix11bitsExtraPass,
  kWarpAuto,
  kWarpImmediate,
  kWarpFiltered,
  kWarpDistributed,
  kWarpDistributedShm,
  kFaissBlockSelect
};

inline auto operator<<(std::ostream& os, const Algo& algo) -> std::ostream&
{
  switch (algo) {
    case Algo::kPublicApi: return os << "kPublicApi";
    case Algo::kRadix8bits: return os << "kRadix8bits";
    case Algo::kRadix11bits: return os << "kRadix11bits";
    case Algo::kRadix11bitsExtraPass: return os << "kRadix11bitsExtraPass";
    case Algo::kWarpAuto: return os << "kWarpAuto";
    case Algo::kWarpImmediate: return os << "kWarpImmediate";
    case Algo::kWarpFiltered: return os << "kWarpFiltered";
    case Algo::kWarpDistributed: return os << "kWarpDistributed";
    case Algo::kWarpDistributedShm: return os << "kWarpDistributedShm";
    case Algo::kFaissBlockSelect: return os << "kFaissBlockSelect";
    default: return os << "unknown enum value";
  }
}

/**
 * Dispatches one batched top-k selection to the implementation chosen by `algo`.
 *
 * All implementations share the same contract: for each of the `batch_size`
 * rows of `in` (each `len` long), write the `k` smallest (or largest, per
 * `select_min`) values to `out` and their indices to `out_idx`. `in_idx` may
 * be nullptr, in which case positions within the row are used as indices.
 */
template <typename T, typename IdxT>
void select_k_impl(const resources& handle,
                   const Algo& algo,
                   const T* in,
                   const IdxT* in_idx,
                   size_t batch_size,
                   size_t len,
                   int k,
                   T* out,
                   IdxT* out_idx,
                   bool select_min)
{
  auto stream = resource::get_cuda_stream(handle);
  switch (algo) {
    case Algo::kPublicApi: {
      // Wrap the raw pointers into mdspans for the public select_k API.
      auto in_extent  = make_extents<int64_t>(batch_size, len);
      auto out_extent = make_extents<int64_t>(batch_size, k);
      auto in_span    = make_mdspan<const T, int64_t, row_major, false, true>(in, in_extent);
      auto in_idx_span =
        make_mdspan<const IdxT, int64_t, row_major, false, true>(in_idx, in_extent);
      auto out_span     = make_mdspan<T, int64_t, row_major, false, true>(out, out_extent);
      auto out_idx_span = make_mdspan<IdxT, int64_t, row_major, false, true>(out_idx, out_extent);
      if (in_idx == nullptr) {
        // NB: std::nullopt prevents automatic inference of the template parameters.
        return matrix::select_k<T, IdxT>(
          handle, in_span, std::nullopt, out_span, out_idx_span, select_min, true);
      } else {
        return matrix::select_k(handle,
                                in_span,
                                std::make_optional(in_idx_span),
                                out_span,
                                out_idx_span,
                                select_min,
                                true);
      }
    }
    // Radix variants: template args are <T, IdxT, BitsPerPass, BlockSize>.
    case Algo::kRadix8bits:
      return detail::select::radix::select_k<T, IdxT, 8, 512>(in,
                                                              in_idx,
                                                              batch_size,
                                                              len,
                                                              k,
                                                              out,
                                                              out_idx,
                                                              select_min,
                                                              true,  // fused_last_filter
                                                              stream);
    case Algo::kRadix11bits:
      return detail::select::radix::select_k<T, IdxT, 11, 512>(in,
                                                               in_idx,
                                                               batch_size,
                                                               len,
                                                               k,
                                                               out,
                                                               out_idx,
                                                               select_min,
                                                               true,  // fused_last_filter
                                                               stream);
    case Algo::kRadix11bitsExtraPass:
      return detail::select::radix::select_k<T, IdxT, 11, 512>(in,
                                                               in_idx,
                                                               batch_size,
                                                               len,
                                                               k,
                                                               out,
                                                               out_idx,
                                                               select_min,
                                                               false,  // fused_last_filter
                                                               stream);
    // Warp-sort variants: kWarpAuto lets the library choose; the others force
    // one specific warp_sort strategy.
    case Algo::kWarpAuto:
      return detail::select::warpsort::select_k<T, IdxT>(
        in, in_idx, batch_size, len, k, out, out_idx, select_min, stream);
    case Algo::kWarpImmediate:
      return detail::select::warpsort::
        select_k_impl<T, IdxT, detail::select::warpsort::warp_sort_immediate>(
          in, in_idx, batch_size, len, k, out, out_idx, select_min, stream);
    case Algo::kWarpFiltered:
      return detail::select::warpsort::
        select_k_impl<T, IdxT, detail::select::warpsort::warp_sort_filtered>(
          in, in_idx, batch_size, len, k, out, out_idx, select_min, stream);
    case Algo::kWarpDistributed:
      return detail::select::warpsort::
        select_k_impl<T, IdxT, detail::select::warpsort::warp_sort_distributed>(
          in, in_idx, batch_size, len, k, out, out_idx, select_min, stream);
    case Algo::kWarpDistributedShm:
      return detail::select::warpsort::
        select_k_impl<T, IdxT, detail::select::warpsort::warp_sort_distributed_ext>(
          in, in_idx, batch_size, len, k, out, out_idx, select_min, stream);
    case Algo::kFaissBlockSelect:
      // NB: the faiss wrapper takes k after select_min, unlike the others.
      return neighbors::detail::select_k(
        in, in_idx, batch_size, len, out, out_idx, select_min, k, stream);
  }
}
}  // namespace raft::matrix::select
0
rapidsai_public_repos/raft
rapidsai_public_repos/raft/notebooks/tutorial_ivf_pq.ipynb
import os import tempfile import cupy as cp import numpy as np import matplotlib.pyplot as plt import rmm import urllib.request import h5py from rmm.allocators.cupy import rmm_cupy_allocator from pylibraft.common import DeviceResources from pylibraft.neighbors import ivf_pq, refine from adjustText import adjust_text from utils import calc_recall, load_dataset %matplotlib inline# A clumsy helper for inspecting properties of an object def show_properties(obj): return { attr: getattr(obj, attr) for attr in dir(obj) if type(getattr(type(obj), attr)).__name__ == 'getset_descriptor' }# We'll need to load store some data in this tutorial WORK_FOLDER = os.path.join(tempfile.gettempdir(), 'raft_ivf_pq_tutorial') if not os.path.exists(WORK_FOLDER): os.makedirs(WORK_FOLDER) print("The index and data will be saved in", WORK_FOLDER)# Report the GPU in use to put the measurements into perspective !nvidia-smipool = rmm.mr.PoolMemoryResource( rmm.mr.CudaMemoryResource(), initial_pool_size=2**30 ) rmm.mr.set_current_device_resource(pool) cp.cuda.set_allocator(rmm_cupy_allocator)DATASET_URL = "http://ann-benchmarks.com/sift-128-euclidean.hdf5" f = load_dataset(DATASET_URL)metric = f.attrs['distance'] dataset = cp.array(f['train']) queries = cp.array(f['test']) gt_neighbors = cp.array(f['neighbors']) gt_distances = cp.array(f['distances']) print(f"Loaded dataset of size {dataset.shape}; metric: '{metric}'.") print(f"Number of test queries: {queries.shape[0]}")# RAFT's DeviceResources controls the GPU, cuda stream, memory policies etc. # For now, we just create a default instance. resources = DeviceResources()# First, we need to initialize the build/indexing parameters. # One of the more important parameters is the product quantisation (PQ) dim. # Effectively, this parameter says # "shrink the dataset to this dimensionality to reduce the index size". # It must be not bigger than the dataset dim, # and it should be divisible by 32 for better GPU performance. 
pq_dim = 1 while pq_dim * 2 < dataset.shape[1]: pq_dim = pq_dim * 2 # We'll use the ANN-benchmarks-provided metric and sensible defaults for the rest of parameters. index_params = ivf_pq.IndexParams(n_lists=1024, metric=metric, pq_dim=pq_dim) show_properties(index_params)%%time ## Build the index # This function takes a row-major either numpy or cupy (GPU) array. # Generally, it's a bit faster with GPU inputs, but the CPU version may come in handy # if the whole dataset cannot fit into GPU memory. index = ivf_pq.build(index_params, dataset, handle=resources) # This function is asynchronous so we need to explicitly synchronize the GPU before we can measure the execution time resources.sync() index%%time index_filepath = os.path.join(WORK_FOLDER, "ivf_pq.bin") ivf_pq.save(index_filepath, index) loaded_index = ivf_pq.load(index_filepath) resources.sync() indexk = 10 search_params = ivf_pq.SearchParams() show_properties(search_params)%%time distances, neighbors = ivf_pq.search(search_params, index, queries, k, handle=resources) # Sync the GPU to make sure we've got the timing right resources.sync()recall_first_try = calc_recall(neighbors, gt_neighbors) print(f"Got recall = {recall_first_try} with the default parameters (k = {k}).")%%time candidates = ivf_pq.search(search_params, index, queries, k * 2, handle=resources)[1] distances, neighbors = refine(dataset, queries, candidates, k, handle=resources) resources.sync()recall_refine2x = calc_recall(neighbors, gt_neighbors) print(f"Got recall = {recall_refine2x} with 2x refinement (k = {k}).")bench_k = np.exp2(np.arange(10)).astype(np.int32) bench_avg = np.zeros_like(bench_k, dtype=np.float32) bench_std = np.zeros_like(bench_k, dtype=np.float32) for i, k in enumerate(bench_k): r = %timeit -o ivf_pq.search(search_params, index, queries, k, handle=resources); resources.sync() bench_avg[i] = (queries.shape[0] * r.loops / np.array(r.all_runs)).mean() bench_std[i] = (queries.shape[0] * r.loops / np.array(r.all_runs)).std() 
fig, ax = plt.subplots(1, 1, figsize=plt.figaspect(1/2)) ax.errorbar(bench_k, bench_avg, bench_std) ax.set_xscale('log') ax.set_xticks(bench_k, bench_k) ax.set_xlabel('k') ax.grid() ax.set_ylabel('QPS');bench_probes = np.exp2(np.arange(11)).astype(np.int32) bench_qps = np.zeros_like(bench_probes, dtype=np.float32) bench_recall = np.zeros_like(bench_probes, dtype=np.float32) k = 100 for i, n_probes in enumerate(bench_probes): sp = ivf_pq.SearchParams(n_probes=n_probes) r = %timeit -o ivf_pq.search(sp, index, queries, k, handle=resources); resources.sync() bench_qps[i] = (queries.shape[0] * r.loops / np.array(r.all_runs)).mean() bench_recall[i] = calc_recall(ivf_pq.search(sp, index, queries, k, handle=resources)[1], gt_neighbors) fig, ax = plt.subplots(1, 3, figsize=plt.figaspect(1/4)) ax[0].plot(bench_probes, bench_recall) ax[0].set_xscale('log') ax[0].set_xticks(bench_probes, bench_probes) ax[0].set_xlabel('n_probes') ax[0].set_ylabel('recall') ax[0].grid() ax[1].plot(bench_probes, bench_qps) ax[1].set_xscale('log') ax[1].set_xticks(bench_probes, bench_probes) ax[1].set_xlabel('n_probes') ax[1].set_ylabel('QPS') ax[1].set_yscale('log') ax[1].grid() ax[2].plot(bench_recall, bench_qps) ax[2].set_xlabel('recall') ax[2].set_ylabel('QPS') ax[2].set_yscale('log') ax[2].grid();bench_qps_s1 = np.zeros((5,), dtype=np.float32) bench_recall_s1 = np.zeros((5,), dtype=np.float32) k = 10 n_probes = 256 search_params_32_32 = ivf_pq.SearchParams(n_probes=n_probes, internal_distance_dtype=np.float32, lut_dtype=np.float32) search_params_32_16 = ivf_pq.SearchParams(n_probes=n_probes, internal_distance_dtype=np.float32, lut_dtype=np.float16) search_params_32_08 = ivf_pq.SearchParams(n_probes=n_probes, internal_distance_dtype=np.float32, lut_dtype=np.uint8) search_params_16_16 = ivf_pq.SearchParams(n_probes=n_probes, internal_distance_dtype=np.float16, lut_dtype=np.float16) search_params_16_08 = ivf_pq.SearchParams(n_probes=n_probes, internal_distance_dtype=np.float16, 
lut_dtype=np.uint8) search_ps = [search_params_32_32, search_params_32_16, search_params_32_08, search_params_16_16, search_params_16_08] bench_names = ['32/32', '32/16', '32/8', '16/16', '16/8'] for i, sp in enumerate(search_ps): r = %timeit -o ivf_pq.search(sp, index, queries, k, handle=resources); resources.sync() bench_qps_s1[i] = (queries.shape[0] * r.loops / np.array(r.all_runs)).mean() bench_recall_s1[i] = calc_recall(ivf_pq.search(sp, index, queries, k, handle=resources)[1], gt_neighbors)fig, ax = plt.subplots(1, 1, figsize=plt.figaspect(1/2)) fig.suptitle( f'Effects of search parameters on QPS/recall trade-off ({DATASET_FILENAME})\n' + \ f'k = {k}, n_probes = {n_probes}, pq_dim = {pq_dim}') ax.plot(bench_recall_s1, bench_qps_s1, 'o') ax.set_xlabel('recall') ax.set_ylabel('QPS') ax.grid() annotations = [] for i, label in enumerate(bench_names): annotations.append(ax.text( bench_recall_s1[i], bench_qps_s1[i], f" {label} ", ha='center', va='center')) clutter = [ ax.text( 0.02, 0.08, 'Labels denote the bitsize of: internal_distance_dtype/lut_dtype', verticalalignment='top', bbox={'facecolor': 'white', 'edgecolor': 'grey'}, transform = ax.transAxes) ] adjust_text(annotations, objects=clutter);def search_refine(ps, ratio): k_search = k * ratio candidates = ivf_pq.search(ps, index, queries, k_search, handle=resources)[1] return candidates if ratio == 1 else refine(dataset, queries, candidates, k, handle=resources)[1] ratios = [1, 2, 4] bench_qps_sr = np.zeros((len(ratios), len(search_ps)), dtype=np.float32) bench_recall_sr = np.zeros((len(ratios), len(search_ps)), dtype=np.float32) for j, ratio in enumerate(ratios): for i, ps in enumerate(search_ps): r = %timeit -o search_refine(ps, ratio); resources.sync() bench_qps_sr[j, i] = (queries.shape[0] * r.loops / np.array(r.all_runs)).mean() bench_recall_sr[j, i] = calc_recall(search_refine(ps, ratio), gt_neighbors)fig, ax = plt.subplots(1, 1, figsize=plt.figaspect(1/2)) fig.suptitle( f'Effects of search parameters on 
QPS/recall trade-off ({DATASET_FILENAME})\n' + \ f'k = {k}, n_probes = {n_probes}, pq_dim = {pq_dim}') labels = [] for j, ratio in enumerate(ratios): ax.plot(bench_recall_sr[j, :], bench_qps_sr[j, :], 'o') labels.append(f"refine ratio = {ratio}") ax.legend(labels) ax.set_xlabel('recall') ax.set_ylabel('QPS') ax.grid() colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] annotations = [] for j, ratio in enumerate(ratios): for i, label in enumerate(bench_names): annotations.append(ax.text( bench_recall_sr[j, i], bench_qps_sr[j, i], f" {label} ", color=colors[j], ha='center', va='center')) clutter = [ ax.text( 0.02, 0.08, 'Labels denote the bitsize of: internal_distance_dtype/lut_dtype', verticalalignment='top', bbox={'facecolor': 'white', 'edgecolor': 'grey'}, transform = ax.transAxes) ] adjust_text(annotations, objects=clutter);def search_refine(internal_distance_dtype, lut_dtype, ratio, n_probes): k_search = k * ratio ps = ivf_pq.SearchParams( n_probes=n_probes, internal_distance_dtype=internal_distance_dtype, lut_dtype=lut_dtype) candidates = ivf_pq.search(ps, index, queries, k_search, handle=resources)[1] return candidates if ratio == 1 else refine(dataset, queries, candidates, k, handle=resources)[1] search_configs = [ lambda n_probes: search_refine(np.float16, np.float16, 1, n_probes), lambda n_probes: search_refine(np.float32, np.uint8, 1, n_probes), lambda n_probes: search_refine(np.float32, np.uint8, 2, n_probes) ] search_config_names = [ '16/16', '32/8', '32/8/r2' ]n_list_variants = [100, 500, 1000, 2000, 5000] pl_ratio_variants = [500, 200, 100, 50, 10, 5] selected_search_variant = 1 search_fun = search_configs[selected_search_variant] search_label = search_config_names[selected_search_variant] bench_qps_nl = np.zeros((len(n_list_variants), len(pl_ratio_variants)), dtype=np.float32) bench_recall_nl = np.zeros_like(bench_qps_nl, dtype=np.float32) for i, n_lists in enumerate(n_list_variants): index_params = ivf_pq.IndexParams(n_lists=n_lists, 
metric=metric, pq_dim=pq_dim) index = ivf_pq.build(index_params, dataset, handle=resources) for j, pl_ratio in enumerate(pl_ratio_variants): n_probes = max(1, n_lists // pl_ratio) r = %timeit -o search_fun(n_probes); resources.sync() bench_qps_nl[i, j] = (queries.shape[0] * r.loops / np.array(r.all_runs)).mean() bench_recall_nl[i, j] = calc_recall(search_fun(n_probes), gt_neighbors)fig, ax = plt.subplots(1, 1, figsize=plt.figaspect(1/2)) fig.suptitle( f'Effects of n_list on QPS/recall trade-off ({DATASET_FILENAME})\n' + \ f'k = {k}, pq_dim = {pq_dim}, search = {search_label}') labels = [] for i, n_lists in enumerate(n_list_variants): ax.plot(bench_recall_nl[i, :], bench_qps_nl[i, :]) labels.append(f"n_lists = {n_lists}") ax.legend(labels) ax.set_xlabel('recall') ax.set_ylabel('QPS') ax.set_yscale('log') ax.grid()# Let's try a few build configurations. # Warning: this will take some time k = 10 n_probes_variants = [10, 20, 50, 100] n_lists = 1000 build_configs = { '64-8-subspace': ivf_pq.IndexParams(n_lists=n_lists, metric=metric, pq_dim=64, pq_bits=8, codebook_kind="subspace"), '128-8-subspace': ivf_pq.IndexParams(n_lists=n_lists, metric=metric, pq_dim=128, pq_bits=8, codebook_kind="subspace"), '128-6-subspace': ivf_pq.IndexParams(n_lists=n_lists, metric=metric, pq_dim=128, pq_bits=6, codebook_kind="subspace"), '128-6-cluster': ivf_pq.IndexParams(n_lists=n_lists, metric=metric, pq_dim=128, pq_bits=6, codebook_kind="cluster"), } bench_qps_ip = np.zeros((len(build_configs), len(search_configs), len(n_probes_variants)), dtype=np.float32) bench_recall_ip = np.zeros_like(bench_qps_ip, dtype=np.float32) for i, index_params in enumerate(build_configs.values()): index = ivf_pq.build(index_params, dataset, handle=resources) for l, search_fun in enumerate(search_configs): for j, n_probes in enumerate(n_probes_variants): r = %timeit -o search_fun(n_probes); resources.sync() bench_qps_ip[i, l, j] = (queries.shape[0] * r.loops / np.array(r.all_runs)).mean() bench_recall_ip[i, 
l, j] = calc_recall(search_fun(n_probes), gt_neighbors)fig, ax = plt.subplots(len(search_config_names), 1, figsize=(16, len(search_config_names)*8)) fig.suptitle( f'Effects of index parameters on QPS/recall trade-off ({DATASET_FILENAME})\n' + \ f'k = {k}, n_lists = {n_lists}') for j, search_label in enumerate(search_config_names): labels = [] for i, index_label in enumerate(build_configs.keys()): ax[j].plot(bench_recall_ip[i, j, :], bench_qps_ip[i, j, :]) labels.append(index_label) ax[j].set_title(f"search: {search_label}") ax[j].legend(labels) ax[j].set_xlabel('recall') ax[j].set_ylabel('QPS') ax[j].set_yscale('log') ax[j].grid()
0
rapidsai_public_repos/raft
rapidsai_public_repos/raft/notebooks/VectorSearch_QuestionRetrieval.ipynb
import json from sentence_transformers import SentenceTransformer, CrossEncoder, util import time import gzip import os import torch import pylibraft from pylibraft.neighbors import ivf_flat, ivf_pq pylibraft.config.set_output_as(lambda device_ndarray: device_ndarray.copy_to_host()) if not torch.cuda.is_available(): print("Warning: No GPU found. Please add GPU to your notebook")# We use the Bi-Encoder to encode all passages, so that we can use it with semantic search model_name = 'nq-distilbert-base-v1' bi_encoder = SentenceTransformer(model_name) # As dataset, we use Simple English Wikipedia. Compared to the full English wikipedia, it has only # about 170k articles. We split these articles into paragraphs and encode them with the bi-encoder wikipedia_filepath = 'data/simplewiki-2020-11-01.jsonl.gz' if not os.path.exists(wikipedia_filepath): util.http_get('http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz', wikipedia_filepath) passages = [] with gzip.open(wikipedia_filepath, 'rt', encoding='utf8') as fIn: for line in fIn: data = json.loads(line.strip()) for paragraph in data['paragraphs']: # We encode the passages as [title, text] passages.append([data['title'], paragraph]) # If you like, you can also limit the number of passages you want to use print("Passages:", len(passages)) # To speed things up, pre-computed embeddings are downloaded. 
# The provided file encoded the passages with the model 'nq-distilbert-base-v1' if model_name == 'nq-distilbert-base-v1': embeddings_filepath = 'simplewiki-2020-11-01-nq-distilbert-base-v1.pt' if not os.path.exists(embeddings_filepath): util.http_get('http://sbert.net/datasets/simplewiki-2020-11-01-nq-distilbert-base-v1.pt', embeddings_filepath) corpus_embeddings = torch.load(embeddings_filepath) corpus_embeddings = corpus_embeddings.float() # Convert embedding file to float if torch.cuda.is_available(): corpus_embeddings = corpus_embeddings.to('cuda') else: # Here, we compute the corpus_embeddings from scratch (which can take a while depending on the GPU) corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True)%%time params = ivf_pq.IndexParams(n_lists=150, pq_dim=96) pq_index = ivf_pq.build(params, corpus_embeddings) search_params = ivf_pq.SearchParams() def search_raft_pq(query, top_k = 5): # Encode the query using the bi-encoder and find potentially relevant passages question_embedding = bi_encoder.encode(query, convert_to_tensor=True) hits = ivf_pq.search(search_params, pq_index, question_embedding[None], top_k) # Output of top-k hits print("Input question:", query) for k in range(top_k): print("\t{:.3f}\t{}".format(hits[0][0, k], passages[hits[1][0, k]]))pq_index_mem = pq_index.pq_dim * pq_index.size * pq_index.pq_bits print("IVF-PQ memory footprint: {:.1f} MB".format(pq_index_mem / 2**20)) original_mem = corpus_embeddings.shape[0] * corpus_embeddings.shape[1] * 4 print("Original dataset: {:.1f} MB".format(original_mem / 2**20)) print("Memory saved: {:.1f}%".format(100 * (1 - pq_index_mem / original_mem)))%%time search_raft_pq(query="Who was Grace Hopper?")%%time search_raft_pq(query="Who was Alan Turing?")%%time search_raft_pq(query = "What is creating tides?")%%time params = ivf_flat.IndexParams(n_lists=150) flat_index = ivf_flat.build(params, corpus_embeddings) search_params = ivf_flat.SearchParams() def 
search_raft_flat(query, top_k = 5): # Encode the query using the bi-encoder and find potentially relevant passages question_embedding = bi_encoder.encode(query, convert_to_tensor=True) start_time = time.time() hits = ivf_flat.search(search_params, flat_index, question_embedding[None], top_k) end_time = time.time() # Output of top-k hits print("Input question:", query) print("Results (after {:.3f} seconds):".format(end_time - start_time)) for k in range(top_k): print("\t{:.3f}\t{}".format(hits[0][0, k], passages[hits[1][0, k]]))%%time search_raft_flat(query="Who was Grace Hopper?")%%time search_raft_flat(query="Who was Alan Turing?")%%time search_raft_flat(query = "What is creating tides?")from pylibraft.neighbors import cagra%%time params = cagra.IndexParams(intermediate_graph_degree=128, graph_degree=64) cagra_index = cagra.build(params, corpus_embeddings) search_params = cagra.SearchParams()def search_raft_cagra(query, top_k = 5): # Encode the query using the bi-encoder and find potentially relevant passages question_embedding = bi_encoder.encode(query, convert_to_tensor=True) hits = cagra.search(search_params, cagra_index, question_embedding[None], top_k) # Output of top-k hits print("Input question:", query) for k in range(top_k): print("\t{:.3f}\t{}".format(hits[0][0, k], passages[hits[1][0, k]]))%time search_raft_cagra(query="Who was Grace Hopper?")
0
rapidsai_public_repos/raft
rapidsai_public_repos/raft/notebooks/ivf_flat_example.ipynb
import os import cupy as cp import numpy as np from pylibraft.common import DeviceResources from pylibraft.neighbors import ivf_flat import matplotlib.pyplot as plt import tempfile from utils import BenchmarkTimer, calc_recall, load_datasetimport rmm from rmm.allocators.cupy import rmm_cupy_allocator mr = rmm.mr.PoolMemoryResource( rmm.mr.CudaMemoryResource(), initial_pool_size=2**30 ) rmm.mr.set_current_device_resource(mr) cp.cuda.set_allocator(rmm_cupy_allocator)# Report the GPU in use !nvidia-smiWORK_FOLDER = os.path.join(tempfile.gettempdir(), "raft_example") f = load_dataset("http://ann-benchmarks.com/sift-128-euclidean.hdf5", work_folder=WORK_FOLDER)metric = f.attrs['distance'] dataset = cp.array(f['train']) queries = cp.array(f['test']) gt_neighbors = cp.array(f['neighbors']) gt_distances = cp.array(f['distances']) itemsize = dataset.dtype.itemsize print(f"Loaded dataset of size {dataset.shape}, {dataset.size*itemsize/(1<<30):4.1f} GiB; metric: '{metric}'.") print(f"Number of test queries: {queries.shape[0]}")%%time build_params = ivf_flat.IndexParams( n_lists=1024, metric="euclidean", kmeans_trainset_fraction=0.1, kmeans_n_iters=20, add_data_on_build=True ) index = ivf_flat.build(build_params, dataset)print(index)handle = DeviceResources()%%time n_queries=10000 # n_probes is the number of clusters we select in the first (coarse) search step. This is the only hyper parameter for search. search_params = ivf_flat.SearchParams(n_probes=30) # Search 10 nearest neighbors. distances, indices = ivf_flat.search(search_params, index, cp.asarray(queries[:n_queries,:]), k=10, handle=handle) # RAFT calls are asynchronous (when handle arg is provided), we need to sync before accessing the results. 
handle.sync() distances, neighbors = cp.asnumpy(distances), cp.asnumpy(indices)calc_recall(neighbors, gt_neighbors)index_file = os.path.join(WORK_FOLDER, "my_ivf_flat_index.bin") ivf_flat.save(index_file, index)index = ivf_flat.load(index_file)n_probes = np.asarray([10, 20, 30, 50, 100, 200, 500, 1024]); qps = np.zeros(n_probes.shape); recall = np.zeros(n_probes.shape); for i in range(len(n_probes)): print("\nBenchmarking search with n_probes =", n_probes[i]) timer = BenchmarkTimer(reps=1, warmup=1) for rep in timer.benchmark_runs(): distances, neighbors = ivf_flat.search( ivf_flat.SearchParams(n_probes=n_probes[i]), index, cp.asarray(queries), k=10, handle=handle, ) handle.sync() recall[i] = calc_recall(cp.asnumpy(neighbors), gt_neighbors) print("recall", recall[i]) timings = np.asarray(timer.timings) avg_time = timings.mean() std_time = timings.std() qps[i] = queries.shape[0] / avg_time print("Average search time: {0:7.3f} +/- {1:7.3} s".format(avg_time, std_time)) print("Queries per second (QPS): {0:8.0f}".format(qps[i]))fig = plt.figure(figsize=(12,3)) ax = fig.add_subplot(131) ax.plot(n_probes, recall,'o-') #ax.set_xticks(bench_k, bench_k) ax.set_xlabel('n_probes') ax.grid() ax.set_ylabel('recall (@k=10)') ax = fig.add_subplot(132) ax.plot(n_probes, qps,'o-') #ax.set_xticks(bench_k, bench_k) ax.set_xlabel('n_probes') ax.grid() ax.set_ylabel('queries per second'); ax = fig.add_subplot(133) ax.plot(recall, qps,'o-') #ax.set_xticks(bench_k, bench_k) ax.set_xlabel('recall') ax.grid() ax.set_ylabel('queries per second'); #ax.set_yscale('log')%%time build_params = ivf_flat.IndexParams( n_lists=100, metric="euclidean", kmeans_trainset_fraction=1, kmeans_n_iters=20, add_data_on_build=True ) index = ivf_flat.build(build_params, dataset, handle=handle)%%time n_queries=10000 search_params = ivf_flat.SearchParams(n_probes=10) # Search 10 nearest neighbors. 
distances, indices = ivf_flat.search(search_params, index, cp.asarray(queries[:n_queries,:]), k=10, handle=handle) handle.sync() distances, neighbors = cp.asnumpy(distances), cp.asnumpy(indices)calc_recall(neighbors, gt_neighbors)%%time build_params = ivf_flat.IndexParams( n_lists=100, metric="sqeuclidean", kmeans_trainset_fraction=0.1, kmeans_n_iters=20 ) index = ivf_flat.build(build_params, dataset, handle=handle)search_params = ivf_flat.SearchParams(n_probes=10) distances, indices = ivf_flat.search(search_params, index, cp.asarray(queries[:n_queries,:]), k=10, handle=handle) handle.sync() distances, neighbors = cp.asnumpy(distances), cp.asnumpy(indices) calc_recall(neighbors, gt_neighbors)# subsample the dataset n_train = 10000 train_set = dataset[cp.random.choice(dataset.shape[0], n_train, replace=False),:] # build using training set build_params = ivf_flat.IndexParams( n_lists=1024, metric="sqeuclidean", kmeans_trainset_fraction=1, kmeans_n_iters=20, add_data_on_build=False ) index = ivf_flat.build(build_params, train_set) print("Index before adding vectors", index) ivf_flat.extend(index, dataset, cp.arange(dataset.shape[0], dtype=cp.int64)) print("Index after adding vectors", index)
0
rapidsai_public_repos/raft
rapidsai_public_repos/raft/notebooks/utils.py
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cupy as cp
import h5py
import os
import tempfile
import time
# `import urllib` alone does not reliably provide the `urllib.request`
# submodule used below; import it explicitly.
import urllib.request


## Check the quality of the prediction (recall)
def calc_recall(found_indices, ground_truth):
    """Compute the recall of an ANN search result against the ground truth.

    Parameters
    ----------
    found_indices : array-like of shape (batch_size, k)
        Neighbor indices returned by the approximate search.
    ground_truth : array of shape (batch_size, >= k)
        Exact nearest-neighbor indices for the same queries.

    Returns
    -------
    float
        Fraction of returned neighbors that appear among the first k
        ground-truth neighbors of the corresponding query.

    Raises
    ------
    RuntimeError
        If the batch sizes differ, or the ground truth has fewer than
        k columns.
    """
    found_indices = cp.asarray(found_indices)
    bs, k = found_indices.shape
    if bs != ground_truth.shape[0]:
        raise RuntimeError(
            "Batch sizes do not match {} vs {}".format(
                bs, ground_truth.shape[0]
            )
        )
    if k > ground_truth.shape[1]:
        raise RuntimeError(
            "Not enough indices in the ground truth ({} > {})".format(
                k, ground_truth.shape[1]
            )
        )
    n = 0
    # Go over the batch.
    # Note, ivf-pq does not guarantee ordered output, hence the use of
    # intersect1d rather than an element-wise comparison.
    for i in range(bs):
        n += cp.intersect1d(found_indices[i, :k], ground_truth[i, :k]).size
    recall = n / found_indices.size
    return recall


class BenchmarkTimer:
    """Provides a context manager that runs a code block `reps` times
    and records results to the instance variable `timings`. Use like:

    .. code-block:: python

        timer = BenchmarkTimer(rep=5)
        for _ in timer.benchmark_runs():
            ... do something ...
        print(np.min(timer.timings))

    This class is borrowed from the rapids/cuml benchmark suite
    """

    def __init__(self, reps=1, warmup=0):
        # Number of untimed warmup iterations and timed repetitions.
        self.warmup = warmup
        self.reps = reps
        self.timings = []

    def benchmark_runs(self):
        """Yield `reps + warmup` iteration indices, timing each run.

        Only non-warmup runs are recorded in ``self.timings``.
        (The original appended every run's timing unconditionally and
        then appended non-warmup timings a second time, double-counting
        the measured repetitions and including warmup runs.)
        """
        for r in range(self.reps + self.warmup):
            t0 = time.time()
            yield r
            t1 = time.time()
            if r >= self.warmup:
                self.timings.append(t1 - t0)


def load_dataset(dataset_url, work_folder=None):
    """Download a dataset and open it as an hdf5 file.

    It is expected that the dataset is an hdf5 file in ann-benchmarks
    format.

    Parameters
    ----------
    dataset_url : str
        Address of the hdf5 file to download.
    work_folder : str, optional
        Name of the local folder to store the dataset in. A folder
        under the system temp directory is created when not given.

    Returns
    -------
    h5py.File
        The downloaded dataset, opened read-only.
    """
    # NOTE: the original function shadowed `dataset_url` with a
    # hard-coded SIFT URL, silently ignoring the caller's argument.
    # The parameter is now honored.
    dataset_filename = dataset_url.split("/")[-1]

    # We'll need to store some data locally.
    if work_folder is None:
        work_folder = os.path.join(tempfile.gettempdir(), "raft_example")
    if not os.path.exists(work_folder):
        os.makedirs(work_folder)
    print("The index and data will be saved in", work_folder)

    ## download the dataset (skipped when already cached locally)
    dataset_path = os.path.join(work_folder, dataset_filename)
    if not os.path.exists(dataset_path):
        urllib.request.urlretrieve(dataset_url, dataset_path)

    f = h5py.File(dataset_path, "r")
    return f
0
rapidsai_public_repos/raft
rapidsai_public_repos/raft/.devcontainer/README.md
# RAFT Development Containers This directory contains [devcontainer configurations](https://containers.dev/implementors/json_reference/) for using VSCode to [develop in a container](https://code.visualstudio.com/docs/devcontainers/containers) via the `Remote Containers` [extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) or [GitHub Codespaces](https://github.com/codespaces). This container is a turnkey development environment for building and testing the RAFT C++ and Python libraries. ## Table of Contents * [Prerequisites](#prerequisites) * [Host bind mounts](#host-bind-mounts) * [Launch a Dev Container](#launch-a-dev-container) ## Prerequisites * [VSCode](https://code.visualstudio.com/download) * [VSCode Remote Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) ## Host bind mounts By default, the following directories are bind-mounted into the devcontainer: * `${repo}:/home/coder/raft` * `${repo}/../.aws:/home/coder/.aws` * `${repo}/../.local:/home/coder/.local` * `${repo}/../.cache:/home/coder/.cache` * `${repo}/../.conda:/home/coder/.conda` * `${repo}/../.config:/home/coder/.config` This ensures caches, configurations, dependencies, and your commits are persisted on the host across container runs. ## Launch a Dev Container To launch a devcontainer from VSCode, open the RAFT repo and select the "Reopen in Container" button in the bottom right:<br/><img src="https://user-images.githubusercontent.com/178183/221771999-97ab29d5-e718-4e5f-b32f-2cdd51bba25c.png"/> Alternatively, open the VSCode command palette (typically `cmd/ctrl + shift + P`) and run the "Rebuild and Reopen in Container" command. ## Using the devcontainer On startup, the devcontainer creates or updates the conda/pip environment using `raft/dependencies.yaml`. 
The container includes convenience functions to clean, configure, and build the various RAFT components: ```shell $ clean-raft-cpp # only cleans the C++ build dir $ clean-pylibraft-python # only cleans the Python build dir $ clean-raft # cleans both C++ and Python build dirs $ configure-raft-cpp # only configures raft C++ lib $ build-raft-cpp # only builds raft C++ lib $ build-pylibraft-python # only builds raft Python lib $ build-raft # builds both C++ and Python libs ``` * The C++ build script is a small wrapper around `cmake -S ~/raft/cpp -B ~/raft/cpp/build` and `cmake --build ~/raft/cpp/build` * The Python build script is a small wrapper around `pip install --editable ~/raft/cpp` Unlike `build.sh`, these convenience scripts *don't* install the libraries after building them. Instead, they automatically inject the correct arguments to build the C++ libraries from source and use their build dirs as package roots: ```shell $ cmake -S ~/raft/cpp -B ~/raft/cpp/build $ CMAKE_ARGS="-Draft_ROOT=~/raft/cpp/build" \ # <-- this argument is automatic pip install -e ~/raft/cpp ```
0
rapidsai_public_repos/raft
rapidsai_public_repos/raft/.devcontainer/Dockerfile
# syntax=docker/dockerfile:1.5 ARG BASE ARG PYTHON_PACKAGE_MANAGER=conda FROM ${BASE} as pip-base ENV DEFAULT_VIRTUAL_ENV=rapids FROM ${BASE} as conda-base ENV DEFAULT_CONDA_ENV=rapids FROM ${PYTHON_PACKAGE_MANAGER}-base ARG CUDA ENV CUDAARCHS="RAPIDS" ENV CUDA_VERSION="${CUDA_VERSION:-${CUDA}}" ARG PYTHON_PACKAGE_MANAGER ENV PYTHON_PACKAGE_MANAGER="${PYTHON_PACKAGE_MANAGER}" ENV PYTHONSAFEPATH="1" ENV PYTHONUNBUFFERED="1" ENV PYTHONDONTWRITEBYTECODE="1" ENV SCCACHE_REGION="us-east-2" ENV SCCACHE_BUCKET="rapids-sccache-devs" ENV VAULT_HOST="https://vault.ops.k8s.rapids.ai" ENV HISTFILE="/home/coder/.cache/._bash_history"
0
rapidsai_public_repos/raft/.devcontainer
rapidsai_public_repos/raft/.devcontainer/cuda11.8-pip/devcontainer.json
{ "build": { "context": "${localWorkspaceFolder}/.devcontainer", "dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile", "args": { "CUDA": "11.8", "PYTHON_PACKAGE_MANAGER": "pip", "BASE": "rapidsai/devcontainers:24.02-cpp-llvm16-cuda11.8-ubuntu22.04" } }, "hostRequirements": {"gpu": "optional"}, "features": { "ghcr.io/rapidsai/devcontainers/features/ucx:24.2": {"version": "1.14.1"}, "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/rapidsai/devcontainers/features/ucx", "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils" ], "initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda11.8-venvs}"], "postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"], "workspaceFolder": "/home/coder", "workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/raft,type=bind,consistency=consistent", "mounts": [ "source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda11.8-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent" ], "customizations": { "vscode": { "extensions": [ "ms-python.flake8", "nvidia.nsight-vscode-edition" ] } } }
0
rapidsai_public_repos/raft/.devcontainer
rapidsai_public_repos/raft/.devcontainer/cuda12.0-pip/devcontainer.json
{ "build": { "context": "${localWorkspaceFolder}/.devcontainer", "dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile", "args": { "CUDA": "12.0", "PYTHON_PACKAGE_MANAGER": "pip", "BASE": "rapidsai/devcontainers:24.02-cpp-llvm16-cuda12.0-ubuntu22.04" } }, "hostRequirements": {"gpu": "optional"}, "features": { "ghcr.io/rapidsai/devcontainers/features/ucx:24.2": {"version": "1.14.1"}, "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/rapidsai/devcontainers/features/ucx", "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils" ], "initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda12.0-venvs}"], "postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"], "workspaceFolder": "/home/coder", "workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/raft,type=bind,consistency=consistent", "mounts": [ "source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda12.0-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent" ], "customizations": { "vscode": { "extensions": [ "ms-python.flake8", "nvidia.nsight-vscode-edition" ] } } }
0
rapidsai_public_repos/raft/.devcontainer
rapidsai_public_repos/raft/.devcontainer/cuda12.0-conda/devcontainer.json
{ "build": { "context": "${localWorkspaceFolder}/.devcontainer", "dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile", "args": { "CUDA": "12.0", "PYTHON_PACKAGE_MANAGER": "conda", "BASE": "rapidsai/devcontainers:24.02-cpp-mambaforge-ubuntu22.04" } }, "hostRequirements": {"gpu": "optional"}, "features": { "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils" ], "initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda12.0-envs}"], "postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"], "workspaceFolder": "/home/coder", "workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/raft,type=bind,consistency=consistent", "mounts": [ "source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.conda/pkgs,target=/home/coder/.conda/pkgs,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda12.0-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent" ], "customizations": { "vscode": { "extensions": [ "ms-python.flake8", "nvidia.nsight-vscode-edition" ] } } }
0
rapidsai_public_repos/raft/.devcontainer
rapidsai_public_repos/raft/.devcontainer/cuda11.8-conda/devcontainer.json
{ "build": { "context": "${localWorkspaceFolder}/.devcontainer", "dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile", "args": { "CUDA": "11.8", "PYTHON_PACKAGE_MANAGER": "conda", "BASE": "rapidsai/devcontainers:24.02-cpp-llvm16-cuda11.8-mambaforge-ubuntu22.04" } }, "hostRequirements": {"gpu": "optional"}, "features": { "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils" ], "initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda11.8-envs}"], "postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"], "workspaceFolder": "/home/coder", "workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/raft,type=bind,consistency=consistent", "mounts": [ "source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.conda/pkgs,target=/home/coder/.conda/pkgs,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda11.8-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent" ], "customizations": { "vscode": { "extensions": [ "ms-python.flake8", "nvidia.nsight-vscode-edition" ] } } }
0
rapidsai_public_repos/raft
rapidsai_public_repos/raft/docs/README.md
# Building Documentation ## Building locally: #### [Build and install RAFT](source/build.md) #### Generate the docs ```shell script bash build.sh docs ``` #### Once the process finishes, documentation can be found in build/html ```shell script xdg-open build/html/index.html ```
0
rapidsai_public_repos/raft
rapidsai_public_repos/raft/docs/make.bat
@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=source set BUILDDIR=build set SPHINXPROJ=cuML if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% :end popd
0
rapidsai_public_repos/raft
rapidsai_public_repos/raft/docs/Makefile
# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = RAFT SOURCEDIR = source BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/pylibraft_api.rst
~~~~~~~~~~ Python API ~~~~~~~~~~ .. _api: .. toctree:: :maxdepth: 4 pylibraft_api/cluster.rst pylibraft_api/common.rst pylibraft_api/distance.rst pylibraft_api/matrix.rst pylibraft_api/neighbors.rst pylibraft_api/random.rst
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/cpp_api.rst
~~~~~~~ C++ API ~~~~~~~ .. _api: .. toctree:: :maxdepth: 4 cpp_api/core.rst cpp_api/cluster.rst cpp_api/distance.rst cpp_api/linalg.rst cpp_api/matrix.rst cpp_api/mdspan.rst cpp_api/mnmg.rst cpp_api/neighbors.rst cpp_api/random.rst cpp_api/solver.rst cpp_api/sparse.rst cpp_api/stats.rst cpp_api/utils.rst
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/vector_search_tutorial.md
# Vector Search in C++ Tutorial RAFT has several important algorithms for performing vector search on the GPU and this tutorial walks through the primary vector search APIs from start to finish to provide a reference for quick setup and C++ API usage. This tutorial assumes RAFT has been installed and/or added to your build so that you are able to compile and run RAFT code. If not done already, please follow the [build and install instructions](build.md) and consider taking a look at the [example c++ template project](https://github.com/rapidsai/raft/tree/HEAD/cpp/template) for ready-to-go examples that you can immediately build and start playing with. Also take a look at RAFT's library of [reproducible vector search benchmarks](raft_ann_benchmarks.md) to run benchmarks that compare RAFT against other state-of-the-art nearest neighbors algorithms at scale. For more information about the various APIs demonstrated in this tutorial, along with comprehensive usage examples of all the APIs offered by RAFT, please refer to the [RAFT's C++ API Documentation](https://docs.rapids.ai/api/raft/nightly/cpp_api/). ## Step 1: Starting off with RAFT ### CUDA Development? If you are reading this tutorial then you probably know about CUDA and its relationship to general-purpose GPU computing (GPGPU). You probably also know about Nvidia GPUs but might not necessarily be familiar with the programming model nor GPU computing. The good news is that extensive knowledge of CUDA and GPUs are not needed in order to get started with or build applications with RAFT. RAFT hides away most of the complexities behind simple single-threaded stateless functions that are inherently asynchronous, meaning the result of a computation isn't necessarily ready to be used when the function executes and control is given back to the user. 
The functions are, however, allowed to be chained together in a sequence of calls that don't need to wait for subsequent computations to complete in order to continue execution. In fact, the only time you need to wait for the computation to complete is when you are ready to use the result. A common structure you will encounter when using RAFT is a `raft::device_resources` object. This object is a container for important resources for a single GPU that might be needed during computation. If communicating with multiple GPUs, multiple `device_resources` might be needed, one for each GPU. `device_resources` contains several methods for managing its state but most commonly, you'll call the `sync_stream()` to guarantee all recently submitted computation has completed (as mentioned above.) A simple example of using `raft::device_resources` in RAFT: ```c++ #include <raft/core/device_resources.hpp> raft::device_resources res; // Call a bunch of RAFT functions in sequence... res.sync_stream() ``` ### Host vs Device Memory We differentiate between two different types of memory. `host` memory is your traditional RAM memory that is primarily accessible by applications on the CPU. `device` memory, on the other hand, is what we call the special memory on the GPU, which is not accessible from the CPU. In order to access host memory from the GPU, it needs to be explicitly copied to the GPU and in order to access device memory by the CPU, it needs to be explicitly copied there. We have several mechanisms available for allocating and managing the lifetime of device memory on the stack so that we don't need to explicitly allocate and free pointers on the heap. For example, instead of a `std::vector` for host memory, we can use `rmm::device_uvector` on the device. 
The following function will copy an array from host memory to device memory: ```c++ #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <vector> raft::device_resources res; std::vector<int> my_host_vector = {0, 1, 2, 3, 4}; rmm::device_uvector<int> my_device_vector(my_host_vector.size(), res.get_stream()); raft::copy(my_device_vector.data(), my_host_vector.data(), my_host_vector.size(), res.get_stream()); ``` Since a stream is involved in the copy operation above, RAFT functions can be invoked immediately so long as the same `device_resources` instance is used (or, more specifically, the same main stream from the `device_resources`.) As you might notice in the example above, `res.get_stream()` can be used to extract the main stream from a `device_resources` instance. ### Multi-dimensional data representation `rmm::device_uvector` is a great mechanism for allocating and managing a chunk of device memory. While it's possible to use a single array to represent objects in higher dimensions like matrices, it lacks the means to pass that information along. For example, in addition to knowing that we have a 2d structure, we would need to know the number of rows, the number of columns, and even whether we read the columns or rows first (referred to as column- or row-major respectively). For this reason, RAFT relies on the `mdspan` standard, which was composed specifically for this purpose. What's more, `mdspan` itself doesn't actually allocate or own any data on host or device because it's just a view over existing memory on host or device. The `mdspan` simply gives us a way to represent multi-dimensional data so we can pass along the needed metadata to our APIs. Even more powerful is that we can design functions that only accept a matrix of `float` in device memory that is laid out in row-major format. 
The memory-owning counterpart to the `mdspan` is the `mdarray` and the `mdarray` can allocate memory on device or host and carry along with it the metadata about its shape and layout. An `mdspan` can be produced from an `mdarray` for invoking RAFT APIs with `mdarray.view()`. They also follow similar paradigms to the STL, where we represent an immutable `mdspan` of `int` using `mdspan<const int>` instead of `const mdspan<int>` to ensure it's the type carried along by the `mdspan` that's not allowed to change. Many RAFT functions require `mdspan<const T>` to represent immutable input data and there's no implicit conversion between `mdspan<T>` and `mdspan<const T>`, so we use `raft::make_const_mdspan()` to alleviate the pain of constructing a new `mdspan` to invoke these functions. The following example demonstrates how to create `mdarray` matrices in both device and host memory, copy one to the other, and create mdspans out of them: ```c++ #include <raft/core/device_mdarray.hpp> #include <raft/core/host_mdarray.hpp> #include <raft/core/copy.hpp> raft::device_resources res; int n_rows = 10; int n_cols = 10; auto device_matrix = raft::make_device_matrix<float>(res, n_rows, n_cols); auto host_matrix = raft::make_host_matrix<float>(res, n_rows, n_cols); // Set the diagonal to 1 for(int i = 0; i < n_rows; i++) { host_matrix(i, i) = 1; } raft::copy(res, device_matrix.view(), host_matrix.view()); ``` ## Step 2: Generate some data Let's build upon the fundamentals from the prior section and actually invoke some of RAFT's computational APIs on the device. A good starting point is data generation. ```c++ #include <raft/core/device_mdarray.hpp> #include <raft/random/make_blobs.cuh> raft::device_resources res; int n_rows = 10000; int n_cols = 10000; auto dataset = raft::make_device_matrix<float, int>(res, n_rows, n_cols); auto labels = raft::make_device_vector<float, int>(res, n_rows); raft::make_blobs(res, dataset.view(), labels.view()); ``` That's it. 
We've now generated a random 10kx10k matrix with points that cleanly separate into Gaussian clusters, along with a vector of cluster labels for each of the data points. Notice the `cuh` extension in the header file include for `make_blobs`. This signifies to us that this file contains CUDA device functions like kernel code so the CUDA compiler, `nvcc` is needed in order to compile any code that uses it. Generally, any source files that include headers with a `cuh` extension use the `.cu` extension instead of `.cpp`. The rule here is that `cpp` source files contain code which can be compiled with a C++ compiler like `g++` while `cu` files require the CUDA compiler. Since the `make_blobs` code generates the random dataset on the GPU device, we didn't need to do any host to device copies in this one. `make_blobs` is also asynchronous, so if we don't need to copy and use the data in host memory right away, we can continue calling RAFT functions with the `device_resources` instance and the data transformations will all be scheduled on the same stream. ## Step 3: Using brute-force indexes ### Build brute-force index Consider the `(10k, 10k)` shaped random matrix we generated in the previous step. We want to be able to find the k-nearest neighbors for all points of the matrix, or what we refer to as the all-neighbors graph, which means finding the neighbors of all data points within the same matrix. 
```c++ #include <raft/neighbors/brute_force.cuh> raft::device_resources res; // set number of neighbors to search for int const k = 64; auto bfknn_index = raft::neighbors::brute_force::build(res, raft::make_const_mdspan(dataset.view())); ``` ### Query brute-force index ```c++ // using matrix `dataset` from previous example auto search = raft::make_const_mdspan(dataset.view()); // Indices and Distances are of dimensions (n, k) // where n is number of rows in the search matrix auto reference_indices = raft::make_device_matrix<int, int>(res, search.extent(0), k); // stores index of neighbors auto reference_distances = raft::make_device_matrix<float, int>(res, search.extent(0), k); // stores distance to neighbors raft::neighbors::brute_force::search(res, bfknn_index, search, reference_indices.view(), reference_distances.view()); ``` We have established several things here by building a flat index. Now we know the exact 64 neighbors of all points in the matrix, and this algorithm can be generally useful in several ways: 1. Creating a baseline to compare against when building an approximate nearest neighbors index. 2. Directly using the brute-force algorithm when accuracy is more important than speed of computation. Don't worry, our implementation is still the best in-class and will provide not only significant speedups over other brute force methods, but also be relatively quick when the matrices are small! ## Step 4: Using the ANN indexes ### Build a CAGRA index Next we'll train an ANN index. We'll use our graph-based CAGRA algorithm for this example but the other index types use a very similar pattern. 
```c++ #include <raft/neighbors/cagra.cuh> raft::device_resources res; // use default index parameters cagra::index_params index_params; auto index = cagra::build<float, uint32_t>(res, index_params, dataset); ``` ### Query the CAGRA index Now that we've trained a CAGRA index, we can query it by first allocating our output `mdarray` objects and passing the trained index model into the search function. ```c++ // create output arrays auto indices = raft::make_device_matrix<uint32_t>(res, n_rows, k); auto distances = raft::make_device_matrix<float>(res, n_rows, k); // use default search parameters cagra::search_params search_params; // search K nearest neighbors cagra::search<float, uint32_t>( res, search_params, index, search, indices.view(), distances.view()); ``` ## Step 5: Evaluate neighborhood quality In step 3 we built a flat index and queried for exact neighbors while in step 4 we built an ANN index and queried for approximate neighbors. How do you quickly figure out the quality of our approximate neighbors and whether it's in an acceptable range based on your needs? Just compute the `neighborhood_recall` which gives a single value in the range [0, 1]. The closer the value is to 1, the higher the quality of the approximation. 
```c++ #include <raft/stats/neighborhood_recall.cuh> raft::device_resources res; // Assuming matrices as type raft::device_matrix_view and variables as // indices : approximate neighbor indices // reference_indices : exact neighbor indices // distances : approximate neighbor distances // reference_distances : exact neighbor distances // We want our `neighborhood_recall` value in host memory float const recall_scalar = 0.0; auto recall_value = raft::make_host_scalar(recall_scalar); raft::stats::neighborhood_recall(res, raft::make_const_mdspan(indices.view()), raft::make_const_mdspan(reference_indices.view()), recall_value.view(), raft::make_const_mdspan(distances), raft::make_const_mdspan(reference_distances)); res.sync_stream(); ``` Notice we can invoke the functions for index build and search for both algorithms, one right after the other, because we don't need to access any outputs from the algorithms in host memory. We will need to synchronize the stream on the `raft::device_resources` instance before we can read the result of the `neighborhood_recall` computation, though. Similar to a Numpy array, when we use a `host_scalar`, we are really using a multi-dimensional structure that contains only a single dimension, and further a single element. We can use element indexing to access the resulting element directly. ```c++ std::cout << recall_value(0) << std::endl; ``` While it may seem like unnecessary additional work to wrap the result in a `host_scalar` mdspan, this API choice is made intentionally to support the possibility of also receiving the result as a `device_scalar` so that it can be used directly on the device for follow-on computations without having to incur the synchronization or transfer cost of bringing the result to host. This pattern becomes even more important when the result is being computed in a loop, such as an iterative solver, and the cost of synchronization and device-to-host (d2h) transfer becomes very expensive. 
## Advanced features The following sections present some advanced features that we have found can be useful for squeezing more utilization out of GPU hardware. As you've seen in this tutorial, RAFT provides several very useful tools and building blocks for developing accelerated applications beyond vector search capabilities. ### Stream pools Within each CPU thread, CUDA uses `streams` to submit asynchronous work. You can think of a stream as a queue. Each stream can submit work to the GPU independently of other streams but work submitted within each stream is queued and executed in the order in which it was submitted. Similar to how we can use thread pools to bound the parallelism of CPU threads, we can use CUDA stream pools to bound the amount of concurrent asynchronous work that can be scheduled on a GPU. Each instance of `device_resources` has a main stream, but can also create a stream pool. For a single CPU thread, multiple different instances of `device_resources` can be created with different main streams and used to invoke a series of RAFT functions concurrently on the same or different GPU devices, so long as the target devices have available resources to perform the work. Once a device is saturated, queued work on streams will be scheduled and wait for a chance to do more work. During this time the streams are waiting, the CPU thread will still continue its own execution asynchronously unless `sync_stream_pool()` is called, causing the thread to block and wait for the thread pools to complete. Also, beware that before splitting GPU work onto multiple different concurrent streams, it can often be important to wait for the main stream in the `device_resources`. This can be done with `wait_stream_pool_on_stream()`. 
To summarize, if wanting to execute multiple different streams in parallel, we would often use a stream pool like this: ```c++ #include <raft/core/device_resources.hpp> #include <rmm/cuda_stream_pool.hpp> #include <rmm/cuda_stream.hpp> int n_streams = 5; rmm::cuda_stream stream; auto stream_pool = std::make_shared<rmm::cuda_stream_pool>(5); raft::device_resources res(stream.view(), stream_pool); // Submit some work on the main stream... res.wait_stream_pool_on_stream(); for(int i = 0; i < n_streams; ++i) { rmm::cuda_stream_view stream_from_pool = res.get_next_usable_stream(); raft::device_resources pool_res(stream_from_pool); // Submit some work with pool_res... } res.sync_stream_pool(); ``` ### Device resources manager In multi-threaded applications, it is often useful to create a set of `raft::device_resources` objects on startup to avoid the overhead of re-initializing underlying resources every time a `raft::device_resources` object is needed. To help simplify this common initialization logic, RAFT provides a `raft::device_resources_manager` to handle this for downstream applications. On startup, the application can specify certain limits on the total resource consumption of the `raft::device_resources` objects that will be generated: ```c++ #include <raft/core/device_resources_manager.hpp> void initialize_application() { // Set the total number of CUDA streams to use on each GPU across all CPU // threads. If this method is not called, the default stream per thread // will be used. raft::device_resources_manager::set_streams_per_device(16); // Create a memory pool with given max size in bytes. Passing std::nullopt will allow // the pool to grow to the available memory of the device. raft::device_resources_manager::set_max_mem_pool_size(std::nullopt); // Set the initial size of the memory pool in bytes. 
raft::device_resources_manager::set_init_mem_pool_size(16000000); // If neither of the above methods are called, no memory pool will be used } ``` While this example shows some commonly used settings, `raft::device_resources_manager` provides support for several other resource options and constraints, including options to initialize entire stream pools that can be used by an individual `raft::device_resources` object. After this initialization method is called, the following function could be called from any CPU thread: ```c++ void foo() { raft::device_resources const& res = raft::device_resources_manager::get_device_resources(); // Submit some work with res res.sync_stream(); } ``` If any `raft::device_resources_manager` setters are called _after_ the first call to `raft::device_resources_manager::get_device_resources()`, these new settings are ignored, and a warning will be logged. If a thread calls `raft::device_resources_manager::get_device_resources()` multiple times, it is guaranteed to access the same underlying `raft::device_resources` object every time. This can be useful for chaining work in different calls on the same thread without keeping a persistent reference to the resources object. ### Device memory resources The RAPIDS software ecosystem makes heavy use of the [RAPIDS Memory Manager](https://github.com/rapidsai/rmm) (RMM) to enable zero-copy sharing of device memory across various GPU-enabled libraries such as PyTorch, Jax, Tensorflow, and FAISS. A really powerful feature of RMM is the ability to set a memory resource, such as a pooled memory resource that allocates a block of memory up front to speed up subsequent smaller allocations, and have all the libraries in the GPU ecosystem recognize and use that same memory resource for all of their memory allocations. 
As an example, the following code snippet creates a `pool_memory_resource` and sets it as the default memory resource, which means all other libraries that use RMM will now allocate their device memory from this same pool: ```c++ #include <rmm/mr/device/pool_memory_resource.hpp> rmm::mr::cuda_memory_resource cuda_mr; // Construct a resource that uses a coalescing best-fit pool allocator rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> pool_mr{&cuda_mr}; rmm::mr::set_current_device_resource(&pool_mr); // Updates the current device resource pointer to `pool_mr` ``` The `raft::device_resources` object will now also use the `rmm::current_device_resource`. This isn't limited to C++, however. Often a user will be interacting with PyTorch, RAPIDS, or Tensorflow through Python and so they can set and use RMM's `current_device_resource` [right in Python](https://github.com/rapidsai/rmm#using-rmm-in-python-code). ### Workspace memory resource As mentioned above, `raft::device_resources` will use `rmm::current_device_resource` by default for all memory allocations. However, there are times when a particular algorithm might benefit from using a different memory resource such as a `managed_memory_resource`, which creates a unified memory space between device and host memory, paging memory in and out of device as needed. Most of RAFT's algorithms allocate temporary memory as needed to perform their computations and we can control the memory resource used for these temporary allocations through the `workspace_resource` in the `raft::device_resources` instance. For some applications, the `managed_memory_resource`, can enable a memory space that is larger than the GPU, thus allowing a natural spilling to host memory when needed. This isn't always the best way to use managed memory, though, as it can quickly lead to thrashing and severely impact performance. 
Still, when it can be used, it provides a very powerful tool that can also avoid out of memory errors when enough host memory is available. The following creates a managed memory allocator and sets it as the `workspace_resource` of the `raft::device_resources` instance: ```c++ #include <raft/core/device_resources.hpp> #include <rmm/mr/device/managed_memory_resource.hpp> std::shared_ptr<rmm::mr::managed_memory_resource> managed_resource; raft::device_resources res(managed_resource); ``` The `workspace_resource` uses an `rmm::mr::limiting_resource_adaptor`, which limits the total amount of allocation possible. This allows RAFT algorithms to work within the confines of the memory constraints imposed by the user so that things like batch sizes can be automatically set to reasonable values without exceeding the allotted memory. By default, this limit restricts the memory allocation space for temporary workspace buffers to the memory available on the device. The below example specifies the total number of bytes that RAFT can use for temporary workspace allocations to 3GB: ```c++ #include <raft/core/device_resources.hpp> #include <rmm/mr/device/managed_memory_resource.hpp> #include <optional> std::shared_ptr<rmm::mr::managed_memory_resource> managed_resource; raft::device_resources res(managed_resource, std::make_optional<std::size_t>(3ULL * 1024 * 1024 * 1024)); ```
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/ann_benchmarks_build.md
### Dependencies CUDA 11 and a GPU with Pascal architecture or later are required to run the benchmarks. Please refer to the [installation docs](https://docs.rapids.ai/api/raft/stable/build.html#cuda-gpu-requirements) for the base requirements to build RAFT. In addition to the base requirements for building RAFT, additional dependencies needed to build the ANN benchmarks include: 1. FAISS GPU >= 1.7.1 2. Google Logging (GLog) 3. H5Py 4. HNSWLib 5. nlohmann_json 6. GGNN [rapids-cmake](https://github.com/rapidsai/rapids-cmake) is used to build the ANN benchmarks so the code for dependencies not already supplied in the CUDA toolkit will be downloaded and built automatically. The easiest (and most reproducible) way to install the dependencies needed to build the ANN benchmarks is to use the conda environment file located in the `conda/environments` directory of the RAFT repository. The following command will use `mamba` (which is preferred over `conda`) to build and activate a new environment for compiling the benchmarks: ```bash mamba env create --name raft_ann_benchmarks -f conda/environments/bench_ann_cuda-118_arch-x86_64.yaml conda activate raft_ann_benchmarks ``` The above conda environment will also reduce the compile times as dependencies like FAISS will already be installed and not need to be compiled with `rapids-cmake`. ### Compiling the Benchmarks After the needed dependencies are satisfied, the easiest way to compile ANN benchmarks is through the `build.sh` script in the root of the RAFT source code repository. 
The following will build the executables for all the supported algorithms: ```bash ./build.sh bench-ann ``` You can limit the algorithms that are built by providing a semicolon-delimited list of executable names (each algorithm is suffixed with `_ANN_BENCH`): ```bash ./build.sh bench-ann -n --limit-bench-ann=HNSWLIB_ANN_BENCH;RAFT_IVF_PQ_ANN_BENCH ``` Available targets to use with `--limit-bench-ann` are: - FAISS_IVF_FLAT_ANN_BENCH - FAISS_IVF_PQ_ANN_BENCH - FAISS_BFKNN_ANN_BENCH - GGNN_ANN_BENCH - HNSWLIB_ANN_BENCH - RAFT_CAGRA_ANN_BENCH - RAFT_IVF_PQ_ANN_BENCH - RAFT_IVF_FLAT_ANN_BENCH By default, the `*_ANN_BENCH` executables infer the dataset's datatype from the filename's extension. For example, an extension of `fbin` uses a `float` datatype, `f16bin` uses a `float16` datatype, extension of `i8bin` uses `int8_t` datatype, and `u8bin` uses `uint8_t` type. Currently, only `float`, `float16`, `int8_t`, and `uint8_t` are supported.
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/wiki_all_dataset.md
# Wiki-all Dataset The `wiki-all` dataset was created to stress vector search algorithms at scale with both a large number of vectors and dimensions. The entire dataset contains 88M vectors with 768 dimensions and is meant for testing the types of vectors one would typically encounter in retrieval augmented generation (RAG) workloads. The full dataset is ~251GB in size, which is intentionally larger than the typical memory of GPUs. The massive scale is intended to promote the use of compression and efficient out-of-core methods for both indexing and search. The dataset is composed of all the available languages in the [Cohere Wikipedia dataset](https://huggingface.co/datasets/Cohere/wikipedia-22-12). An [English version]( https://www.kaggle.com/datasets/jjinho/wikipedia-20230701) is also available. The dataset is composed of English wiki texts from [Kaggle](https://www.kaggle.com/datasets/jjinho/wikipedia-20230701) and multi-lingual wiki texts from [Cohere Wikipedia](https://huggingface.co/datasets/Cohere/wikipedia-22-12). Cohere's English Texts are older (2022) and smaller than the Kaggle English Wiki texts (2023) so the English texts have been removed from Cohere completely. The final Wiki texts include English Wiki from Kaggle and the other languages from Cohere. The English texts constitute 50% of the total text size. To form the final dataset, the Wiki texts were chunked into 85 million 128-token pieces. For reference, Cohere chunks Wiki texts into 104-token pieces. Finally, the embeddings of each chunk were computed using the [paraphrase-multilingual-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-mpnet-base-v2) embedding model. The resulting dataset is an embedding matrix of size 88 million by 768. Also included with the dataset is a query file containing 10k query vectors and a groundtruth file to evaluate nearest neighbors algorithms. 
## Getting the dataset ### Full dataset A version of the dataset is made available in the binary format that can be used directly by the [raft-ann-bench](https://docs.rapids.ai/api/raft/nightly/raft_ann_benchmarks/) tool. The full 88M dataset is ~251GB and the download link below contains tarballs that have been split into multiple parts. The following will download all 10 parts and untar them to a `wiki_all_88M` directory: ```bash curl -s https://data.rapids.ai/raft/datasets/wiki_all/wiki_all.tar.{00..9} | tar -xf - -C wiki_all_88M/ ``` The above has the unfortunate drawback that if the command should fail for any reason, all the parts need to be re-downloaded. The files can also be downloaded individually and then untarred to the directory. Each file is ~27GB and there are 10 of them. ```bash curl -s https://data.rapids.ai/raft/datasets/wiki_all/wiki_all.tar.00 ... curl -s https://data.rapids.ai/raft/datasets/wiki_all/wiki_all.tar.09 cat wiki_all.tar.* | tar -xf - -C wiki_all_88M/ ``` ### 1M and 10M subsets Also available are 1M and 10M subsets of the full dataset which are 2.9GB and 29GB, respectively. These subsets also include query sets of 10k vectors and corresponding groundtruth files. ```bash curl -s https://data.rapids.ai/raft/datasets/wiki_all_1M/wiki_all_1M.tar curl -s https://data.rapids.ai/raft/datasets/wiki_all_10M/wiki_all_10M.tar ``` ## Using the dataset After the dataset is downloaded and extracted to the `wiki_all_88M` directory (or `wiki_all_1M`/`wiki_all_10M` depending on whether the subsets are used), the files can be used in the benchmarking tool. The dataset name is `wiki_all` (or `wiki_all_1M`/`wiki_all_10M`), and the benchmarking tool can be used by specifying the appropriate name `--dataset wiki_all` in the scripts.
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/ann_benchmarks_dataset.md
# ANN Benchmarks Datasets A dataset usually has 4 binary files containing database vectors, query vectors, ground truth neighbors and their corresponding distances. For example, Glove-100 dataset has files `base.fbin` (database vectors), `query.fbin` (query vectors), `groundtruth.neighbors.ibin` (ground truth neighbors), and `groundtruth.distances.fbin` (ground truth distances). The first two files are for index building and searching, while the other two are associated with a particular distance and are used for evaluation. The file suffixes `.fbin`, `.f16bin`, `.ibin`, `.u8bin`, and `.i8bin` denote that the data type of vectors stored in the file are `float32`, `float16`(a.k.a `half`), `int`, `uint8`, and `int8`, respectively. These binary files are little-endian and the format is: the first 8 bytes are `num_vectors` (`uint32_t`) and `num_dimensions` (`uint32_t`), and the following `num_vectors * num_dimensions * sizeof(type)` bytes are vectors stored in row-major order. Some implementation can take `float16` database and query vectors as inputs and will have better performance. Use `script/fbin_to_f16bin.py` to transform dataset from `float32` to `float16` type. Commonly used datasets can be downloaded from two websites: 1. Million-scale datasets can be found at the [Data sets](https://github.com/erikbern/ann-benchmarks#data-sets) section of [`ann-benchmarks`](https://github.com/erikbern/ann-benchmarks). However, these datasets are in HDF5 format. Use `cpp/bench/ann/scripts/hdf5_to_fbin.py` to transform the format. A few Python packages are required to run it: ```bash pip3 install numpy h5py ``` The usage of this script is: ```bash $ cpp/bench/ann/scripts/hdf5_to_fbin.py usage: scripts/hdf5_to_fbin.py [-n] <input>.hdf5 -n: normalize base/query set outputs: <input>.base.fbin <input>.query.fbin <input>.groundtruth.neighbors.ibin <input>.groundtruth.distances.fbin ``` So for an input `.hdf5` file, four output binary files will be produced. 
See previous section for an example of preprocessing GloVe dataset. Most datasets provided by `ann-benchmarks` use `Angular` or `Euclidean` distance. `Angular` denotes cosine distance. However, computing cosine distance reduces to computing inner product by normalizing vectors beforehand. In practice, we can always do the normalization to decrease computation cost, so it's better to measure the performance of inner product rather than cosine distance. The `-n` option of `hdf5_to_fbin.py` can be used to normalize the dataset. 2. <a id='billion-scale'></a>Billion-scale datasets can be found at [`big-ann-benchmarks`](http://big-ann-benchmarks.com). The ground truth file contains both neighbors and distances, thus should be split. A script is provided for this: ```bash $ cpp/bench/ann/scripts/split_groundtruth.pl usage: script/split_groundtruth.pl input output_prefix ``` Take Deep-1B dataset as an example: ```bash pushd cd cpp/bench/ann mkdir -p data/deep-1B && cd data/deep-1B # download manually "Ground Truth" file of "Yandex DEEP" # suppose the file name is deep_new_groundtruth.public.10K.bin ../../scripts/split_groundtruth.pl deep_new_groundtruth.public.10K.bin groundtruth # two files 'groundtruth.neighbors.ibin' and 'groundtruth.distances.fbin' should be produced popd ``` Besides ground truth files for the whole billion-scale datasets, this site also provides ground truth files for the first 10M or 100M vectors of the base sets. This means we can use these billion-scale datasets as million-scale datasets. To facilitate this, an optional parameter `subset_size` for dataset can be used. See the next step for further explanation. ## Generate ground truth If you have a dataset, but no corresponding ground truth file, then you can generate ground truth using the `generate_groundtruth` utility. 
Example usage: ```bash # With existing query file python -m raft-ann-bench.generate_groundtruth --dataset /dataset/base.fbin --output=groundtruth_dir --queries=/dataset/query.public.10K.fbin # With randomly generated queries python -m raft-ann-bench.generate_groundtruth --dataset /dataset/base.fbin --output=groundtruth_dir --queries=random --n_queries=10000 # Using only a subset of the dataset. Define queries by randomly # selecting vectors from the (subset of the) dataset. python -m raft-ann-bench.generate_groundtruth --dataset /dataset/base.fbin --nrows=2000000 --output=groundtruth_dir --queries=random-choice --n_queries=10000 ```
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/contributing.md
# Contributing If you are interested in contributing to RAFT, your contributions will fall into three categories: 1. You want to report a bug, feature request, or documentation issue - File an [issue](https://github.com/rapidsai/RAFT/issues/new/choose) describing what you encountered or what you want to see changed. - The RAPIDS team will evaluate the issues and triage them, scheduling them for a release. If you believe the issue needs priority attention comment on the issue to notify the team. 2. You want to propose a new Feature and implement it - Post about your intended feature, and we shall discuss the design and implementation. - Once we agree that the plan looks good, go ahead and implement it, using the [code contributions](#code-contributions) guide below. 3. You want to implement a feature or bug-fix for an outstanding issue - Follow the [code contributions](#code-contributions) guide below. - If you need more context on a particular issue, please ask and we shall provide. ## Code contributions ### Your first issue 1. Read the project's [README.md](https://github.com/rapidsai/raft) to learn how to setup the development environment 2. Find an issue to work on. The best way is to look for the [good first issue](https://github.com/rapidsai/RAFT/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) or [help wanted](https://github.com/rapidsai/RAFT/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) labels 3. Comment on the issue saying you are going to work on it 4. Code! Make sure to update unit tests! 5. When done, [create your pull request](https://github.com/rapidsai/RAFT/compare) 6. Verify that CI passes all [status checks](https://help.github.com/articles/about-status-checks/). Fix if needed 7. Wait for other developers to review your code and update code as needed 8. 
Once reviewed and approved, a RAPIDS developer will merge your pull request Remember, if you are unsure about anything, don't hesitate to comment on issues and ask for clarifications! ### Python / Pre-commit hooks RAFT uses [pre-commit](https://pre-commit.com/) to execute code linters and formatters such as [Black](https://black.readthedocs.io/en/stable/), [isort](https://pycqa.github.io/isort/), and [flake8](https://flake8.pycqa.org/en/latest/). These tools ensure a consistent code format throughout the project. Using pre-commit ensures that linter versions and options are aligned for all developers. Additionally, there is a CI check in place to enforce that committed code follows our standards. To use `pre-commit`, install via `conda` or `pip`: ```bash conda install -c conda-forge pre-commit ``` ```bash pip install pre-commit ``` Then run pre-commit hooks before committing code: ```bash pre-commit run ``` Optionally, you may set up the pre-commit hooks to run automatically when you make a git commit. This can be done by running: ```bash pre-commit install ``` Now code linters and formatters will be run each time you commit changes. You can skip these checks with `git commit --no-verify` or with the short version `git commit -n`. ### Seasoned developers Once you have gotten your feet wet and are more comfortable with the code, you can look at the prioritized issues of our next release in our [project boards](https://github.com/rapidsai/RAFT/projects). > **Pro Tip:** Always look at the release board with the highest number for issues to work on. This is where RAPIDS developers also focus their efforts. Look at the unassigned issues, and find an issue you are comfortable with contributing to. Start with _Step 3_ from above, commenting on the issue to let others know you are working on it. If you have any questions related to the implementation of the issue, ask them in the issue instead of the PR. 
## Attribution Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/ann_benchmarks_param_tuning.md
# ANN Benchmarks Parameter Tuning Guide This guide outlines the various parameter settings that can be specified in [RAFT ANN Benchmark](raft_ann_benchmarks.md) json configuration files and explains the impact they have on corresponding algorithms to help inform their settings for benchmarking across desired levels of recall. ## RAFT Indexes ### `raft_brute_force` Use RAFT brute-force index for exact search. Brute-force has no further build or search parameters. ### `raft_ivf_flat` IVF-flat uses an inverted-file index, which partitions the vectors into a series of clusters, or lists, storing them in an interleaved format which is optimized for fast distance computation. The searching of an IVF-flat index reduces the total vectors in the index to those within some user-specified nearest clusters called probes. IVF-flat is a simple algorithm which won't save any space, but it provides competitive search times even at higher levels of recall. | Parameter | Type | Required | Data Type | Default | Description | |----------------------|------------------|----------|----------------------------|----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nlist` | `build_param` | Y | Positive Integer >0 | | Number of clusters to partition the vectors into. Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `niter` | `build_param` | N | Positive Integer >0 | 20 | Number of k-means iterations to use when training the clusters. | | `ratio` | `build_param` | N | Positive Integer >0 | 2 | `1/ratio` is the number of training points which should be used to train the clusters. 
| | `dataset_memory_type` | `build_param` | N | ["device", "host", "mmap"] | "device" | What memory type should the dataset reside? | | `query_memory_type` | `search_params` | N | ["device", "host", "mmap"] | "device | What memory type should the queries reside? | | `nprobe` | `search_params` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. | ### `raft_ivf_pq` IVF-pq is an inverted-file index, which partitions the vectors into a series of clusters, or lists, in a similar way to IVF-flat above. The difference is that IVF-PQ uses product quantization to also compress the vectors, giving the index a smaller memory footprint. Unfortunately, higher levels of compression can also shrink recall, which a refinement step can improve when the original vectors are still available. | Parameter | Type | Required | Data Type | Default | Description | |------------------------|----------------|---|----------------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nlist` | `build_param` | Y | Positive Integer >0 | | Number of clusters to partition the vectors into. Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `niter` | `build_param` | N | Positive Integer >0 | 20 | Number of k-means iterations to use when training the clusters. | | `ratio` | `build_param` | N | Positive Integer >0 | 2 | `1/ratio` is the number of training points which should be used to train the clusters. | | `pq_dim` | `build_param` | N | Positive Integer. Multiple of 8. | 0 | Dimensionality of the vector after product quantization. When 0, a heuristic is used to select this value. `pq_dim` * `pq_bits` must be a multiple of 8. 
| | `pq_bits` | `build_param` | N | Positive Integer. [4-8] | 8 | Bit length of the vector element after quantization. | | `codebook_kind` | `build_param` | N | ["cluster", "subspace"] | "subspace" | Type of codebook. See the [API docs](https://docs.rapids.ai/api/raft/nightly/cpp_api/neighbors_ivf_pq/#_CPPv412codebook_gen) for more detail | | `dataset_memory_type` | `build_param` | N | ["device", "host", "mmap"] | "device" | What memory type should the dataset reside? | | `query_memory_type` | `search_params` | N | ["device", "host", "mmap"] | "device | What memory type should the queries reside? | | `nprobe` | `search_params` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. | | `internalDistanceDtype` | `search_params` | N | [`float`, `half`] | `half` | The precision to use for the distance computations. Lower precision can increase performance at the cost of accuracy. | | `smemLutDtype` | `search_params` | N | [`float`, `half`, `fp8`] | `half` | The precision to use for the lookup table in shared memory. Lower precision can increase performance at the cost of accuracy. | | `refine_ratio` | `search_params` | N| Positive Number >=0 | 0 | `refine_ratio * k` nearest neighbors are queried from the index initially and an additional refinement step improves recall by selecting only the best `k` neighbors. | ### `raft_cagra` <a id='raft-cagra'></a>CAGRA uses a graph-based index, which creates an intermediate, approximate kNN graph using IVF-PQ and then further refining and optimizing to create a final kNN graph. This kNN graph is used by CAGRA as an index for search. 
| Parameter | Type | Required | Data Type | Default | Description | |-----------------------------|----------------|----------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `graph_degree` | `build_param` | N | Positive Integer >0 | 64 | Degree of the final kNN graph index. | | `intermediate_graph_degree` | `build_param` | N | Positive Integer >0 | 128 | Degree of the intermediate kNN graph. | | `graph_build_algo` | `build_param` | N | ["IVF_PQ", "NN_DESCENT"] | "IVF_PQ" | Algorithm to use for building the kNN graph | | `dataset_memory_type` | `build_param` | N | ["device", "host", "mmap"] | "device" | What memory type should the dataset reside while constructing the index? | | `query_memory_type` | `search_params` | N | ["device", "host", "mmap"] | "device" | What memory type should the queries reside? | | `itopk` | `search_param` | N | Positive Integer >0 | 64 | Number of intermediate search results retained during the search. Higher values improve search accuracy at the cost of speed. | | `search_width` | `search_param` | N | Positive Integer >0 | 1 | Number of graph nodes to select as the starting point for the search in each iteration. | | `max_iterations` | `search_param` | N | Integer >=0 | 0 | Upper limit of search iterations. Auto select when 0. | | `algo` | `search_param` | N | string | "auto" | Algorithm to use for search. Possible values: {"auto", "single_cta", "multi_cta", "multi_kernel"} | | `graph_memory_type` | `search_param` | N | string | "device" | Memory type to store graph. Must be one of {"device", "host_pinned", "host_huge_page"}. | | `internal_dataset_memory_type` | `search_param` | N | string | "device" | Memory type to store dataset in the index. Must be one of {"device", "host_pinned", "host_huge_page"}. 
| The `graph_memory_type` or `internal_dataset_memory_type` options can be useful for large datasets that do not fit the device memory. Setting `internal_dataset_memory_type` other than `device` has negative impact on search speed. Using `host_huge_page` option is only supported on systems with Heterogeneous Memory Management or on platforms that natively support GPU access to system allocated memory, for example Grace Hopper. To fine tune CAGRA index building we can customize IVF-PQ index builder options using the following settings. These take effect only if `graph_build_algo == "IVF_PQ"`. It is recommended to experiment using a separate IVF-PQ index to find the config that gives the largest QPS for large batch. Recall does not need to be very high, since CAGRA further optimizes the kNN neighbor graph. Some of the default values are derived from the dataset size which is assumed to be [n_vecs, dim]. | Parameter | Type | Required | Data Type | Default | Description | |------------------------|----------------|---|----------------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `ivf_pq_build_nlist` | `build_param` | N | Positive Integer >0 | n_vecs / 2500 | Number of clusters to partition the vectors into. Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `ivf_pq_build_niter` | `build_param` | N | Positive Integer >0 | 25 | Number of k-means iterations to use when training the clusters. | | `ivf_pq_build_ratio` | `build_param` | N | Positive Integer >0 | 10 | `1/ratio` is the number of training points which should be used to train the clusters. | | `ivf_pq_build_pq_dim` | `build_param` | N | Positive Integer. Multiple of 8. | dim/2 rounded up to 8 | Dimensionality of the vector after product quantization. 
When 0, a heuristic is used to select this value. `pq_dim` * `pq_bits` must be a multiple of 8. | | `ivf_pq_build_pq_bits` | `build_param` | N | Positive Integer. [4-8] | 8 | Bit length of the vector element after quantization. | | `ivf_pq_build_codebook_kind` | `build_param` | N | ["cluster", "subspace"] | "subspace" | Type of codebook. See the [API docs](https://docs.rapids.ai/api/raft/nightly/cpp_api/neighbors_ivf_pq/#_CPPv412codebook_gen) for more detail | | `ivf_pq_search_nprobe` | `build_params` | N | Positive Integer >0 | min(2*dim, nlist) | The closest number of clusters to search for each query vector. | | `ivf_pq_search_internalDistanceDtype` | `build_params` | N | [`float`, `half`] | `fp8` | The precision to use for the distance computations. Lower precision can increase performance at the cost of accuracy. | | `ivf_pq_search_smemLutDtype` | `build_params` | N | [`float`, `half`, `fp8`] | `half` | The precision to use for the lookup table in shared memory. Lower precision can increase performance at the cost of accuracy. | | `ivf_pq_search_refine_ratio` | `build_params` | N| Positive Number >=0 | 2 | `refine_ratio * k` nearest neighbors are queried from the index initially and an additional refinement step improves recall by selecting only the best `k` neighbors. | Alternatively, if `graph_build_algo == "NN_DESCENT"`, then we can customize the following parameters | Parameter | Type | Required | Data Type | Default | Description | |-----------------------------|----------------|----------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nn_descent_niter` | `build_param` | N | Positive Integer>0 | 20 | Number of NN Descent iterations. 
| | `nn_descent_intermediate_graph_degree` | `build_param` | N | Positive Integer>0 | `intermediate_graph_degree` * 1.5 | Intermediate graph degree during NN descent iterations | | `nn_descent_max_iterations` | `build_param` | N | Positive Integer>0 | 20 | Alias for `nn_descent_niter` | | `nn_descent_termination_threshold` | `build_param` | N | Positive float>0 | 0.0001 | Termination threshold for NN descent. | ### `raft_cagra_hnswlib` This is a benchmark that enables interoperability between `CAGRA`-built indexes and `HNSW` search. It uses the `CAGRA` built graph as the base layer of an `hnswlib` index to search queries only within the base layer (this is enabled with a simple patch to `hnswlib`). `build_param` : Same as `build_param` of [CAGRA](#raft-cagra) `search_param` : Same as `search_param` of [hnswlib](#hnswlib) ## FAISS Indexes ### `faiss_gpu_flat` Use FAISS flat index on the GPU, which performs an exact search using brute-force and doesn't have any further build or search parameters. ### `faiss_gpu_ivf_flat` IVF-flat uses an inverted-file index, which partitions the vectors into a series of clusters, or lists, storing them in an interleaved format which is optimized for fast distance computation. The searching of an IVF-flat index reduces the total vectors in the index to those within some user-specified nearest clusters called probes. IVF-flat is a simple algorithm which won't save any space, but it provides competitive search times even at higher levels of recall. | Parameter | Type | Required | Data Type | Default | Description | |-----------|----------------|----------|---------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nlists` | `build_param` | Y | Positive Integer >0 | | Number of clusters to partition the vectors into. 
Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `ratio` | `build_param` | N | Positive Integer >0 | 2 | `1/ratio` is the number of training points which should be used to train the clusters. | | `nprobe` | `search_params` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. | ### `faiss_gpu_ivf_pq` IVF-pq is an inverted-file index, which partitions the vectors into a series of clusters, or lists, in a similar way to IVF-flat above. The difference is that IVF-PQ uses product quantization to also compress the vectors, giving the index a smaller memory footprint. Unfortunately, higher levels of compression can also shrink recall, which a refinement step can improve when the original vectors are still available. | Parameter | Type | Required | Data Type | Default | Description | |------------------|----------------|----------|----------------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nlist` | `build_param` | Y | Positive Integer >0 | | Number of clusters to partition the vectors into. Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `ratio` | `build_param` | N | Positive Integer >0 | 2 | `1/ratio` is the number of training points which should be used to train the clusters. | | `M_ratio` | `build_param` | Y | Positive Integer Power of 2 [8-64] | | Ratio of number of chunks or subquantizers for each vector. Computed by `dims` / `M_ratio` | | `usePrecomputed` | `build_param` | N | Boolean. Default=`false` | `false` | Use pre-computed lookup tables to speed up search at the cost of increased memory usage. 
| | `useFloat16` | `build_param` | N | Boolean. Default=`false` | `false` | Use half-precision floats for clustering step. | | `nprobe` | `search_params` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. | | `refine_ratio` | `search_params` | N| Positive Number >=0 | 0 | `refine_ratio * k` nearest neighbors are queried from the index initially and an additional refinement step improves recall by selecting only the best `k` neighbors. | ### `faiss_cpu_flat` Use FAISS flat index on the CPU, which performs an exact search using brute-force and doesn't have any further build or search parameters. | Parameter | Type | Required | Data Type | Default | Description | |-----------|----------------|----------|---------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `numThreads` | `search_params` | N | Positive Integer >0 | 1 | Number of threads to use for queries. | ### `faiss_cpu_ivf_flat` Use FAISS IVF-Flat index on CPU | Parameter | Type | Required | Data Type | Default | Description | |----------|----------------|----------|---------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nlist` | `build_param` | Y | Positive Integer >0 | | Number of clusters to partition the vectors into. Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `ratio` | `build_param` | N | Positive Integer >0 | 2 | `1/ratio` is the number of training points which should be used to train the clusters. 
| | `nprobe` | `search_params` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. | | `numThreads` | `search_params` | N | Positive Integer >0 | 1 | Number of threads to use for queries. | ### `faiss_cpu_ivf_pq` Use FAISS IVF-PQ index on CPU | Parameter | Type | Required | Data Type | Default | Description | |------------------|----------------|----------|------------------------------------|---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nlist` | `build_param` | Y | Positive Integer >0 | | Number of clusters to partition the vectors into. Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `ratio` | `build_param` | N | Positive Integer >0 | 2 | `1/ratio` is the number of training points which should be used to train the clusters. | | `M` | `build_param` | Y | Positive Integer Power of 2 [8-64] | | Number of chunks or subquantizers for each vector. | | `usePrecomputed` | `build_param` | N | Boolean. Default=`false` | `false` | Use pre-computed lookup tables to speed up search at the cost of increased memory usage. | | `bitsPerCode` | `build_param` | N | Positive Integer [4-8] | 8 | Number of bits to use for each code. | | `nprobe` | `search_params` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. | | `refine_ratio` | `search_params` | N| Positive Number >=0 | 0 | `refine_ratio * k` nearest neighbors are queried from the index initially and an additional refinement step improves recall by selecting only the best `k` neighbors. 
| | `numThreads` | `search_params` | N | Positive Integer >0 | 1 | Number of threads to use for queries. | ## HNSW <a id='hnswlib'></a> ### `hnswlib` | Parameter | Type | Required | Data Type | Default | Description | |------------------|-----------------|----------|--------------------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `efConstruction` | `build_param` | Y | Positive Integer >0 | | Controls index time and accuracy. Bigger values increase the index quality. At some point, increasing this will no longer improve the quality. | | `M` | `build_param` | Y | Positive Integer often between 2-100 | | Number of bi-directional links create for every new element during construction. Higher values work for higher intrinsic dimensionality and/or high recall, low values can work for datasets with low intrinsic dimensionality and/or low recalls. Also affects the algorithm's memory consumption. | | `numThreads` | `build_param` | N | Positive Integer >0 | 1 | Number of threads to use to build the index. | | `ef` | `search_param` | Y | Positive Integer >0 | | Size of the dynamic list for the nearest neighbors used for search. Higher value leads to more accurate but slower search. Cannot be lower than `k`. | | `numThreads` | `search_params` | N | Positive Integer >0 | 1 | Number of threads to use for queries. | Please refer to [HNSW algorithm parameters guide](https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md) from `hnswlib` to learn more about these arguments.
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/ann_benchmarks_low_level.md
### Low-level Scripts and Executables #### End-to-end Example An end-to-end example (run from the RAFT source code root directory): ```bash # (0) get raft sources git clone https://github.com/rapidsai/raft.git cd raft # (1) prepare a dataset export PYTHONPATH=python/raft-ann-bench/src:$PYTHONPATH python -m raft-ann-bench.get_dataset --dataset glove-100-angular --normalize # option --normalize is used here to normalize vectors so cosine distance is converted # to inner product; don't use -n for l2 distance # (2) build index $CONDA_PREFIX/bin/ann/RAFT_IVF_FLAT_ANN_BENCH \ --data_prefix=datasets \ --build \ --benchmark_filter="raft_ivf_flat\..*" \ python/raft-ann-bench/src/raft-ann-bench/run/conf/glove-100-inner.json # (3) search $CONDA_PREFIX/bin/ann/RAFT_IVF_FLAT_ANN_BENCH\ --data_prefix=datasets \ --benchmark_min_time=2s \ --benchmark_out=ivf_flat_search.csv \ --benchmark_out_format=csv \ --benchmark_counters_tabular \ --search \ --benchmark_filter="raft_ivf_flat\..*" \ python/raft-ann-bench/src/raft-ann-bench/run/conf/glove-100-inner.json # optional step: plot QPS-Recall figure using data in ivf_flat_search.csv with your favorite tool ``` ##### Step 1: Prepare Dataset Note: the preferred way to download and process smaller (million scale) datasets is to use the `get_dataset` script as demonstrated in the example above. A dataset usually has 4 binary files containing database vectors, query vectors, ground truth neighbors and their corresponding distances. For example, Glove-100 dataset has files `base.fbin` (database vectors), `query.fbin` (query vectors), `groundtruth.neighbors.ibin` (ground truth neighbors), and `groundtruth.distances.fbin` (ground truth distances). The first two files are for index building and searching, while the other two are associated with a particular distance and are used for evaluation. 
The file suffixes `.fbin`, `.f16bin`, `.ibin`, `.u8bin`, and `.i8bin` denote that the data type of vectors stored in the file are `float32`, `float16`(a.k.a `half`), `int`, `uint8`, and `int8`, respectively. These binary files are little-endian and the format is: the first 8 bytes are `num_vectors` (`uint32_t`) and `num_dimensions` (`uint32_t`), and the following `num_vectors * num_dimensions * sizeof(type)` bytes are vectors stored in row-major order. Some implementations can take `float16` database and query vectors as inputs and will have better performance. Use `python/raft-ann-bench/src/raft-ann-bench/get_dataset/fbin_to_f16bin.py` to transform dataset from `float32` to `float16` type. Commonly used datasets can be downloaded from two websites: 1. Million-scale datasets can be found at the [Data sets](https://github.com/erikbern/ann-benchmarks#data-sets) section of [`ann-benchmarks`](https://github.com/erikbern/ann-benchmarks). However, these datasets are in HDF5 format. Use `python/raft-ann-bench/src/raft-ann-bench/get_dataset/hdf5_to_fbin.py` to transform the format. A few Python packages are required to run it: ```bash pip3 install numpy h5py ``` The usage of this script is: ```bash $ cpp/bench/ann/scripts/hdf5_to_fbin.py usage: scripts/hdf5_to_fbin.py [-n] <input>.hdf5 -n: normalize base/query set outputs: <input>.base.fbin <input>.query.fbin <input>.groundtruth.neighbors.ibin <input>.groundtruth.distances.fbin ``` So for an input `.hdf5` file, four output binary files will be produced. See previous section for an example of preprocessing the GloVe dataset. Most datasets provided by `ann-benchmarks` use `Angular` or `Euclidean` distance. `Angular` denotes cosine distance. However, computing cosine distance reduces to computing inner product by normalizing vectors beforehand. In practice, we can always do the normalization to decrease computation cost, so it's better to measure the performance of inner product rather than cosine distance. 
The `-n` option of `hdf5_to_fbin.py` can be used to normalize the dataset. 2. Billion-scale datasets can be found at [`big-ann-benchmarks`](http://big-ann-benchmarks.com). The ground truth file contains both neighbors and distances, thus should be split. A script is provided for this: ```bash $ python/raft-ann-bench/src/raft-ann-bench/split_groundtruth/split_groundtruth.pl usage: split_groundtruth.pl input output_prefix ``` Take Deep-1B dataset as an example: ```bash pushd cd cpp/bench/ann mkdir -p data/deep-1B && cd data/deep-1B # download manually "Ground Truth" file of "Yandex DEEP" # suppose the file name is deep_new_groundtruth.public.10K.bin /path/to/raft/python/raft-ann-bench/src/raft-ann-bench/split_groundtruth/split_groundtruth.pl deep_new_groundtruth.public.10K.bin groundtruth # two files 'groundtruth.neighbors.ibin' and 'groundtruth.distances.fbin' should be produced popd ``` Besides ground truth files for the whole billion-scale datasets, this site also provides ground truth files for the first 10M or 100M vectors of the base sets. This means we can use these billion-scale datasets as million-scale datasets. To facilitate this, an optional parameter `subset_size` for dataset can be used. See the next step for further explanation. ##### Step 2: Build Index An index is a data structure to facilitate searching. Different algorithms may use different data structures for their index. We can use `RAFT_IVF_FLAT_ANN_BENCH --build` to build an index and save it to disk. To run a benchmark executable, like `RAFT_IVF_FLAT_ANN_BENCH`, a JSON configuration file is required. Refer to [`cpp/bench/ann/conf/glove-100-inner.json`](../../cpp/bench/ann/conf/glove-100-inner.json) as an example. Configuration file has 3 sections: * `dataset` section specifies the name and files of a dataset, and also the distance in use. 
Since the `*_ANN_BENCH` programs are for index building and searching, only `base_file` for database vectors and `query_file` for query vectors are needed. Ground truth files are for evaluation thus not needed. - To use only a subset of the base dataset, an optional parameter `subset_size` can be specified. It means using only the first `subset_size` vectors of `base_file` as the base dataset. * `search_basic_param` section specifies basic parameters for searching: - `k` is the "k" in "k-nn", that is, the number of neighbors (or results) we want from the searching. * `index` section specifies an array of configurations for index building and searching: - `build_param` and `search_params` are parameters for building and searching, respectively. `search_params` is an array since we will search with different parameters to get different recall values. - `file` is the file name of index. Building will save built index to this file, while searching will load this file. - if `refine_ratio` is specified, refinement, as a post-processing step of search, will be done. It's for algorithms that compress vectors. For example, if `"refine_ratio" : 2` is set, 2`k` results are first computed, then exact distances of them are computed using original uncompressed vectors, and finally top `k` results among them are kept. 
The usage of `*_ANN_BENCH` can be found by running `*_ANN_BENCH --help` on one of the executables: ```bash $ ./cpp/build/*_ANN_BENCH --help benchmark [--benchmark_list_tests={true|false}] [--benchmark_filter=<regex>] [--benchmark_min_time=`<integer>x` OR `<float>s` ] [--benchmark_min_warmup_time=<min_warmup_time>] [--benchmark_repetitions=<num_repetitions>] [--benchmark_enable_random_interleaving={true|false}] [--benchmark_report_aggregates_only={true|false}] [--benchmark_display_aggregates_only={true|false}] [--benchmark_format=<console|json|csv>] [--benchmark_out=<filename>] [--benchmark_out_format=<json|console|csv>] [--benchmark_color={auto|true|false}] [--benchmark_counters_tabular={true|false}] [--benchmark_context=<key>=<value>,...] [--benchmark_time_unit={ns|us|ms|s}] [--v=<verbosity>] [--build|--search] [--overwrite] [--data_prefix=<prefix>] <conf>.json Note the non-standard benchmark parameters: --build: build mode, will build index --search: search mode, will search using the built index one and only one of --build and --search should be specified --overwrite: force overwriting existing index files --data_prefix=<prefix>: prepend <prefix> to dataset file paths specified in the <conf>.json. --override_kv=<key:value1:value2:...:valueN>: override a build/search key one or more times multiplying the number of configurations; you can use this parameter multiple times to get the Cartesian product of benchmark configs. ``` * `--build`: build index. * `--search`: do the searching with built index. * `--overwrite`: by default, the building mode skips building an index if it find out it already exists. This is useful when adding more configurations to the config; only new indices are build without the need to specify an elaborate filtering regex. By supplying `overwrite` flag, you disable this behavior; all indices are build regardless whether they are already stored on disk. * `--data_prefix`: prepend an arbitrary path to the data file paths. 
By default, it is equal to `data`. Note, this does not apply to index file paths. * `--override_kv`: override a build/search key one or more times multiplying the number of configurations. In addition to these ANN-specific flags, you can use all of the standard google benchmark flags. Some of the useful flags: * `--benchmark_filter`: specify subset of benchmarks to run * `--benchmark_out`, `--benchmark_out_format`: store the output to a file * `--benchmark_list_tests`: check the available configurations * `--benchmark_min_time`: specify the minimum duration or number of iterations per case to improve accuracy of the benchmarks. Refer to the google benchmark [user guide](https://github.com/google/benchmark/blob/main/docs/user_guide.md#command-line) for more information about the command-line usage. ##### Step 3: Searching Use the `--search` flag on any of the `*_ANN_BENCH` executables. Other options are the same as in step 2. ## Adding a new ANN algorithm Implementation of a new algorithm should be a class that inherits `class ANN` (defined in `cpp/bench/ann/src/ann.h`) and implements all the pure virtual functions. In addition, it should define two `struct`s for building and searching parameters. The searching parameter class should inherit `struct ANN<T>::AnnSearchParam`. Take `class HnswLib` as an example, its definition is: ```c++ template<typename T> class HnswLib : public ANN<T> { public: struct BuildParam { int M; int ef_construction; int num_threads; }; using typename ANN<T>::AnnSearchParam; struct SearchParam : public AnnSearchParam { int ef; int num_threads; }; // ... }; ``` The benchmark program uses JSON configuration file. To add the new algorithm to the benchmark, need be able to specify `build_param`, whose value is a JSON object, and `search_params`, whose value is an array of JSON objects, for this algorithm in configuration file. 
Still take the configuration for `HnswLib` as an example: ```json { "name" : "...", "algo" : "hnswlib", "build_param": {"M":12, "efConstruction":500, "numThreads":32}, "file" : "/path/to/file", "search_params" : [ {"ef":10, "numThreads":1}, {"ef":20, "numThreads":1}, {"ef":40, "numThreads":1} ] }, ``` How to interpret these JSON objects is totally left to the implementation and should be specified in `cpp/bench/ann/src/factory.cuh`: 1. First, add two functions for parsing JSON object to `struct BuildParam` and `struct SearchParam`, respectively: ```c++ template<typename T> void parse_build_param(const nlohmann::json& conf, typename cuann::HnswLib<T>::BuildParam& param) { param.ef_construction = conf.at("efConstruction"); param.M = conf.at("M"); if (conf.contains("numThreads")) { param.num_threads = conf.at("numThreads"); } } template<typename T> void parse_search_param(const nlohmann::json& conf, typename cuann::HnswLib<T>::SearchParam& param) { param.ef = conf.at("ef"); if (conf.contains("numThreads")) { param.num_threads = conf.at("numThreads"); } } ``` 2. Next, add corresponding `if` case to functions `create_algo()` and `create_search_param()` by calling parsing functions. The string literal in `if` condition statement must be the same as the value of `algo` in configuration file. For example, ```c++ // JSON configuration file contains a line like: "algo" : "hnswlib" if (algo == "hnswlib") { // ... } ```
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/raft_dask_api.rst
~~~~~~~~~~~~~ RAFT Dask API ~~~~~~~~~~~~~ .. role:: py(code) :language: python :class: highlight Dask-based Multi-Node Multi-GPU Communicator -------------------------------------------- .. autoclass:: raft_dask.common.Comms :members:
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/quick_start.md
# Quick Start This guide is meant to provide a quick-start tutorial for interacting with RAFT's C++ & Python APIs. ## RAPIDS Memory Manager (RMM) RAFT relies heavily on the [RMM](https://github.com/rapidsai/rmm) library which eases the burden of configuring different allocation strategies globally across the libraries that use it. ## Multi-dimensional Spans and Arrays Most of the APIs in RAFT accept [mdspan](https://arxiv.org/abs/2010.06474) multi-dimensional array view for representing data in higher dimensions similar to the `ndarray` in the Numpy Python library. RAFT also contains the corresponding owning `mdarray` structure, which simplifies the allocation and management of multi-dimensional data in both host and device (GPU) memory. The `mdarray` is an owning object that forms a convenience layer over RMM and can be constructed in RAFT using a number of different helper functions: ```c++ #include <raft/core/device_mdarray.hpp> int n_rows = 10; int n_cols = 10; auto scalar = raft::make_device_scalar<float>(handle, 1.0); auto vector = raft::make_device_vector<float>(handle, n_cols); auto matrix = raft::make_device_matrix<float>(handle, n_rows, n_cols); ``` The `mdspan` is a lightweight non-owning view that can wrap around any pointer, maintaining shape, layout, and indexing information for accessing elements. We can construct `mdspan` instances directly from the above `mdarray` instances: ```c++ // Scalar mdspan on device auto scalar_view = scalar.view(); // Vector mdspan on device auto vector_view = vector.view(); // Matrix mdspan on device auto matrix_view = matrix.view(); ``` Since the `mdspan` is just a lightweight wrapper, we can also construct it from the underlying data handles in the `mdarray` instances above. We use the extent to get information about the `mdarray` or `mdspan`'s shape. 
```c++ #include <raft/core/device_mdspan.hpp> auto scalar_view = raft::make_device_scalar_view(scalar.data_handle()); auto vector_view = raft::make_device_vector_view(vector.data_handle(), vector.extent(0)); auto matrix_view = raft::make_device_matrix_view(matrix.data_handle(), matrix.extent(0), matrix.extent(1)); ``` Of course, RAFT's `mdspan`/`mdarray` APIs aren't just limited to the `device`. You can also create `host` variants: ```c++ #include <raft/core/host_mdarray.hpp> #include <raft/core/host_mdspan.hpp> int n_rows = 10; int n_cols = 10; auto scalar = raft::make_host_scalar<float>(handle, 1.0); auto vector = raft::make_host_vector<float>(handle, n_cols); auto matrix = raft::make_host_matrix<float>(handle, n_rows, n_cols); auto scalar_view = raft::make_host_scalar_view(scalar.data_handle()); auto vector_view = raft::make_host_vector_view(vector.data_handle(), vector.extent(0)); auto matrix_view = raft::make_host_matrix_view(matrix.data_handle(), matrix.extent(0), matrix.extent(1)); ``` And `managed` variants: ```c++ #include <raft/core/device_mdspan.hpp> int n_rows = 10; int n_cols = 10; auto matrix = raft::make_managed_mdspan(managed_ptr, raft::make_matrix_extents(n_rows, n_cols)); ``` You can also create strided mdspans: ```c++ #include <raft/core/device_mdspan.hpp> int n_elements = 10; int stride = 10; auto vector = raft::make_device_vector_view(vector_ptr, raft::make_vector_strided_layout(n_elements, stride)); ``` ## C++ Example Most of the primitives in RAFT accept a `raft::handle_t` object for the management of resources which are expensive to create, such CUDA streams, stream pools, and handles to other CUDA libraries like `cublas` and `cusolver`. 
The example below demonstrates creating a RAFT handle and using it with `device_matrix` and `device_vector` to allocate memory, generating random clusters, and computing pairwise Euclidean distances: ```c++ #include <raft/core/handle.hpp> #include <raft/core/device_mdarray.hpp> #include <raft/random/make_blobs.cuh> #include <raft/distance/distance.cuh> raft::handle_t handle; int n_samples = 5000; int n_features = 50; auto input = raft::make_device_matrix<float>(handle, n_samples, n_features); auto labels = raft::make_device_vector<int>(handle, n_samples); auto output = raft::make_device_matrix<float>(handle, n_samples, n_samples); raft::random::make_blobs(handle, input.view(), labels.view()); auto metric = raft::distance::DistanceType::L2SqrtExpanded; raft::distance::pairwise_distance(handle, input.view(), input.view(), output.view(), metric); ``` ## Python Example The `pylibraft` package contains a Python API for RAFT algorithms and primitives. `pylibraft` integrates nicely into other libraries by being very lightweight with minimal dependencies and accepting any object that supports the `__cuda_array_interface__`, such as [CuPy's ndarray](https://docs.cupy.dev/en/stable/user_guide/interoperability.html#rmm). The number of RAFT algorithms exposed in this package is continuing to grow from release to release. The example below demonstrates computing the pairwise Euclidean distances between CuPy arrays. Note that CuPy is not a required dependency for `pylibraft`. 
```python import cupy as cp from pylibraft.distance import pairwise_distance n_samples = 5000 n_features = 50 in1 = cp.random.random_sample((n_samples, n_features), dtype=cp.float32) in2 = cp.random.random_sample((n_samples, n_features), dtype=cp.float32) output = pairwise_distance(in1, in2, metric="euclidean") ``` The `output` array in the above example is of type `raft.common.device_ndarray`, which supports [__cuda_array_interface__](https://numba.pydata.org/numba-doc/dev/cuda/cuda_array_interface.html#cuda-array-interface-version-2) making it interoperable with other libraries like CuPy, Numba, and PyTorch that also support it. CuPy supports DLPack, which also enables zero-copy conversion from `raft.common.device_ndarray` to JAX and Tensorflow. Below is an example of converting the output `pylibraft.common.device_ndarray` to a CuPy array: ```python cupy_array = cp.asarray(output) ``` And converting to a PyTorch tensor: ```python import torch torch_tensor = torch.as_tensor(output, device='cuda') ``` When the corresponding library has been installed and available in your environment, this conversion can also be done automatically by all RAFT compute APIs by setting a global configuration option: ```python import pylibraft.config pylibraft.config.set_output_as("cupy") # All compute APIs will return cupy arrays pylibraft.config.set_output_as("torch") # All compute APIs will return torch tensors ``` You can also specify a `callable` that accepts a `pylibraft.common.device_ndarray` and performs a custom conversion. 
The following example converts all output to `numpy` arrays: ```python pylibraft.config.set_output_as(lambda device_ndarray: device_ndarray.copy_to_host()) ``` `pylibraft` also supports writing to a pre-allocated output array so any `__cuda_array_interface__` supported array can be written to in-place: ```python import cupy as cp from pylibraft.distance import pairwise_distance n_samples = 5000 n_features = 50 in1 = cp.random.random_sample((n_samples, n_features), dtype=cp.float32) in2 = cp.random.random_sample((n_samples, n_features), dtype=cp.float32) output = cp.empty((n_samples, n_samples), dtype=cp.float32) pairwise_distance(in1, in2, out=output, metric="euclidean") ```
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/using_libraft.md
# Using The Pre-Compiled Binary At its core, RAFT is a header-only template library, which makes it very powerful in that APIs can be called with various different combinations of data types and only the templates which are actually used will be compiled into your binaries. This increased flexibility comes with a drawback that all the APIs need to be declared inline and thus calls which are made frequently in your code could be compiled again in each source file for which they are invoked. For most functions, compile-time overhead is minimal but some of RAFT's APIs take a substantial time to compile. As a rule of thumb, most functionality in `raft::distance`, `raft::neighbors`, and `raft::cluster` is expensive to compile and most functionality in other namespaces has little compile-time overhead. There are three ways to speed up compile times: 1. Continue to use RAFT as a header-only library and create a CUDA source file in your project to explicitly instantiate the templates which are slow to compile. This can be tedious and will still require compiling the slow code at least once, but it's the most flexible option if you are using types that aren't already compiled into `libraft` 2. If you are able to use one of the template types that are already being compiled into `libraft`, you can use the pre-compiled template instantiations, which are described in more detail in the following section. 3. If you would like to use RAFT but either cannot or would prefer not to compile any CUDA code yourself, you can simply add `libraft` to your link libraries and use the growing set of `raft::runtime` APIs. ### How do I verify template instantiations didn't compile into my binary? To verify that you are not accidentally instantiating templates that have not been pre-compiled in RAFT, set the `RAFT_EXPLICIT_INSTANTIATE_ONLY` macro. This only works if you are linking with the pre-compiled libraft (i.e., when `RAFT_COMPILED` has been defined). 
To check if, for instance, `raft::distance::distance` has been precompiled with specific template arguments, you can set `RAFT_EXPLICIT_INSTANTIATE_ONLY` at the top of the file you are compiling, as in the following example: ```c++ #ifdef RAFT_COMPILED #define RAFT_EXPLICIT_INSTANTIATE_ONLY #endif #include <cstdint> #include <raft/core/resources.hpp> #include <raft/distance/distance.cuh> int main() { raft::resources handle{}; // Change IdxT to uint64_t and you will get an error because you are // instantiating a template that has not been pre-compiled. using IdxT = int; const float* x = nullptr; const float* y = nullptr; float* out = nullptr; int m = 1024; int n = 1024; int k = 1024; bool row_major = true; raft::distance::distance<raft::distance::DistanceType::L1, float, float, float, IdxT>( handle, x, y, out, m, n, k, row_major, 2.0f); } ``` ## Runtime APIs RAFT contains a growing list of runtime APIs that, unlike the pre-compiled template instantiations, allow you to link against `libraft` and invoke RAFT directly from `cpp` files. The benefit to RAFT's runtime APIs is that they can be used from code that is compiled with a `c++` compiler (rather than the CUDA compiler `nvcc`). This enables the `runtime` APIs to power `pylibraft`.
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/raft_ann_benchmarks.md
# RAFT ANN Benchmarks This project provides a benchmark program for various ANN search implementations. It's especially suitable for comparing GPU implementations as well as comparing GPU against CPU. ## Table of Contents - [Installing the benchmarks](#installing-the-benchmarks) - [Conda](#conda) - [Docker](#docker) - [How to run the benchmarks](#how-to-run-the-benchmarks) - [Step 1: prepare dataset](#step-1-prepare-dataset) - [Step 2: build and search index](#step-2-build-and-search-index) - [Step 3: data export](#step-3-data-export) - [Step 4: plot results](#step-4-plot-results) - [Running the benchmarks](#running-the-benchmarks) - [End to end: small-scale (<1M to 10M)](#end-to-end-small-scale-benchmarks-1m-to-10m) - [End to end: large-scale (>10M)](#end-to-end-large-scale-benchmarks-10m-vectors) - [Running with Docker containers](#running-with-docker-containers) - [Evaluating the results](#evaluating-the-results) - [Creating and customizing dataset configurations](#creating-and-customizing-dataset-configurations) - [Adding a new ANN algorithm](#adding-a-new-ann-algorithm) - [Parameter tuning guide](https://docs.rapids.ai/api/raft/nightly/ann_benchmarks_param_tuning/) - [Wiki-all RAG/LLM Dataset](https://docs.rapids.ai/api/raft/nightly/wiki_all_dataset/) ## Installing the benchmarks There are two main ways pre-compiled benchmarks are distributed: - [Conda](#Conda): For users not using containers but want an easy to install and use Python package. Pip wheels are planned to be added as an alternative for users that cannot use conda and prefer to not use containers. - [Docker](#Docker): Only needs docker and [NVIDIA docker](https://github.com/NVIDIA/nvidia-docker) to use. Provides a single docker run command for basic dataset benchmarking, as well as all the functionality of the conda solution inside the containers. ## Conda If containers are not an option or not preferred, the easiest way to install the ANN benchmarks is through conda. 
We provide packages for GPU enabled systems, as well as for systems without a GPU. We suggest using mamba as it generally leads to a faster install time: ```bash mamba create --name raft_ann_benchmarks conda activate raft_ann_benchmarks # to install GPU package: mamba install -c rapidsai -c conda-forge -c nvidia raft-ann-bench cuda-version=11.8* # to install CPU package for usage in CPU-only systems: mamba install -c rapidsai -c conda-forge raft-ann-bench-cpu ``` The channel `rapidsai` can easily be substituted with `rapidsai-nightly` if nightly benchmarks are desired. The CPU package currently allows running the HNSW benchmarks. Please see the [build instructions](ann_benchmarks_build.md) to build the benchmarks from source. ## Docker We provide images for GPU enabled systems, as well as systems without a GPU. The following images are available: - `raft-ann-bench`: Contains GPU and CPU benchmarks, can run all algorithms supported. Will download million-scale datasets as required. Best suited for users that prefer a smaller container size for GPU based systems. Requires the NVIDIA Container Toolkit to run GPU algorithms, can run CPU algorithms without it. - `raft-ann-bench-datasets`: Contains the GPU and CPU benchmarks with million-scale datasets already included in the container. Best suited for users that want to run multiple million scale datasets already included in the image. - `raft-ann-bench-cpu`: Contains only CPU benchmarks with minimal size. Best suited for users that want the smallest containers to reproduce benchmarks on systems without a GPU. Nightly images are located in [dockerhub](https://hub.docker.com/r/rapidsai/raft-ann-bench/tags), meanwhile release (stable) versions are located in [NGC](https://hub.docker.com/r/rapidsai/raft-ann-bench), starting with release 23.12. 
- The following command pulls the nightly container for Python version 3.10, CUDA version 12.0, and RAFT nightly version 24.02a: ```bash docker pull rapidsai/raft-ann-bench:24.02a-cuda12.0-py3.10 #substitute raft-ann-bench for the exact desired container. ``` The CUDA and python versions can be changed for the supported values: Supported CUDA versions: 11.2 and 12.0 Supported Python versions: 3.9 and 3.10. You can see the exact versions as well in the dockerhub site: - [RAFT ANN Benchmark images](https://hub.docker.com/r/rapidsai/raft-ann-bench/tags) - [RAFT ANN Benchmark with datasets preloaded images](https://hub.docker.com/r/rapidsai/raft-ann-bench-datasets/tags) - [RAFT ANN Benchmark CPU only images](https://hub.docker.com/r/rapidsai/raft-ann-bench-cpu/tags) **Note:** GPU containers use the CUDA toolkit from inside the container, the only requirement is a driver installed on the host machine that supports that version. So, for example, CUDA 11.8 containers can run in systems with a CUDA 12.x capable driver. Please also note that the Nvidia-Docker runtime from the [Nvidia Container Toolkit](https://github.com/NVIDIA/nvidia-docker) is required to use GPUs inside docker containers. [//]: # (- The following command &#40;only available after RAPIDS 23.10 release&#41; pulls the container:) [//]: # () [//]: # (```bash) [//]: # (docker pull nvcr.io/nvidia/rapidsai/raft-ann-bench:24.02-cuda11.8-py3.10 #substitute raft-ann-bench for the exact desired container.) [//]: # (```) ## How to run the benchmarks We provide a collection of lightweight Python scripts to run the benchmarks. There are 4 general steps to running the benchmarks and visualizing the results. 1. Prepare Dataset 2. Build Index and Search Index 3. Data Export 4. Plot Results ### Step 1: Prepare Dataset The script `raft-ann-bench.get_dataset` will download and unpack the dataset in a directory that the user provides. As of now, only million-scale datasets are supported by this script. 
For more information on [datasets and formats](ann_benchmarks_dataset.md). The usage of this script is: ```bash usage: get_dataset.py [-h] [--name NAME] [--dataset-path DATASET_PATH] [--normalize] options: -h, --help show this help message and exit --dataset DATASET dataset to download (default: glove-100-angular) --dataset-path DATASET_PATH path to download dataset (default: ${RAPIDS_DATASET_ROOT_DIR}) --normalize normalize cosine distance to inner product (default: False) ``` When option `normalize` is provided to the script, any dataset that has cosine distances will be normalized to inner product. So, for example, the dataset `glove-100-angular` will be written at location `datasets/glove-100-inner/`. ### Step 2: Build and Search Index The script `raft-ann-bench.run` will build and search indices for a given dataset and its specified configuration. The usage of the script `raft-ann-bench.run` is: ```bash usage: __main__.py [-h] [--subset-size SUBSET_SIZE] [-k COUNT] [-bs BATCH_SIZE] [--dataset-configuration DATASET_CONFIGURATION] [--configuration CONFIGURATION] [--dataset DATASET] [--dataset-path DATASET_PATH] [--build] [--search] [--algorithms ALGORITHMS] [--groups GROUPS] [--algo-groups ALGO_GROUPS] [-f] [-m SEARCH_MODE] options: -h, --help show this help message and exit --subset-size SUBSET_SIZE the number of subset rows of the dataset to build the index (default: None) -k COUNT, --count COUNT the number of nearest neighbors to search for (default: 10) -bs BATCH_SIZE, --batch-size BATCH_SIZE number of query vectors to use in each query trial (default: 10000) --dataset-configuration DATASET_CONFIGURATION path to YAML configuration file for datasets (default: None) --configuration CONFIGURATION path to YAML configuration file or directory for algorithms Any run groups found in the specified file/directory will automatically override groups of the same name present in the default configurations, including `base` (default: None) --dataset DATASET name of 
dataset (default: glove-100-inner) --dataset-path DATASET_PATH path to dataset folder, by default will look in RAPIDS_DATASET_ROOT_DIR if defined, otherwise a datasets subdirectory from the calling directory (default: os.getcwd()/datasets/) --build --search --algorithms ALGORITHMS run only comma separated list of named algorithms. If parameters `groups` and `algo-groups are both undefined, then group `base` is run by default (default: None) --groups GROUPS run only comma separated groups of parameters (default: base) --algo-groups ALGO_GROUPS add comma separated <algorithm>.<group> to run. Example usage: "--algo-groups=raft_cagra.large,hnswlib.large" (default: None) -f, --force re-run algorithms even if their results already exist (default: False) -m SEARCH_MODE, --search-mode SEARCH_MODE run search in 'latency' (measure individual batches) or 'throughput' (pipeline batches and measure end-to-end) mode (default: throughput) -t SEARCH_THREADS, --search-threads SEARCH_THREADS specify the number threads to use for throughput benchmark. Single value or a pair of min and max separated by ':'. Example --search-threads=1:4. Power of 2 values between 'min' and 'max' will be used. If only 'min' is specified, then a single test is run with 'min' threads. By default min=1, max=<num hyper threads>. (default: None) -r, --dry-run dry-run mode will convert the yaml config for the specified algorithms and datasets to the json format that's consumed by the lower-level c++ binaries and then print the command to run execute the benchmarks but will not actually execute the command. (default: False) ``` `dataset`: name of the dataset to be searched in [datasets.yaml](#yaml-dataset-config) `dataset-configuration`: optional filepath to custom dataset YAML config which has an entry for arg `dataset` `configuration`: optional filepath to YAML configuration for an algorithm or to directory that contains YAML configurations for several algorithms. 
[Here's how to configure an algorithm.](#yaml-algo-config) `algorithms`: runs all algorithms that it can find in YAML configs found by `configuration`. By default, only `base` group will be run. `groups`: run only specific groups of parameters configurations for an algorithm. Groups are defined in YAML configs (see `configuration`), and by default run `base` group `algo-groups`: this parameter is helpful to append any specific algorithm+group combination to run the benchmark for in addition to all the arguments from `algorithms` and `groups`. It is of the format `<algorithm>.<group>`, or for example, `raft_cagra.large` For every algorithm run by this script, it outputs an index build statistics JSON file in `<dataset-path/<dataset>/result/build/<algo_{group}-{k}-{batch_size}.json>` and an index search statistics JSON file in `<dataset-path/<dataset>/result/search/<algo_{group}-{k}-{batch_size}.json>`. NOTE: The filenams will not have "_{group}" if `group = "base"`. `dataset-path` : 1. data is read from `<dataset-path>/<dataset>` 2. indices are built in `<dataset-path>/<dataset>/index` 3. build/search results are stored in `<dataset-path>/<dataset>/result` `build` and `search` : if both parameters are not supplied to the script then it is assumed both are `True`. `indices` and `algorithms` : these parameters ensure that the algorithm specified for an index is available in `algos.yaml` and not disabled, as well as having an associated executable. ### Step 3: Data Export The script `raft-ann-bench.data_export` will convert the intermediate JSON outputs produced by `raft-ann-bench.run` to more easily readable CSV files, which are needed to build charts made by `raft-ann-bench.plot`. 
```bash usage: data_export.py [-h] [--dataset DATASET] [--dataset-path DATASET_PATH] options: -h, --help show this help message and exit --dataset DATASET dataset to download (default: glove-100-inner) --dataset-path DATASET_PATH path to dataset folder (default: ${RAPIDS_DATASET_ROOT_DIR}) ``` Build statistics CSV file is stored in `<dataset-path/<dataset>/result/build/<algo_group.csv>` and index search statistics CSV file in `<dataset-path/<dataset>/result/search/<algo_group-k{k}-batch_size{batch_size}_{suffix}.csv>`, where suffix has three values: 1. `raw`: All search results are exported 2. `throughput`: Pareto frontier of throughput results is exported 3. `latency`: Pareto frontier of latency results is exported ### Step 4: Plot Results The script `raft-ann-bench.plot` will plot results for all algorithms found in index search statistics CSV files `<dataset-path/<dataset>/result/search/*.csv`. The usage of this script is: ```bash usage: [-h] [--dataset DATASET] [--dataset-path DATASET_PATH] [--output-filepath OUTPUT_FILEPATH] [--algorithms ALGORITHMS] [--groups GROUPS] [--algo-groups ALGO_GROUPS] [-k COUNT] [-bs BATCH_SIZE] [--build] [--search] [--x-scale X_SCALE] [--y-scale {linear,log,symlog,logit}] [--mode {throughput,latency}] [--time-unit {s,ms,us}] [--raw] options: -h, --help show this help message and exit --dataset DATASET dataset to plot (default: glove-100-inner) --dataset-path DATASET_PATH path to dataset folder (default: /home/coder/raft/datasets/) --output-filepath OUTPUT_FILEPATH directory for PNG to be saved (default: /home/coder/raft) --algorithms ALGORITHMS plot only comma separated list of named algorithms. If parameters `groups` and `algo-groups are both undefined, then group `base` is plot by default (default: None) --groups GROUPS plot only comma separated groups of parameters (default: base) --algo-groups ALGO_GROUPS, --algo-groups ALGO_GROUPS add comma separated <algorithm>.<group> to plot. 
Example usage: "--algo-groups=raft_cagra.large,hnswlib.large" (default: None) -k COUNT, --count COUNT the number of nearest neighbors to search for (default: 10) -bs BATCH_SIZE, --batch-size BATCH_SIZE number of query vectors to use in each query trial (default: 10000) --build --search --x-scale X_SCALE Scale to use when drawing the X-axis. Typically linear, logit or a2 (default: linear) --y-scale {linear,log,symlog,logit} Scale to use when drawing the Y-axis (default: linear) --mode {throughput,latency} search mode whose Pareto frontier is used on the y-axis (default: throughput) --time-unit {s,ms,us} time unit to plot when mode is latency (default: ms) --raw Show raw results (not just Pareto frontier) of mode arg (default: False) ``` `mode`: plots pareto frontier of `throughput` or `latency` results exported in the previous step `algorithms`: plots all algorithms that it can find results for the specified `dataset`. By default, only `base` group will be plotted. `groups`: plot only specific groups of parameters configurations for an algorithm. Groups are defined in YAML configs (see `configuration`), and by default run `base` group `algo-groups`: this parameter is helpful to append any specific algorithm+group combination to plot results for in addition to all the arguments from `algorithms` and `groups`. It is of the format `<algorithm>.<group>`, or for example, `raft_cagra.large` The figure below is the resulting plot of running our benchmarks as of August 2023 for a batch size of 10, on an NVIDIA H100 GPU and an Intel Xeon Platinum 8480CL CPU. It presents the throughput (in Queries-Per-Second) performance for every level of recall. 
![Throughput vs recall plot comparing popular ANN algorithms with RAFT's at batch size 10](../../img/raft-vector-search-batch-10.png) ## Running the benchmarks ### End to end: small-scale benchmarks (<1M to 10M) The steps below demonstrate how to download, install, and run benchmarks on a subset of 10M vectors from the Yandex Deep-1B dataset By default the datasets will be stored and used from the folder indicated by the `RAPIDS_DATASET_ROOT_DIR` environment variable if defined, otherwise a datasets sub-folder from where the script is being called: ```bash # (1) prepare dataset. python -m raft-ann-bench.get_dataset --dataset deep-image-96-angular --normalize # (2) build and search index python -m raft-ann-bench.run --dataset deep-image-96-inner # (3) export data python -m raft-ann-bench.data_export --dataset deep-image-96-inner # (4) plot results python -m raft-ann-bench.plot --dataset deep-image-96-inner ``` Configuration files already exist for the following list of the million-scale datasets. Please refer to [ann-benchmarks datasets](https://github.com/erikbern/ann-benchmarks/#data-sets) for more information, including actual train and sizes. These all work out-of-the-box with the `--dataset` argument. Other million-scale datasets from `ann-benchmarks.com` will work, but will require a json configuration file to be created in `$CONDA_PREFIX/lib/python3.xx/site-packages/raft-ann-bench/run/conf`, or you can specify the `--configuration` option to use a specific file. 
| Dataset Name | Train Rows | Columns | Test Rows | Distance | |-----|------------|----|----------------|------------| | `deep-image-96-angular` | 10M | 96 | 10K | Angular | | `fashion-mnist-784-euclidean` | 60K | 784 | 10K | Euclidean | | `glove-50-angular` | 1.1M | 50 | 10K | Angular | | `glove-100-angular` | 1.1M | 100 | 10K | Angular | | `mnist-784-euclidean` | 60K | 784 | 10K | Euclidean | | `nytimes-256-angular` | 290K | 256 | 10K | Angular | | `sift-128-euclidean` | 1M | 128 | 10K | Euclidean| All of the datasets above contain ground test datasets with 100 neighbors. Thus `k` for these datasets must be less than or equal to 100. ### End to end: large-scale benchmarks (>10M vectors) `raft-ann-bench.get_dataset` cannot be used to download the [billion-scale datasets](ann_benchmarks_dataset.md#billion-scale) due to their size. You should instead use our billion-scale datasets guide to download and prepare them. All other python commands mentioned below work as intended once the billion-scale dataset has been downloaded. To download billion-scale datasets, visit [big-ann-benchmarks](http://big-ann-benchmarks.com/neurips21.html) We also provide a new dataset called `wiki-all` containing 88 million 768-dimensional vectors. This dataset is meant for benchmarking a realistic retrieval-augmented generation (RAG)/LLM embedding size at scale. It also contains 1M and 10M vector subsets for smaller-scale experiments. See our [Wiki-all Dataset Guide](https://docs.rapids.ai/api/raft/nightly/wiki_all_dataset/) for more information and to download the dataset. The steps below demonstrate how to download, install, and run benchmarks on a subset of 100M vectors from the Yandex Deep-1B dataset. Please note that datasets of this scale are recommended for GPUs with larger amounts of memory, such as the A100 or H100. 
```bash mkdir -p datasets/deep-1B # (1) prepare dataset # download manually "Ground Truth" file of "Yandex DEEP" # suppose the file name is deep_new_groundtruth.public.10K.bin python -m raft-ann-bench.split_groundtruth --groundtruth datasets/deep-1B/deep_new_groundtruth.public.10K.bin # two files 'groundtruth.neighbors.ibin' and 'groundtruth.distances.fbin' should be produced # (2) build and search index python -m raft-ann-bench.run --dataset deep-1B # (3) export data python -m raft-ann-bench.data_export --dataset deep-1B # (4) plot results python -m raft-ann-bench.plot --dataset deep-1B ``` The usage of `python -m raft-ann-bench.split_groundtruth` is: ```bash usage: split_groundtruth.py [-h] --groundtruth GROUNDTRUTH options: -h, --help show this help message and exit --groundtruth GROUNDTRUTH Path to billion-scale dataset groundtruth file (default: None) ``` ### Running with Docker containers Two methods are provided for running the benchmarks with the Docker containers. #### End-to-end run on GPU When no other entrypoint is provided, an end-to-end script will run through all the steps in [Running the benchmarks](#running-the-benchmarks) above. 
For GPU-enabled systems, the `DATA_FOLDER` variable should be a local folder where you want datasets stored in `$DATA_FOLDER/datasets` and results in `$DATA_FOLDER/result` (we highly recommend `$DATA_FOLDER` to be a dedicated folder for the datasets and results of the containers):

```bash
export DATA_FOLDER=path/to/store/datasets/and/results
docker run --gpus all --rm -it -u $(id -u) \
    -v $DATA_FOLDER:/data/benchmarks \
    rapidsai/raft-ann-bench:24.02a-cuda11.8-py3.10 \
     "--dataset deep-image-96-angular" \
     "--normalize" \
     "--algorithms raft_cagra,raft_ivf_pq --batch-size 10 -k 10" \
     ""
```

Usage of the above command is as follows:

| Argument | Description |
|-----------------------------------------------------------|----------------------------------------------------------------------------------------------------|
| `rapidsai/raft-ann-bench:24.02a-cuda11.8-py3.10` | Image to use. Can be either `raft-ann-bench` or `raft-ann-bench-datasets` |
| `"--dataset deep-image-96-angular"` | Dataset name |
| `"--normalize"` | Whether to normalize the dataset |
| `"--algorithms raft_cagra,hnswlib --batch-size 10 -k 10"` | Arguments passed to the `run` script, such as the algorithms to benchmark, the batch size, and `k` |
| `""` | Additional (optional) arguments that will be passed to the `plot` script. |

***Note about user and file permissions:*** The flag `-u $(id -u)` allows the user inside the container to match the `uid` of the user outside the container, allowing the container to read and write to the mounted volume indicated by the `$DATA_FOLDER` variable.

#### End-to-end run on CPU

The container arguments in the above section can also be used for the CPU-only container, which can be used on systems that don't have a GPU installed.
***Note:*** the image changes to `raft-ann-bench-cpu` container and the `--gpus all` argument is no longer used: ```bash export DATA_FOLDER=path/to/store/datasets/and/results docker run --rm -it -u $(id -u) \ -v $DATA_FOLDER:/data/benchmarks \ rapidsai/raft-ann-bench-cpu:24.02a-py3.10 \ "--dataset deep-image-96-angular" \ "--normalize" \ "--algorithms hnswlib --batch-size 10 -k 10" \ "" ``` #### Manually run the scripts inside the container All of the `raft-ann-bench` images contain the Conda packages, so they can be used directly by logging directly into the container itself: ```bash export DATA_FOLDER=path/to/store/datasets/and/results docker run --gpus all --rm -it -u $(id -u) \ --entrypoint /bin/bash \ --workdir /data/benchmarks \ -v $DATA_FOLDER:/data/benchmarks \ rapidsai/raft-ann-bench:24.02a-cuda11.8-py3.10 ``` This will drop you into a command line in the container, with the `raft-ann-bench` python package ready to use, as described in the [Running the benchmarks](#running-the-benchmarks) section above: ``` (base) root@00b068fbb862:/data/benchmarks# python -m raft-ann-bench.get_dataset --dataset deep-image-96-angular --normalize ``` Additionally, the containers can be run in detached mode without any issue. ### Evaluating the results The benchmarks capture several different measurements. The table below describes each of the measurements for index build benchmarks: | Name | Description | |------------|--------------------------------------------------------| | Benchmark | A name that uniquely identifies the benchmark instance | | Time | Wall-time spent training the index | | CPU | CPU time spent training the index | | Iterations | Number of iterations (this is usually 1) | | GPU | GPU time spent building | | index_size | Number of vectors used to train index | The table below describes each of the measurements for the index search benchmarks. The most important measurements `Latency`, `items_per_second`, `end_to_end`. 
| Name | Description | |------------|-------------------------------------------------------------------------------------------------------------------------------------------------------| | Benchmark | A name that uniquely identifies the benchmark instance | | Time | The wall-clock time of a single iteration (batch) divided by the number of threads. | | CPU | The average CPU time (user + sys time). This does not include idle time (which can also happen while waiting for GPU sync). | | Iterations | Total number of batches. This is going to be `total_queries` / `n_queries`. | | GPU | GPU latency of a single batch (seconds). In throughput mode this is averaged over multiple threads. | | Latency | Latency of a single batch (seconds), calculated from wall-clock time. In throughput mode this is averaged over multiple threads. | | Recall | Proportion of correct neighbors to ground truth neighbors. Note this column is only present if groundtruth file is specified in dataset configuration.| | items_per_second | Total throughput, a.k.a Queries per second (QPS). This is approximately `total_queries` / `end_to_end`. | | k | Number of neighbors being queried in each iteration | | end_to_end | Total time taken to run all batches for all iterations | | n_queries | Total number of query vectors in each batch | | total_queries | Total number of vectors queries across all iterations ( = `iterations` * `n_queries`) | Note the following: - A slightly different method is used to measure `Time` and `end_to_end`. That is why `end_to_end` = `Time` * `Iterations` holds only approximately. - The actual table displayed on the screen may differ slightly as the hyper-parameters will also be displayed for each different combination being benchmarked. - Recall calculation: the number of queries processed per test depends on the number of iterations. Because of this, recall can show slight fluctuations if less neighbors are processed then it is available for the benchmark. 
## Creating and customizing dataset configurations

A single configuration will often define a set of algorithms, with associated index and search parameters, that can be generalized across datasets. We use YAML to define dataset specific and algorithm specific configurations.

<a id='yaml-dataset-config'></a>A default `datasets.yaml` is provided by RAFT in `${RAFT_HOME}/python/raft-ann-bench/src/raft-ann-bench/run/conf` with configurations available for several datasets. Here's a simple example entry for the `sift-128-euclidean` dataset:

```yaml
- name: sift-128-euclidean
  base_file: sift-128-euclidean/base.fbin
  query_file: sift-128-euclidean/query.fbin
  groundtruth_neighbors_file: sift-128-euclidean/groundtruth.neighbors.ibin
  dims: 128
  distance: euclidean
```

<a id='yaml-algo-config'></a>Configuration files for ANN algorithms supported by `raft-ann-bench` are provided in `${RAFT_HOME}/python/raft-ann-bench/src/raft-ann-bench/run/conf`. `raft_cagra` algorithm configuration looks like:

```yaml
name: raft_cagra
groups:
  base:
    build:
      graph_degree: [32, 64]
      intermediate_graph_degree: [64, 96]
    search:
      itopk: [32, 64, 128]

  large:
    build:
      graph_degree: [32, 64]
    search:
      itopk: [32, 64, 128]
```

The default parameters for which the benchmarks are run can be overridden by creating a custom YAML file for algorithms with a `base` group.

The config above has 2 fields:
1. `name` - define the name of the algorithm for which the parameters are being specified.
2. `groups` - define a run group which has a particular set of parameters. Each group helps create a cross-product of all hyper-parameter fields for `build` and `search`.

The table below contains all algorithms supported by RAFT. Each unique algorithm will have its own set of `build` and `search` settings. The [ANN Algorithm Parameter Tuning Guide](ann_benchmarks_param_tuning.md) contains detailed instructions on choosing build and search parameters for each supported algorithm.
| Library | Algorithms | |-----------|------------------------------------------------------------------| | FAISS GPU | `faiss_gpu_flat`, `faiss_gpu_ivf_flat`, `faiss_gpu_ivf_pq` | | FAISS CPU | `faiss_cpu_flat`, `faiss_cpu_ivf_flat`, `faiss_cpu_ivf_pq` | | GGNN | `ggnn` | | HNSWlib | `hnswlib` | | RAFT | `raft_brute_force`, `raft_cagra`, `raft_ivf_flat`, `raft_ivf_pq` | ## Adding a new ANN algorithm ### Implementation and Configuration Implementation of a new algorithm should be a C++ class that inherits `class ANN` (defined in `cpp/bench/ann/src/ann.h`) and implements all the pure virtual functions. In addition, it should define two `struct`s for building and searching parameters. The searching parameter class should inherit `struct ANN<T>::AnnSearchParam`. Take `class HnswLib` as an example, its definition is: ```c++ template<typename T> class HnswLib : public ANN<T> { public: struct BuildParam { int M; int ef_construction; int num_threads; }; using typename ANN<T>::AnnSearchParam; struct SearchParam : public AnnSearchParam { int ef; int num_threads; }; // ... }; ``` <a id='json-index-config'></a>The benchmark program uses JSON format in a configuration file to specify indexes to build, along with the build and search parameters. To add the new algorithm to the benchmark, need be able to specify `build_param`, whose value is a JSON object, and `search_params`, whose value is an array of JSON objects, for this algorithm in configuration file. The `build_param` and `search_param` arguments will vary depending on the algorithm. 
Take the configuration for `HnswLib` as an example: ```json { "name" : "hnswlib.M12.ef500.th32", "algo" : "hnswlib", "build_param": {"M":12, "efConstruction":500, "numThreads":32}, "file" : "/path/to/file", "search_params" : [ {"ef":10, "numThreads":1}, {"ef":20, "numThreads":1}, {"ef":40, "numThreads":1}, ], "search_result_file" : "/path/to/file" }, ``` How to interpret these JSON objects is totally left to the implementation and should be specified in `cpp/bench/ann/src/factory.cuh`: 1. First, add two functions for parsing JSON object to `struct BuildParam` and `struct SearchParam`, respectively: ```c++ template<typename T> void parse_build_param(const nlohmann::json& conf, typename cuann::HnswLib<T>::BuildParam& param) { param.ef_construction = conf.at("efConstruction"); param.M = conf.at("M"); if (conf.contains("numThreads")) { param.num_threads = conf.at("numThreads"); } } template<typename T> void parse_search_param(const nlohmann::json& conf, typename cuann::HnswLib<T>::SearchParam& param) { param.ef = conf.at("ef"); if (conf.contains("numThreads")) { param.num_threads = conf.at("numThreads"); } } ``` 2. Next, add corresponding `if` case to functions `create_algo()` (in `cpp/bench/ann/) and `create_search_param()` by calling parsing functions. The string literal in `if` condition statement must be the same as the value of `algo` in configuration file. For example, ```c++ // JSON configuration file contains a line like: "algo" : "hnswlib" if (algo == "hnswlib") { // ... 
} ``` ### Adding a CMake Target In `raft/cpp/bench/ann/CMakeLists.txt`, we provide a `CMake` function to configure a new Benchmark target with the following signature: ``` ConfigureAnnBench( NAME <algo_name> PATH </path/to/algo/benchmark/source/file> INCLUDES <additional_include_directories> CXXFLAGS <additional_cxx_flags> LINKS <additional_link_library_targets> ) ``` To add a target for `HNSWLIB`, we would call the function as: ``` ConfigureAnnBench( NAME HNSWLIB PATH bench/ann/src/hnswlib/hnswlib_benchmark.cpp INCLUDES ${CMAKE_CURRENT_BINARY_DIR}/_deps/hnswlib-src/hnswlib CXXFLAGS "${HNSW_CXX_FLAGS}" ) ``` This will create an executable called `HNSWLIB_ANN_BENCH`, which can then be used to run `HNSWLIB` benchmarks. Add a new entry to `algos.yaml` to map the name of the algorithm to its binary executable and specify whether the algorithm requires GPU support. ```yaml raft_ivf_pq: executable: RAFT_IVF_PQ_ANN_BENCH requires_gpu: true ``` `executable` : specifies the name of the binary that will build/search the index. It is assumed to be available in `raft/cpp/build/`. `requires_gpu` : denotes whether an algorithm requires GPU to run.
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/conf.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Sphinx configuration for the RAFT documentation build.
# Values here follow the standard Sphinx conf.py conventions:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

import os
import sys

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. The "sphinxext"
# directory (relative to this file) provides the github_link helper used
# by sphinx.ext.linkcode below.
sys.path.insert(0, os.path.abspath("sphinxext"))

from github_link import make_linkcode_resolve  # noqa

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Sphinx extension modules, as strings. They can be extensions coming with
# Sphinx (named 'sphinx.ext.*') or third-party ones (numpydoc, breathe for
# the doxygen-generated C++ API, IPython directives, markdown support).
extensions = [
    "numpydoc",
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.linkcode",
    "IPython.sphinxext.ipython_console_highlighting",
    "IPython.sphinxext.ipython_directive",
    "breathe",
    "recommonmark",
    "sphinx_markdown_tables",
    "sphinx_copybutton",
]

# Breathe bridges the doxygen XML output of the C++ library into Sphinx.
breathe_default_project = "RAFT"
breathe_projects = {
    "RAFT": "../../cpp/doxygen/_xml/",
}

ipython_mplbackend = "str"

# Paths that contain templates, relative to this directory.
templates_path = ["_templates"]

# generate autosummary even if no references
# autosummary_generate = True

# The suffix(es) of source filenames; both reStructuredText and markdown
# sources are built.
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "raft"
copyright = "2023, NVIDIA Corporation"
author = "NVIDIA Corporation"

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = "24.02"
# The full version, including alpha/beta/rc tags.
release = "24.02.00"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.
html_theme = "pydata_sphinx_theme"

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "external_links": [],
    # https://github.com/pydata/pydata-sphinx-theme/issues/1220
    "icon_links": [],
    "github_url": "https://github.com/rapidsai/raft",
    "twitter_url": "https://twitter.com/rapidsai",
    "show_toc_level": 1,
    "navbar_align": "right",
}

# Paths that contain custom static files (such as style sheets), relative
# to this directory. They are copied after the builtin static files, so a
# file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

html_js_files = []

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = "raftdoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "raft.tex", "RAFT Documentation", "NVIDIA Corporation", "manual"),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "raft", "RAFT Documentation", [author], 1)]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "raft",
        "RAFT Documentation",
        author,
        "raft",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "python": ("https://docs.python.org/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/", None),
}

# Config numpydoc
numpydoc_show_inherited_class_members = False
numpydoc_class_members_toctree = False


def setup(app):
    """Sphinx extension hook: register RAPIDS-wide CSS/JS assets."""
    app.add_css_file("references.css")
    app.add_css_file("https://docs.rapids.ai/assets/css/custom.css")
    app.add_js_file(
        "https://docs.rapids.ai/assets/js/custom.js", loading_method="defer"
    )


# The following is used by sphinx.ext.linkcode to provide links to github.
# NOTE(fix): the previous implicitly-concatenated template joined to
# ".../rapidsai/raftraft/blob/.../python/pylibraft{package}/..." — the
# "raft" repo segment and the package name were each duplicated, yielding
# broken source links. The corrected template resolves to
# https://github.com/rapidsai/raft/blob/<revision>/python/<package>/<path>#L<lineno>
linkcode_resolve = make_linkcode_resolve(
    "pylibraft",
    "https://github.com/rapidsai/raft/blob/{revision}/python/"
    "{package}/{path}#L{lineno}",
)

# Set the default role for interpreted code (anything surrounded in `single
# backticks`) to be a python object. See
# https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-default_role
default_role = "py:obj"
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/build.md
# Installation RAFT currently provides libraries for C++ and Python. The C++ libraries, including the header-only and optional shared library, can be installed with Conda. Both the C++ and Python APIs require CMake to build from source. ## Table of Contents - [Install C++ and Python through Conda](#installing-c-and-python-through-conda) - [Installing Python through Pip](#installing-python-through-pip) - [Building C++ and Python from source](#building-c-and-python-from-source) - [CUDA/GPU requirements](#cudagpu-requirements) - [Build dependencies](#build-dependencies) - [Required](#required) - [Optional](#optional) - [Conda environment scripts](#conda-environment-scripts) - [Header-only C++](#header-only-c) - [C++ shared library](#c-shared-library-optional) - [ccache and sccache](#ccache-and-sccache) - [C++ tests](#c-tests) - [C++ primitives microbenchmarks](#c-primitives-microbenchmarks) - [Python libraries](#python-libraries) - [Using CMake directly](#using-cmake-directly) - [Build documentation](#build-documentation) - [Using RAFT in downstream projects](#using-raft-c-in-downstream-projects) - [CMake targets](#cmake-targets) ------ ## Installing C++ and Python through Conda The easiest way to install RAFT is through conda and several packages are provided. - `libraft-headers` C++ headers - `libraft` (optional) C++ shared library containing pre-compiled template instantiations and runtime API. - `pylibraft` (optional) Python library - `raft-dask` (optional) Python library for deployment of multi-node multi-GPU algorithms that use the RAFT `raft::comms` abstraction layer in Dask clusters. - `raft-ann-bench` (optional) Benchmarking tool for easily producing benchmarks that compare RAFT's vector search algorithms against other state-of-the-art implementations. - `raft-ann-bench-cpu` (optional) Reproducible benchmarking tool similar to above, but doesn't require CUDA to be installed on the machine. Can be used to test in environments with competitive CPUs. 
Use the following command, depending on your CUDA version, to install all of the RAFT packages with conda (replace `rapidsai` with `rapidsai-nightly` to install more up-to-date but less stable nightly packages). `mamba` is preferred over the `conda` command. ```bash # for CUDA 11.8 mamba install -c rapidsai -c conda-forge -c nvidia raft-dask pylibraft cuda-version=11.8 ``` ```bash # for CUDA 12.0 mamba install -c rapidsai -c conda-forge -c nvidia raft-dask pylibraft cuda-version=12.0 ``` Note that the above commands will also install `libraft-headers` and `libraft`. You can also install the conda packages individually using the `mamba` command above. For example, if you'd like to install RAFT's headers and pre-compiled shared library to use in your project: ```bash # for CUDA 12.0 mamba install -c rapidsai -c conda-forge -c nvidia libraft libraft-headers cuda-version=12.0 ``` If installing the C++ APIs Please see [using libraft](https://docs.rapids.ai/api/raft/nightly/using_libraft/) for more information on using the pre-compiled shared library. You can also refer to the [example C++ template project](https://github.com/rapidsai/raft/tree/branch-24.02/cpp/template) for a ready-to-go CMake configuration that you can drop into your project and build against installed RAFT development artifacts above. ## Installing Python through Pip `pylibraft` and `raft-dask` both have packages that can be [installed through pip](https://rapids.ai/pip.html#install). For CUDA 11 packages: ```bash pip install pylibraft-cu11 --extra-index-url=https://pypi.nvidia.com pip install raft-dask-cu11 --extra-index-url=https://pypi.nvidia.com ``` And CUDA 12 packages: ```bash pip install pylibraft-cu12 --extra-index-url=https://pypi.nvidia.com pip install raft-dask-cu12 --extra-index-url=https://pypi.nvidia.com ``` These packages statically build RAFT's pre-compiled instantiations, so the C++ headers and pre-compiled shared library won't be readily available to use in your code. 
## Building C++ and Python from source ### CUDA/GPU Requirements - cmake 3.26.4+ - GCC 9.3+ (9.5.0+ recommended) - CUDA Toolkit 11.2+ - NVIDIA driver 450.80.02+ - Pascal architecture or better (compute capability >= 6.0) ### Build Dependencies In addition to the libraries included with cudatoolkit 11.0+, there are some other dependencies below for building RAFT from source. Many of the dependencies are optional and depend only on the primitives being used. All of these can be installed with cmake or [rapids-cpm](https://github.com/rapidsai/rapids-cmake#cpm) and many of them can be installed with [conda](https://anaconda.org). #### Required - [RMM](https://github.com/rapidsai/rmm) corresponding to RAFT version. - [Thrust](https://github.com/NVIDIA/thrust) v1.17 / [CUB](https://github.com/NVIDIA/cub) - [cuCollections](https://github.com/NVIDIA/cuCollections) - Used in `raft::sparse::distance` API. - [CUTLASS](https://github.com/NVIDIA/cutlass) v2.9.1 - Used in `raft::distance` API. #### Optional - [NCCL](https://github.com/NVIDIA/nccl) - Used in `raft::comms` API and needed to build `raft-dask`. - [UCX](https://github.com/openucx/ucx) - Used in `raft::comms` API and needed to build `raft-dask`. - [Googletest](https://github.com/google/googletest) - Needed to build tests - [Googlebench](https://github.com/google/benchmark) - Needed to build benchmarks - [Doxygen](https://github.com/doxygen/doxygen) - Needed to build docs #### Conda environment scripts Conda environment scripts are provided for installing the necessary dependencies to build both the C++ and Python libraries from source. 
It is preferred to use `mamba`, as it provides significant speedup over `conda`: ```bash mamba env create --name rapids_raft -f conda/environments/all_cuda-120_arch-x86_64.yaml mamba activate rapids_raft ``` All of RAFT's C++ APIs can be used header-only and optional pre-compiled shared libraries provide some host-accessible runtime APIs and template instantiations to accelerate compile times. The process for building from source with CUDA 11 differs slightly in that your host system will also need to have CUDA toolkit installed which is greater than, or equal to, the version you install into you conda environment. Installing CUDA toolkit into your host system is necessary because `nvcc` is not provided with Conda's cudatoolkit dependencies for CUDA 11. The following example will install create and install dependencies for a CUDA 11.8 conda environment ```bash mamba env create --name rapids_raft -f conda/environments/all_cuda-118_arch-x86_64.yaml mamba activate rapids_raft ``` The recommended way to build and install RAFT from source is to use the `build.sh` script in the root of the repository. This script can build both the C++ and Python artifacts and provides CMake options for building and installing the headers, tests, benchmarks, and the pre-compiled shared library. ### Header-only C++ `build.sh` uses [rapids-cmake](https://github.com/rapidsai/rapids-cmake), which will automatically download any dependencies which are not already installed. It's important to note that while all the headers will be installed and available, some parts of the RAFT API depend on libraries like CUTLASS, which will need to be explicitly enabled in `build.sh`. The following example will download the needed dependencies and install the RAFT headers into `$INSTALL_PREFIX/include/raft`. ```bash ./build.sh libraft ``` The `-n` flag can be passed to just have the build download the needed dependencies. 
Since RAFT's C++ headers are primarily used during build-time in downstream projects, the dependencies will never be installed by the RAFT build. ```bash ./build.sh libraft -n ``` Once installed, `libraft` headers (and dependencies which were downloaded and installed using `rapids-cmake`) can be uninstalled also using `build.sh`: ```bash ./build.sh libraft --uninstall ``` ### C++ Shared Library (optional) A shared library can be built for speeding up compile times. The shared library also contains a runtime API that allows you to invoke RAFT APIs directly from C++ source files (without `nvcc`). The shared library can also significantly improve re-compile times both while developing RAFT and using its APIs to develop applications. Pass the `--compile-lib` flag to `build.sh` to build the library: ```bash ./build.sh libraft --compile-lib ``` In above example the shared library is installed by default into `$INSTALL_PREFIX/lib`. To disable this, pass `-n` flag. Once installed, the shared library, headers (and any dependencies downloaded and installed via `rapids-cmake`) can be uninstalled using `build.sh`: ```bash ./build.sh libraft --uninstall ``` ### ccache and sccache `ccache` and `sccache` can be used to better cache parts of the build when rebuilding frequently, such as when working on a new feature. You can also use `ccache` or `sccache` with `build.sh`: ```bash ./build.sh libraft --cache-tool=ccache ``` ### C++ Tests Compile the tests using the `tests` target in `build.sh`. ```bash ./build.sh libraft tests ``` Test compile times can be improved significantly by using the optional shared libraries. If installed, they will be used automatically when building the tests but `--compile-libs` can be used to add additional compilation units and compile them with the tests. ```bash ./build.sh libraft tests --compile-lib ``` The tests are broken apart by algorithm category, so you will find several binaries in `cpp/build/` named `*_TEST`. 
It can take some time to compile all of the tests.
To run Python tests: ```bash cd python/raft-dask py.test -s -v cd python/pylibraft py.test -s -v ``` The Python packages can also be uninstalled using the `build.sh` script: ```bash ./build.sh pylibraft raft-dask --uninstall ``` ### Using CMake directly When building RAFT from source, the `build.sh` script offers a nice wrapper around the `cmake` commands to ease the burdens of manually configuring the various available cmake options. When more fine-grained control over the CMake configuration is desired, the `cmake` command can be invoked directly as the below example demonstrates. The `CMAKE_INSTALL_PREFIX` installs RAFT into a specific location. The example below installs RAFT into the current Conda environment: ```bash cd cpp mkdir build cd build cmake -D BUILD_TESTS=ON -DRAFT_COMPILE_LIBRARY=ON -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX ../ make -j<parallel_level> install ``` RAFT's CMake has the following configurable flags available: | Flag | Possible Values | Default Value | Behavior | |---------------------------------|----------------------| --- |------------------------------------------------------------------------------| | BUILD_TESTS | ON, OFF | ON | Compile Googletests | | BUILD_PRIMS_BENCH | ON, OFF | OFF | Compile benchmarks | | BUILD_ANN_BENCH | ON, OFF | OFF | Compile end-to-end ANN benchmarks | | CUDA_ENABLE_KERNELINFO | ON, OFF | OFF | Enables `kernelinfo` in nvcc. 
| RAFT_ENABLE_CURAND_DEPENDENCY | ON, OFF | ON | Link against curand library in `raft::raft` |
The necessary flags can be set with CMake: ```cmake target_compile_options(your_target_name PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:--expt-extended-lambda --expt-relaxed-constexpr>) ``` Further, it's important that the language level be set to at least C++ 17. This can be done with cmake: ```cmake set_target_properties(your_target_name PROPERTIES CXX_STANDARD 17 CXX_STANDARD_REQUIRED ON CUDA_STANDARD 17 CUDA_STANDARD_REQUIRED ON POSITION_INDEPENDENT_CODE ON INTERFACE_POSITION_INDEPENDENT_CODE ON) ``` The [C++ example template project](https://github.com/rapidsai/raft/tree/HEAD/cpp/template) provides an end-to-end buildable example of what a `CMakeLists.txt` that uses RAFT should look like. The items below point out some of the needed details. #### CMake Targets The `raft::raft` CMake target is made available when including RAFT into your CMake project but additional CMake targets can be made available by adding to the `COMPONENTS` option in CMake's `find_package(raft)` (refer to [CMake docs](https://cmake.org/cmake/help/latest/command/find_package.html#basic-signature) to learn more). The components should be separated by spaces. The `raft::raft` target will always be available. Note that the `distributed` component also exports additional dependencies. | Component | Target | Description | Base Dependencies | |-------------|---------------------|----------------------------------------------------------|----------------------------------------| | n/a | `raft::raft` | Full RAFT header library | CUDA toolkit, RMM, NVTX, CCCL, CUTLASS | | compiled | `raft::compiled` | Pre-compiled template instantiations and runtime library | raft::raft | | distributed | `raft::distributed` | Dependencies for `raft::comms` APIs | raft::raft, UCX, NCCL
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/using_raft_comms.rst
Using RAFT Comms ================ RAFT provides a communications abstraction for writing distributed algorithms which can scale up to multiple GPUs and scale out to multiple nodes. The communications abstraction is largely based on MPI and NCCL, and allows the user to decouple the design of algorithms from the environments where the algorithms are executed, enabling “write-once deploy everywhere” semantics. Currently, the distributed algorithms in both cuGraph and cuML are being deployed in both MPI and Dask clusters while cuML’s distributed algorithms are also being deployed on GPUs in Apache Spark clusters. This is a powerful concept as distributed algorithms can be non-trivial to write and so maintainability is eased and bug fixes reach further by increasing reuse as much as possible. While users of RAFT’s communications layer largely get MPI integration for free just by installing MPI and using `mpirun` to run their applications, the `raft-dask` Python package provides a mechanism for executing algorithms written using RAFT’s communications layer in a Dask cluster. It will help to walk through a small example of how one would build an algorithm with RAFT’s communications layer. First, an instance of `raft::comms_t` is passed through the `raft::resources` instance and code is written to utilize collective and/or point-to-point communications as needed. .. 
communicator.allreduce(temp_scalar.data(), temp_scalar.data(), 1, raft::comms::op_t::SUM, stream);
code-block:: python :caption: Example of running test_allreduce() in Dask from raft_dask.common import Comms, local_handle from dask.distributed import Client, wait from dask_cuda import LocalCUDACluster cluster = LocalCUDACluster() client = Client(cluster) # Create and initialize Comms instance comms = Comms(client=client) comms.init() def func_run_allreduce(sessionId, root): handle = local_handle(sessionId) run_test_allreduce(handle, root) # Invoke run_test_allreduce on all workers dfs = [ client.submit( func_run_allreduce, comms.sessionId, 0, pure=False, workers=[w] ) for w in comms.worker_addresses ] # Wait until processing is done wait(dfs, timeout=5) comms.destroy() client.close() cluster.close()
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/index.rst
RAPIDS RAFT: Reusable Accelerated Functions and Tools for Vector Search and More ================================================================================ .. image:: ../../img/raft-tech-stack-vss.png :width: 800 :alt: RAFT Tech Stack Useful Resources ################ .. _raft_reference: https://docs.rapids.ai/api/raft/stable/ - `Example Notebooks <https://github.com/rapidsai/raft/tree/HEAD/notebooks>`_: Example Jupyter notebooks - `RAPIDS Community <https://rapids.ai/community.html>`_: Get help, contribute, and collaborate. - `GitHub repository <https://github.com/rapidsai/raft>`_: Download the RAFT source code. - `Issue tracker <https://github.com/rapidsai/raft/issues>`_: Report issues or request features. What is RAFT? ############# RAFT contains fundamental widely-used algorithms and primitives for machine learning and information retrieval. The algorithms are CUDA-accelerated and form building blocks for more easily writing high performance applications. By taking a primitives-based approach to algorithm development, RAFT - accelerates algorithm construction time - reduces the maintenance burden by maximizing reuse across projects, and - centralizes core reusable computations, allowing future optimizations to benefit all algorithms that use them. While not exhaustive, the following general categories help summarize the accelerated building blocks that RAFT contains: .. 
list-table:: :widths: 25 50 :header-rows: 1 * - Category - Examples * - Nearest Neighbors - pairwise distances, vector search, epsilon neighborhoods, neighborhood graph construction * - Data Formats - sparse & dense, conversions, data generation * - Dense Operations - linear algebra, matrix and vector operations, slicing, norms, factorization, least squares, svd & eigenvalue problems * - Sparse Operations - linear algebra, eigenvalue problems, slicing, norms, reductions, factorization, symmetrization, components & labeling * - Basic Clustering - spectral clustering, hierarchical clustering, k-means * - Solvers - combinatorial optimization, iterative solvers * - Statistics - sampling, moments and summary statistics, metrics * - Tools & Utilities - common utilities for developing CUDA applications, multi-node multi-gpu infrastructure .. toctree:: :maxdepth: 1 :caption: Contents: quick_start.md build.md cpp_api.rst pylibraft_api.rst using_libraft.md vector_search_tutorial.md raft_ann_benchmarks.md raft_dask_api.rst using_raft_comms.rst developer_guide.md contributing.md Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search`
0
rapidsai_public_repos/raft/docs
rapidsai_public_repos/raft/docs/source/developer_guide.md
The pin should be reverted after the change is merged to the RAFT project and before it is merged to the dependent project(s) downstream.
The ideal integration of RAFT into consuming projects will enable the source build in the consuming project only for this case, while otherwise relying on a more stable packaging (such as conda packaging).
To avoid compatibility issues between different threading models, the only threading paradigm allowed in RAFT is OpenMP.
### API stability Since RAFT is a core library with multiple consumers, it's important that the public APIs maintain stability across versions and any changes to them are done with caution, adding new functions and deprecating the old functions over a couple releases as necessary. ### Stateless C++ APIs Using the IVF-PQ algorithm as an example, the following way of exposing its API would be wrong according to the guidelines in this section, since it exposes a non-POD C++ class object in the C++ API: ```cpp template <typename value_t, typename idx_t> class ivf_pq { ivf_pq_params params_; raft::resources const& res_; public: ivf_pq(raft::resources const& res); void train(raft::device_matrix<value_t, idx_t, raft::row_major> dataset); void search(raft::device_matrix<value_t, idx_t, raft::row_major> queries, raft::device_matrix<value_t, idx_t, raft::row_major> out_inds, raft::device_matrix<value_t, idx_t, raft::row_major> out_dists); }; ``` An alternative correct way to expose this could be: ```cpp namespace raft::ivf_pq { template<typename value_t, typename value_idx> void ivf_pq_train(raft::resources const& res, const raft::ivf_pq_params &params, raft::ivf_pq_index &index, raft::device_matrix<value_t, idx_t, raft::row_major> dataset); template<typename value_t, typename value_idx> void ivf_pq_search(raft::resources const& res, raft::ivf_pq_params const&params, raft::ivf_pq_index const & index, raft::device_matrix<value_t, idx_t, raft::row_major> queries, raft::device_matrix<value_t, idx_t, raft::row_major> out_inds, raft::device_matrix<value_t, idx_t, raft::row_major> out_dists); } ``` ### Other functions on state These guidelines also mean that it is the responsibility of C++ API to expose methods to load and store (aka marshalling) such a data structure. 
Further continuing the IVF-PQ example, the following methods could achieve this: ```cpp namespace raft::ivf_pq { void save(raft::ivf_pq_index const& model, std::ostream &os); void load(raft::ivf_pq_index& model, std::istream &is); } ``` ## Coding style ### Code Formatting #### Using pre-commit hooks RAFT uses [pre-commit](https://pre-commit.com/) to execute all code linters and formatters. These tools ensure a consistent code format throughout the project. Using pre-commit ensures that linter versions and options are aligned for all developers. Additionally, there is a CI check in place to enforce that committed code follows our standards. To use `pre-commit`, install via `conda` or `pip`: ```bash conda install -c conda-forge pre-commit ``` ```bash pip install pre-commit ``` Then run pre-commit hooks before committing code: ```bash pre-commit run ``` By default, pre-commit runs on staged files (only changes and additions that will be committed). To run pre-commit checks on all files, execute: ```bash pre-commit run --all-files ``` Optionally, you may set up the pre-commit hooks to run automatically when you make a git commit. This can be done by running: ```bash pre-commit install ``` Now code linters and formatters will be run each time you commit changes. You can skip these checks with `git commit --no-verify` or with the short version `git commit -n`. #### Summary of pre-commit hooks The following section describes some of the core pre-commit hooks used by the repository. See `.pre-commit-config.yaml` for a full list. C++/CUDA is formatted with [`clang-format`](https://clang.llvm.org/docs/ClangFormat.html). RAFT relies on `clang-format` to enforce code style across all C++ and CUDA source code. The coding style is based on the [Google style guide](https://google.github.io/styleguide/cppguide.html#Formatting). The only digressions from this style are the following. 1. Do not split empty functions/records/namespaces. 2. 
Two-space indentation everywhere, including the line continuations. 3. Disable reflowing of comments. The reasons behind these deviations from the Google style guide are given in comments [here](https://github.com/rapidsai/raft/blob/branch-24.02/cpp/.clang-format). [`doxygen`](https://doxygen.nl/) is used as documentation generator and also as a documentation linter. In order to run doxygen as a linter on C++/CUDA code, run ```bash ./ci/checks/doxygen.sh ``` Python code runs several linters including [Black](https://black.readthedocs.io/en/stable/), [isort](https://pycqa.github.io/isort/), and [flake8](https://flake8.pycqa.org/en/latest/). RAFT also uses [codespell](https://github.com/codespell-project/codespell) to find spelling mistakes, and this check is run as a pre-commit hook. To apply the suggested spelling fixes, you can run `codespell -i 3 -w .` from the repository root directory. This will bring up an interactive prompt to select which spelling fixes to apply. ### #include style [include_checker.py](https://github.com/rapidsai/raft/blob/branch-24.02/cpp/scripts/include_checker.py) is used to enforce the include style as follows: 1. `#include "..."` should be used for referencing local files only. It is acceptable to be used for referencing files in a sub-folder/parent-folder of the same algorithm, but should never be used to include files in other algorithms or between algorithms and the primitives or other dependencies. 2. `#include <...>` should be used for referencing everything else Manually, run the following to bulk-fix include style issues: ```bash python ./cpp/scripts/include_checker.py --inplace [cpp/include cpp/test ... 
list of folders which you want to fix] ``` ### Copyright header [copyright.py](https://github.com/rapidsai/raft/blob/branch-24.02/ci/checks/copyright.py) checks the Copyright header for all git-modified files Manually, you can run the following to bulk-fix the header if only the years need to be updated: ```bash python ./ci/checks/copyright.py --update-current-year ``` Keep in mind that this only applies to files tracked by git and having been modified. ## Error handling Call CUDA APIs via the provided helper macros `RAFT_CUDA_TRY`, `RAFT_CUBLAS_TRY` and `RAFT_CUSOLVER_TRY`. These macros take care of checking the return values of the used API calls and generate an exception when the command is not successful. If you need to avoid an exception, e.g. inside a destructor, use `RAFT_CUDA_TRY_NO_THROW`, `RAFT_CUBLAS_TRY_NO_THROW ` and `RAFT_CUSOLVER_TRY_NO_THROW`. These macros log the error but do not throw an exception. ## Logging ### Introduction Anything and everything about logging is defined inside [logger.hpp](https://github.com/rapidsai/raft/blob/branch-24.02/cpp/include/raft/core/logger.hpp). It uses [spdlog](https://github.com/gabime/spdlog) underneath, but this information is transparent to all. ### Usage ```cpp #include <raft/core/logger.hpp> // Inside your method or function, use any of these macros RAFT_LOG_TRACE("Hello %s!", "world"); RAFT_LOG_DEBUG("Hello %s!", "world"); RAFT_LOG_INFO("Hello %s!", "world"); RAFT_LOG_WARN("Hello %s!", "world"); RAFT_LOG_ERROR("Hello %s!", "world"); RAFT_LOG_CRITICAL("Hello %s!", "world"); ``` ### Changing logging level There are 7 logging levels with each successive level becoming quieter: 1. RAFT_LEVEL_TRACE 2. RAFT_LEVEL_DEBUG 3. RAFT_LEVEL_INFO 4. RAFT_LEVEL_WARN 5. RAFT_LEVEL_ERROR 6. RAFT_LEVEL_CRITICAL 7. 
Pass the [format string](https://github.com/gabime/spdlog/wiki/3.-Custom-formatting) as follows in order to use a different logging pattern than the default.
At a minimum, the definitions for these types, at least, should not require `nvcc`. In general, these classes should only store very simple state and should not perform their own computations. Instead, new functions should be exposed on the public API which accept these objects, reading or updating their state as necessary. 3. Documentation for public APIs should be well documented, easy to use, and it is highly preferred that they include usage instructions. 4. Before creating a new primitive, check to see if one exists already. If one exists but the API isn't flexible enough to include your use-case, consider first refactoring the existing primitive. If that is not possible without an extreme number of changes, consider how the public API could be made more flexible. If the new primitive is different enough from all existing primitives, consider whether an existing public API could invoke the new primitive as an option or argument. If the new primitive is different enough from what exists already, add a header for the new public API function to the appropriate subdirectory and namespace. ## Header organization of expensive function templates RAFT is a heavily templated library. Several core functions are expensive to compile and we want to prevent duplicate compilation of this functionality. To limit build time, RAFT provides a precompiled library (libraft.so) where expensive function templates are instantiated for the most commonly used template parameters. To prevent (1) accidental instantiation of these templates and (2) unnecessary dependency on the internals of these templates, we use a split header structure and define macros to control template instantiation. This section describes the macros and header structure. **Macros.** We define the macros `RAFT_COMPILED` and `RAFT_EXPLICIT_INSTANTIATE_ONLY`. The `RAFT_COMPILED` macro is defined by `CMake` when compiling code that (1) is part of `libraft.so` or (2) is linked with `libraft.so`. 
It indicates that a precompiled `libraft.so` is present at runtime. The `RAFT_EXPLICIT_INSTANTIATE_ONLY` macro is defined by `CMake` during compilation of `libraft.so` itself. When defined, it indicates that implicit instantiations of expensive function templates are forbidden (they result in a compiler error). In the RAFT project, we additionally define this macro during compilation of the tests and benchmarks. Below, we summarize which combinations of `RAFT_COMPILED` and `RAFT_EXPLICIT_INSTANTIATE_ONLY` are used in practice and what the effect of the combination is. | RAFT_COMPILED | RAFT_EXPLICIT_INSTANTIATE_ONLY | Which targets | |---------------|--------------------------------|------------------------------------------------------------------------------------------------------| | defined | defined | `raft::compiled`, RAFT tests, RAFT benchmarks | | defined | | Downstream libraries depending on `libraft` like cuML, cuGraph. | | | | Downstream libraries depending on `libraft-headers` like cugraph-ops. | | RAFT_COMPILED | RAFT_EXPLICIT_INSTANTIATE_ONLY | Effect | |---------------|--------------------------------|-------------------------------------------------------------------------------------------------------| | defined | defined | Templates are precompiled. Compiler error on accidental instantiation of expensive function template. | | defined | | Templates are precompiled. Implicit instantiation allowed. | | | | Nothing precompiled. Implicit instantiation allowed. | | | defined | Avoid this: nothing precompiled. Compiler error on any instantiation of expensive function template. | **Header organization.** Any header file that defines an expensive function template (say `expensive.cuh`) should be split in three parts: `expensive.cuh`, `expensive-inl.cuh`, and `expensive-ext.cuh`. The file `expensive-inl.cuh` ("inl" for "inline") contains the template definitions, i.e., the actual code. 
The file `expensive.cuh` includes one or both of the other two files, depending on the values of the `RAFT_COMPILED` and `RAFT_EXPLICIT_INSTANTIATE_ONLY` macros. The file `expensive-ext.cuh` contains `extern template` instantiations. In addition, if `RAFT_EXPLICIT_INSTANTIATE_ONLY` is set, it contains template definitions to ensure that a compiler error is raised in case of accidental instantiation. The dispatching by `expensive.cuh` is performed as follows: ``` c++ #ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY // If implicit instantiation is allowed, include template definitions. #include "expensive-inl.cuh" #endif #ifdef RAFT_COMPILED // Include extern template instantiations when RAFT is compiled. #include "expensive-ext.cuh" #endif ``` The file `expensive-inl.cuh` is unchanged: ``` c++ namespace raft { template <typename T> void expensive(T arg) { // .. function body } } // namespace raft ``` The file `expensive-ext.cuh` contains the following: ``` c++ #include <raft/util/raft_explicit.cuh> // RAFT_EXPLICIT #ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY namespace raft { // (1) define templates to raise an error in case of accidental instantiation template <typename T> void expensive(T arg) RAFT_EXPLICIT; } // namespace raft #endif //RAFT_EXPLICIT_INSTANTIATE_ONLY // (2) Provide extern template instantiations. extern template void raft::expensive<int>(int); extern template void raft::expensive<float>(float); ``` This header has two responsibilities: (1) define templates to raise an error in case of accidental instantiation and (2) provide `extern template` instantiations. First, if `RAFT_EXPLICIT_INSTANTIATE_ONLY` is set, `expensive` is defined. This is done for two reasons: (1) to give a definition, because the definition in `expensive-inl.cuh` was skipped and (2) to indicate that the template should be explicitly instantiated by taging it with the `RAFT_EXPLICIT` macro. 
This macro defines the function body, and it ensures that an informative error message is generated when an implicit instantiation erroneously occurs. Finally, the `extern template` instantiations are listed. To actually generate the code for the template instances, the file `src/expensive.cu` contains the following. Note that the only difference between the extern template instantiations in `expensive-ext.cuh` and these lines are the removal of the word `extern`: ``` c++ #include <raft/expensive-inl.cuh> template void raft::expensive<int>(int); template void raft::expensive<float>(float); ``` **Design considerations**: 1. In the `-ext.cuh` header, do not include implementation headers. Only include function parameter types and types that are used to instantiate the templates. If a primitive takes custom parameter types, define them in a separate header called `<primitive_name>_types.hpp`. (see [Common Design Considerations](https://github.com/rapidsai/raft/blob/7b065aff81a0b1976e2a9e2f3de6690361a1111b/docs/source/developer_guide.md#common-design-considerations)). 2. Keep docstrings in the `-inl.cuh` header, as it is closer to the code. Remove docstrings from template definitions in the `-ext.cuh` header. Make sure to explicitly include public APIs in the RAFT API docs. That is, add `#include <raft/expensive.cuh>` to the docs in `docs/source/cpp_api/expensive.rst` (instead of `#include <raft/expensive-inl.cuh>`). 3. The order of inclusion in `expensive.cuh` is extremely important. If `RAFT_EXPLICIT_INSTANTIATE_ONLY` is not defined, but `RAFT_COMPILED` is defined, then we must include the template definitions before the `extern template` instantiations. 4. If a header file defines multiple expensive templates, it can be that one of them is not instantiated. In this case, **do define** the template with `RAFT_EXPLICIT` in the `-ext` header. 
This way, when the template is instantiated, the developer gets a helpful error message instead of a confusing "function not found". This header structure was proposed in [issue #1416](https://github.com/rapidsai/raft/issues/1416), which contains more background on the motivation of this structure and the mechanics of C++ template instantiation. ## Testing It's important for RAFT to maintain a high test coverage of the public APIs in order to minimize the potential for downstream projects to encounter unexpected build or runtime behavior as a result of changes. A well-defined public API can help maintain compile-time stability but means more focus should be placed on testing the functional requirements and verifying execution on the various edge cases within RAFT itself. Ideally, bug fixes and new features should be able to be made to RAFT independently of the consuming projects. ## Documentation Public APIs always require documentation since those will be exposed directly to users. For C++, we use [doxygen](http://www.doxygen.nl) and for Python/cython we use [pydoc](https://docs.python.org/3/library/pydoc.html). In addition to summarizing the purpose of each class / function in the public API, the arguments (and relevant templates) should be documented along with brief usage examples. ## Asynchronous operations and stream ordering All RAFT algorithms should be as asynchronous as possible avoiding the use of the default stream (aka as NULL or `0` stream). Implementations that require only one CUDA Stream should use the stream from `raft::resources`: ```cpp #include <raft/core/resources.hpp> #include <raft/core/resource/cuda_stream.hpp> void foo(const raft::resources& res, ...) { cudaStream_t stream = get_cuda_stream(res); } ``` When multiple streams are needed, e.g. to manage a pipeline, use the internal streams available in `raft::resources` (see [CUDA Resources](#cuda-resources)). 
If multiple streams are used all operations still must be ordered according to `raft::resource::get_cuda_stream()` (from `raft/core/resource/cuda_stream.hpp`). Before any operation in any of the internal CUDA streams is started, all previous work in `raft::resource::get_cuda_stream()` must have completed. Any work enqueued in `raft::resource::get_cuda_stream()` after a RAFT function returns should not start before all work enqueued in the internal streams has completed. E.g. if a RAFT algorithm is called like this: ```cpp #include <raft/core/resources.hpp> #include <raft/core/resource/cuda_stream.hpp> void foo(const double* srcdata, double* result) { cudaStream_t stream; CUDA_RT_CALL( cudaStreamCreate( &stream ) ); raft::resources res; set_cuda_stream(res, stream); ... RAFT_CUDA_TRY( cudaMemcpyAsync( srcdata, h_srcdata.data(), n*sizeof(double), cudaMemcpyHostToDevice, stream ) ); raft::algo(res, dopredict, srcdata, result, ... ); RAFT_CUDA_TRY( cudaMemcpyAsync( h_result.data(), result, m*sizeof(int), cudaMemcpyDeviceToHost, stream ) ); ... } ``` No work in any stream should start in `raft::algo` before the `cudaMemcpyAsync` in `stream` launched before the call to `raft::algo` is done. And all work in all streams used in `raft::algo` should be done before the `cudaMemcpyAsync` in `stream` launched after the call to `raft::algo` starts. This can be ensured by introducing interstream dependencies with CUDA events and `cudaStreamWaitEvent`. For convenience, the header `raft/core/device_resources.hpp` provides the class `raft::stream_syncer` which lets all `raft::resources` internal CUDA streams wait on `raft::resource::get_cuda_stream()` in its constructor and in its destructor and lets `raft::resource::get_cuda_stream()` wait on all work enqueued in the `raft::resources` internal CUDA streams. 
The intended use would be to create a `raft::stream_syncer` object as the first thing in an entry function of the public RAFT API: ```cpp namespace raft { void algo(const raft::resources& res, ...) { raft::stream_syncer _(res); } } ``` This ensures the stream ordering behavior described above. ### Using Thrust To ensure that thrust algorithms are executed in the intended stream the `thrust::cuda::par` execution policy should be used. To ensure that thrust algorithms allocate temporary memory via the provided device memory allocator, use the `rmm::exec_policy` available in `raft/core/resource/thrust_policy.hpp`, which can be used through `raft::resources`: ```cpp #include <raft/core/resources.hpp> #include <raft/core/resource/thrust_policy.hpp> void foo(const raft::resources& res, ...) { auto execution_policy = get_thrust_policy(res); thrust::for_each(execution_policy, ... ); } ``` ## Resource Management Do not create reusable CUDA resources directly in implementations of RAFT algorithms. Instead, use the existing resources in `raft::resources` to avoid constant creation and deletion of reusable resources such as CUDA streams, CUDA events or library handles. Please file a feature request if a resource handle is missing in `raft::resources`. The resources can be obtained like this ```cpp #include <raft/core/resources.hpp> #include <raft/core/resource/cublas_handle.hpp> #include <raft/core/resource/cuda_stream_pool.hpp> void foo(const raft::resources& h, ...) { cublasHandle_t cublasHandle = get_cublas_handle(h); const int num_streams = get_stream_pool_size(h); const int stream_idx = ... cudaStream_t stream = get_stream_from_stream_pool(stream_idx); ... } ``` The example below shows one way to create `n_stream` number of internal cuda streams with an `rmm::stream_pool` which can later be used by the algos inside RAFT. 
```cpp #include <raft/core/resources.hpp> #include <raft/core/resource/cuda_stream_pool.hpp> #include <rmm/cuda_stream_pool.hpp> int main(int argc, char** argv) { int n_streams = argc > 1 ? atoi(argv[1]) : 0; raft::resources res; set_cuda_stream_pool(res, std::make_shared<rmm::cuda_stream_pool>(n_streams)); foo(res, ...); } ``` ## Multi-GPU The multi-GPU paradigm of RAFT is **O**ne **P**rocess per **G**PU (OPG). Each algorithm should be implemented in a way that it can run with a single GPU without any specific dependencies to a particular communication library. A multi-GPU implementation should use the methods offered by the class `raft::comms::comms_t` from [raft/core/comms.hpp] for inter-rank/GPU communication. It is the responsibility of the user of cuML to create an initialized instance of `raft::comms::comms_t`. E.g. with a CUDA-aware MPI, a RAFT user could use code like this to inject an initialized instance of `raft::comms::mpi_comms` into a `raft::resources`: ```cpp #include <mpi.h> #include <raft/core/resources.hpp> #include <raft/comms/mpi_comms.hpp> #include <raft/algo.hpp> ... int main(int argc, char * argv[]) { MPI_Init(&argc, &argv); int rank = -1; MPI_Comm_rank(MPI_COMM_WORLD, &rank); int local_rank = -1; { MPI_Comm local_comm; MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL, &local_comm); MPI_Comm_rank(local_comm, &local_rank); MPI_Comm_free(&local_comm); } cudaSetDevice(local_rank); mpi_comms raft_mpi_comms; MPI_Comm_dup(MPI_COMM_WORLD, &raft_mpi_comms); { raft::resources res; initialize_mpi_comms(res, raft_mpi_comms); ... raft::algo(res, ... ); } MPI_Comm_free(&raft_mpi_comms); MPI_Finalize(); return 0; } ``` A RAFT developer can assume the following: * A instance of `raft::comms::comms_t` was correctly initialized. * All processes that are part of `raft::comms::comms_t` call into the RAFT algorithm cooperatively. 
The initialized instance of `raft::comms::comms_t` can be accessed from the `raft::resources` instance: ```cpp #include <raft/core/resources.hpp> #include <raft/core/resource/comms.hpp> void foo(const raft::resources& res, ...) { const raft::comms_t& communicator = get_comms(res); const int rank = communicator.get_rank(); const int size = communicator.get_size(); ... } ```
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/_static/references.css
/*
 * Styling for Sphinx citation/reference lists ("[1] Author, Title, ...").
 * By default the theme renders citation labels like parameter names
 * (block-level, bordered, padded); these rules reset that so labels
 * render inline as plain bracketed markers.
 */

/* Fix references to not look like parameters */
dl.citation > dt.label {
  /* Undo the theme's parameter-style box model and typography. */
  display: unset !important;
  float: left !important;
  border: unset !important;
  background: unset !important;
  padding: unset !important;
  margin: unset !important;
  font-size: unset !important;
  line-height: unset !important;
  /* Keep a small gap between the "[n]" label and the citation text. */
  padding-right: 0.5rem !important;
}

/* Add opening bracket */
dl.citation > dt.label > span::before {
  content: "[";
}

/* Add closing bracket */
dl.citation > dt.label > span::after {
  content: "]";
}
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/pylibraft_api/distance.rst
Distance ======== This page provides `pylibraft` class references for the publicly-exposed elements of the `pylibraft.distance` package. RAFT's distances have been highly optimized and support a wide assortment of different distance measures. .. role:: py(code) :language: python :class: highlight .. autofunction:: pylibraft.distance.pairwise_distance .. autofunction:: pylibraft.distance.fused_l2_nn_argmin
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/pylibraft_api/neighbors.rst
Neighbors ========= This page provides pylibraft class references for the publicly-exposed elements of the neighbors package. .. role:: py(code) :language: python :class: highlight Brute Force ########### .. autofunction:: pylibraft.neighbors.brute_force.knn CAGRA ##### .. autoclass:: pylibraft.neighbors.cagra.IndexParams :members: .. autofunction:: pylibraft.neighbors.cagra.build .. autoclass:: pylibraft.neighbors.cagra.SearchParams :members: .. autofunction:: pylibraft.neighbors.cagra.search Serializer Methods ------------------ .. autofunction:: pylibraft.neighbors.cagra.save .. autofunction:: pylibraft.neighbors.cagra.load IVF-Flat ######## .. autoclass:: pylibraft.neighbors.ivf_flat.IndexParams :members: .. autofunction:: pylibraft.neighbors.ivf_flat.build .. autofunction:: pylibraft.neighbors.ivf_flat.extend .. autoclass:: pylibraft.neighbors.ivf_flat.SearchParams :members: .. autofunction:: pylibraft.neighbors.ivf_flat.search Serializer Methods ------------------ .. autofunction:: pylibraft.neighbors.ivf_flat.save .. autofunction:: pylibraft.neighbors.ivf_flat.load IVF-PQ ###### .. autoclass:: pylibraft.neighbors.ivf_pq.IndexParams :members: .. autofunction:: pylibraft.neighbors.ivf_pq.build .. autofunction:: pylibraft.neighbors.ivf_pq.extend .. autoclass:: pylibraft.neighbors.ivf_pq.SearchParams :members: .. autofunction:: pylibraft.neighbors.ivf_pq.search Serializer Methods ------------------ .. autofunction:: pylibraft.neighbors.ivf_pq.save .. autofunction:: pylibraft.neighbors.ivf_pq.load Candidate Refinement -------------------- .. autofunction:: pylibraft.neighbors.refine
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/pylibraft_api/matrix.rst
Matrix ====== This page provides `pylibraft` class references for the publicly-exposed elements of the `pylibraft.matrix` package. .. role:: py(code) :language: python :class: highlight .. autofunction:: pylibraft.matrix.select_k
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/pylibraft_api/cluster.rst
Cluster ======= This page provides pylibraft class references for the publicly-exposed elements of the `pylibraft.cluster` package. .. role:: py(code) :language: python :class: highlight KMeans ###### .. autoclass:: pylibraft.cluster.kmeans.KMeansParams :members: .. autofunction:: pylibraft.cluster.kmeans.fit .. autofunction:: pylibraft.cluster.kmeans.cluster_cost .. autofunction:: pylibraft.cluster.kmeans.compute_new_centroids
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/pylibraft_api/common.rst
Common ====== This page provides `pylibraft` class references for the publicly-exposed elements of the `pylibraft.common` package. .. role:: py(code) :language: python :class: highlight Basic Vocabulary ################ .. autoclass:: pylibraft.common.DeviceResources :members: .. autoclass:: pylibraft.common.Stream :members: .. autoclass:: pylibraft.common.device_ndarray :members: Interruptible ############# .. autofunction:: pylibraft.common.interruptible.cuda_interruptible .. autofunction:: pylibraft.common.interruptible.synchronize .. autofunction:: pylibraft.common.interruptible.cuda_yield CUDA Array Interface Helpers ############################ .. autoclass:: pylibraft.common.cai_wrapper :members:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/pylibraft_api/random.rst
Random ====== This page provides pylibraft class references for the publicly-exposed elements of the `pylibraft.random` package. .. role:: py(code) :language: python :class: highlight .. autofunction:: pylibraft.random.rmat
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/sphinxext/github_link.py
# This contains code with copyright by the scikit-learn project, subject to the
# license in /thirdparty/LICENSES/LICENSE.scikit_learn

# Sphinx `linkcode` helper: resolves a documented Python/Cython object to a
# "view source on GitHub" URL pinned to the current git revision.

import inspect
import os
import re
import subprocess
import sys
from functools import partial
from operator import attrgetter

# Keep a handle on the real inspect.isfunction before monkey-patching it below.
orig = inspect.isfunction


# See https://opendreamkit.org/2017/06/09/CythonSphinx/
def isfunction(obj):
    """Replacement for ``inspect.isfunction`` that also accepts Cython
    functions (which are not plain Python functions but do expose
    ``__code__`` on their type)."""
    orig_val = orig(obj)
    new_val = hasattr(type(obj), "__code__")

    if (orig_val != new_val):
        return new_val

    return orig_val


# Monkey-patch so Sphinx's introspection treats Cython functions as functions.
inspect.isfunction = isfunction

REVISION_CMD = 'git rev-parse --short HEAD'

# Matches the "File: <path> (starting at line <n>)" marker that Cython embeds
# in docstrings when compiled with embedsignature; used as a fallback source
# locator when inspect cannot find a source file.
source_regex = re.compile(r"^File: (.*?) \(starting at line ([0-9]*?)\)$",
                          re.MULTILINE)


def _get_git_revision():
    """Return the short git hash of HEAD, or None if git is unavailable."""
    try:
        revision = subprocess.check_output(REVISION_CMD.split()).strip()
    except (subprocess.CalledProcessError, OSError):
        print('Failed to execute git to get revision')
        return None
    return revision.decode('utf-8')


def _linkcode_resolve(domain, info, package, url_fmt, revision):
    """Determine a link to online source for a class/method/function

    This is called by sphinx.ext.linkcode

    An example with a long-untouched module that everyone has
    >>> _linkcode_resolve('py', {'module': 'tty',
    ...                          'fullname': 'setraw'},
    ...                   package='tty',
    ...                   url_fmt='http://hg.python.org/cpython/file/'
    ...                           '{revision}/Lib/{package}/{path}#L{lineno}',
    ...                   revision='xxxx')
    'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
    """
    # Bail out early when we cannot build a meaningful link.
    if revision is None:
        return
    if domain not in ('py', 'pyx'):
        return
    if not info.get('module') or not info.get('fullname'):
        return

    # Import the module and walk the dotted path to the documented object.
    class_name = info['fullname'].split('.')[0]
    module = __import__(info['module'], fromlist=[class_name])
    obj = attrgetter(info['fullname'])(module)

    # Unwrap the object to get the correct source
    # file in case that is wrapped by a decorator
    obj = inspect.unwrap(obj)

    # NOTE(review): annotated `str` but initialized to None (and lineno may
    # later hold an int from getsourcelines) -- annotations here are loose.
    fn: str = None
    lineno: str = None

    # First try: ask inspect for the object's source file directly.
    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        fn = None
    if not fn:
        # Second try: the source file of the module that defines the object.
        try:
            fn = inspect.getsourcefile(sys.modules[obj.__module__])
        except Exception:
            fn = None

    if not fn:
        # Possibly Cython code. Search docstring for source
        m = source_regex.search(obj.__doc__)

        if (m is not None):
            source_file = m.group(1)
            lineno = m.group(2)

            # fn is expected to be the absolute path.
            fn = os.path.relpath(source_file, start=package)
            # NOTE(review): "cuml" looks copied from the cuML repo; in this
            # (raft) repo the printed debug path may be wrong -- confirm.
            print("{}:{}".format(
                os.path.abspath(os.path.join("..", "python", "cuml", fn)),
                lineno))
        else:
            # No source information available at all; emit no link.
            return
    else:
        # Test if we are absolute or not (pyx are relative)
        if (not os.path.isabs(fn)):
            # Should be relative to docs right now
            fn = os.path.abspath(os.path.join("..", "python", fn))

        # Convert to relative from module root
        fn = os.path.relpath(fn,
                             start=os.path.dirname(
                                 __import__(package).__file__))

    # Get the line number if we need it. (Can work without it)
    if (lineno is None):
        try:
            lineno = inspect.getsourcelines(obj)[1]
        except Exception:
            # Can happen if its a cyfunction. See if it has `__code__`
            if (hasattr(obj, "__code__")):
                lineno = obj.__code__.co_firstlineno
            else:
                lineno = ''
    return url_fmt.format(revision=revision, package=package, path=fn,
                          lineno=lineno)


def make_linkcode_resolve(package, url_fmt):
    """Returns a linkcode_resolve function for the given URL format

    revision is a git commit reference (hash or name)

    package is the name of the root module of the package

    url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
                                   'blob/{revision}/{package}/'
                                   '{path}#L{lineno}')
    """
    # Resolve the revision once at Sphinx config time; the partial is then
    # used by sphinx.ext.linkcode for every documented object.
    revision = _get_git_revision()
    return partial(_linkcode_resolve, revision=revision, package=package,
                   url_fmt=url_fmt)
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/cluster_kmeans.rst
K-Means ======= .. role:: py(code) :language: c++ :class: highlight ``#include <raft/cluster/kmeans.cuh>`` .. doxygennamespace:: raft::cluster::kmeans :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/random_sampling_multivariable.rst
Multi-Variable Random Sampling ============================== .. role:: py(code) :language: c++ :class: highlight Multi-Variable Gaussian ----------------------- ``#include <raft/random/multi_variable_gaussian.hpp>`` namespace *raft::random* .. doxygengroup:: multi_variable_gaussian :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/neighbors_epsilon_neighborhood.rst
Epsilon Neighborhood ==================== .. role:: py(code) :language: c++ :class: highlight ``#include <raft/neighbors/epsilon_neighborhood.cuh>`` namespace *raft::neighbors::epsilon_neighborhood* .. doxygengroup:: epsilon_neighbors :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/distance.rst
Distance ======== This page provides C++ class references for the publicly-exposed elements of the `raft/distance` package. RAFT's distances have been highly optimized and support a wide assortment of different distance measures. .. role:: py(code) :language: c++ :class: highlight Distance Types -------------- ``#include <raft/distance/distance_types.hpp>`` namespace *raft::distance* .. doxygenenum:: raft::distance::DistanceType :project: RAFT .. toctree:: :maxdepth: 2 :caption: Contents: distance_pairwise.rst distance_1nn.rst
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/mdspan_mdspan.rst
mdspan: Multi-dimensional Non-owning View ========================================== .. role:: py(code) :language: c++ :class: highlight ``#include <raft/core/mdspan.hpp>`` .. doxygentypedef:: raft::mdspan :project: RAFT .. doxygenfunction:: raft::make_mdspan :project: RAFT .. doxygenfunction:: raft::make_extents :project: RAFT .. doxygenfunction:: raft::make_strided_layout(Extents extents, Strides strides) :project: RAFT .. doxygengroup:: mdspan_unravel :project: RAFT :members: :content-only: .. doxygengroup:: mdspan_contiguous :project: RAFT :members: :content-only: .. doxygengroup:: mdspan_make_const :project: RAFT :members: :content-only: Device Vocabulary ----------------- ``#include <raft/core/device_mdspan.hpp>`` .. doxygentypedef:: raft::device_mdspan :project: RAFT .. doxygenstruct:: raft::is_device_mdspan :project: RAFT .. doxygentypedef:: raft::is_device_mdspan_t :project: RAFT .. doxygentypedef:: raft::is_input_device_mdspan_t :project: RAFT .. doxygentypedef:: raft::is_output_device_mdspan_t :project: RAFT .. doxygentypedef:: raft::enable_if_device_mdspan :project: RAFT .. doxygentypedef:: raft::enable_if_input_device_mdspan :project: RAFT .. doxygentypedef:: raft::enable_if_output_device_mdspan :project: RAFT .. doxygentypedef:: raft::device_matrix_view :project: RAFT .. doxygentypedef:: raft::device_vector_view :project: RAFT .. doxygentypedef:: raft::device_scalar_view :project: RAFT Device Factories ---------------- ``#include <raft/core/device_mdspan.hpp>`` .. doxygenfunction:: raft::make_device_matrix_view :project: RAFT .. doxygenfunction:: raft::make_device_vector_view(ElementType* ptr, IndexType n) :project: RAFT .. doxygenfunction:: raft::make_device_scalar_view :project: RAFT Managed Vocabulary ------------------ ``#include <raft/core/device_mdspan.hpp>`` ..doxygentypedef:: raft::managed_mdspan :project: RAFT .. doxygenstruct:: raft::is_managed_mdspan :project: RAFT .. doxygentypedef:: raft::is_managed_mdspan_t :project: RAFT .. 
doxygentypedef:: raft::is_input_managed_mdspan_t :project: RAFT .. doxygentypedef:: raft::is_output_managed_mdspan_t :project: RAFT .. doxygentypedef:: raft::enable_if_managed_mdspan :project: RAFT .. doxygentypedef:: raft::enable_if_input_managed_mdspan :project: RAFT .. doxygentypedef:: raft::enable_if_output_managed_mdspan :project: RAFT Managed Factories ----------------- ``#include <raft/core/device_mdspan.hpp>`` .. doxygenfunction:: make_managed_mdspan(ElementType* ptr, extents<IndexType, Extents...> exts) :project: RAFT Host Vocabulary --------------- ``#include <raft/core/host_mdspan.hpp>`` .. doxygentypedef:: raft::host_mdspan :project: RAFT .. doxygenstruct:: raft::is_host_mdspan :project: RAFT .. doxygentypedef:: raft::is_host_mdspan_t :project: RAFT .. doxygentypedef:: raft::is_input_host_mdspan_t :project: RAFT .. doxygentypedef:: raft::is_output_host_mdspan_t :project: RAFT .. doxygentypedef:: raft::enable_if_host_mdspan :project: RAFT .. doxygentypedef:: raft::enable_if_input_host_mdspan :project: RAFT .. doxygentypedef:: raft::enable_if_output_host_mdspan :project: RAFT .. doxygentypedef:: raft::host_matrix_view :project: RAFT .. doxygentypedef:: raft::host_vector_view :project: RAFT .. doxygentypedef:: raft::host_scalar_view :project: RAFT Host Factories -------------- ``#include <raft/core/host_mdspan.hpp>`` .. doxygenfunction:: raft::make_host_matrix_view :project: RAFT .. doxygenfunction:: raft::make_host_vector_view :project: RAFT .. doxygenfunction:: raft::make_device_scalar_view :project: RAFT Validation Routines ------------------- ``#include <raft/core/mdspan.hpp>`` .. doxygenstruct:: raft::is_mdspan :project: RAFT :members: .. doxygentypedef:: raft::is_mdspan_t :project: RAFT .. doxygenstruct:: raft::is_input_mdspan :project: RAFT :members: .. doxygentypedef:: raft::is_input_mdspan_t :project: RAFT .. doxygenstruct:: raft::is_output_mdspan :project: RAFT :members: .. doxygentypedef:: raft::is_output_mdspan_t :project: RAFT .. 
doxygentypedef:: raft::enable_if_mdspan :project: RAFT .. doxygentypedef:: raft::enable_if_input_mdspan :project: RAFT .. doxygentypedef:: raft::enable_if_output_mdspan :project: RAFT
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/neighbors.rst
Neighbors ========= This page provides C++ class references for the publicly-exposed elements of the neighbors package. .. role:: py(code) :language: c++ :class: highlight .. toctree:: :maxdepth: 2 :caption: Contents: neighbors_brute_force.rst neighbors_ivf_flat.rst neighbors_ivf_pq.rst neighbors_epsilon_neighborhood.rst neighbors_ball_cover.rst neighbors_cagra.rst
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/neighbors_ivf_pq.rst
IVF-PQ ====== .. role:: py(code) :language: c++ :class: highlight ``#include <raft/neighbors/ivf_pq.cuh>`` namespace *raft::neighbors::ivf_pq* .. doxygengroup:: ivf_pq :project: RAFT :members: :content-only: Serializer Methods ------------------ ``#include <raft/neighbors/ivf_pq_serialize.cuh>`` namespace *raft::neighbors::ivf_pq* .. doxygengroup:: ivf_pq_serialize :project: RAFT :members: :content-only: Candidate Refinement -------------------- ``#include <raft/neighbors/refine.cuh>`` namespace *raft::neighbors* .. doxygengroup:: ann_refine :project: RAFT :members: :content-only: Helper Methods -------------- ``#include <raft/neighbors/ivf_pq_helpers.cuh>`` namespace *raft::neighbors::ivf_pq::helpers* .. doxygengroup:: ivf_pq_helpers :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/core_interruptible.rst
Interruptible ============= .. role:: py(code) :language: c++ :class: highlight ``#include <raft/core/interruptible.hpp>`` namespace *raft::core* .. doxygengroup:: interruptible :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/sparse_linalg.rst
Sparse Linear Algebra ===================== .. doxygennamespace:: raft::sparse::linalg :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/matrix_ordering.rst
Matrix Ordering =============== .. role:: py(code) :language: c++ :class: highlight Argmax ------ ``#include <raft/matrix/argmax.cuh>`` namespace *raft::matrix* .. doxygengroup:: argmax :project: RAFT :members: :content-only: Argmin ------ ``#include <raft/matrix/argmin.cuh>`` namespace *raft::matrix* .. doxygengroup:: argmin :project: RAFT :members: :content-only: Select-K -------- ``#include <raft/matrix/select_k.cuh>`` namespace *raft::matrix* .. doxygengroup:: select_k :project: RAFT :members: :content-only: Column-wise Sort ---------------- ``#include <raft/matrix/col_wise_sort.cuh>`` namespace *raft::matrix* .. doxygengroup:: col_wise_sort :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/stats_clustering.rst
Clustering Model Scoring ======================== .. role:: py(code) :language: c++ :class: highlight Adjusted Rand Index ------------------- ``#include <raft/stats/adjusted_rand_index.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_adj_rand_index :project: RAFT :members: :content-only: Completeness Score ------------------ ``#include <raft/stats/completeness_score.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_completeness :project: RAFT :members: :content-only: Cluster Dispersion ------------------ ``#include <raft/stats/dispersion.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_cluster_dispersion :project: RAFT :members: :content-only: Rand Index ---------- ``#include <raft/stats/rand_index.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_rand_index :project: RAFT :members: :content-only: Silhouette Score ---------------- ``#include <raft/stats/silhouette_score.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_silhouette_score :project: RAFT :members: :content-only: V Measure --------- ``#include <raft/stats/v_measure.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_vmeasure :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/random_sampling_without_replacement.rst
Sampling Without Replacement ============================ .. role:: py(code) :language: c++ :class: highlight ``#include <raft/random/sample_without_replacement.cuh>`` namespace *raft::random* .. doxygengroup:: sample_without_replacement :project: RAFT :members: :content-only: ``#include <raft/random/permute.cuh>`` namespace *raft::random* .. doxygengroup:: permute :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/linalg_matrix_vector.rst
Matrix-Vector Operations ======================== .. role:: py(code) :language: c++ :class: highlight Arithmetic ---------- ``#include <raft/linalg/matrix_vector.cuh>`` namespace *raft::linalg* .. doxygengroup:: matrix_vector :project: RAFT :members: :content-only: Operations ---------- ``#include <raft/linalg/matrix_vector_op.cuh>`` namespace *raft::linalg* .. doxygengroup:: matrix_vector_op :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/mnmg.rst
Multi-node Multi-GPU ==================== RAFT contains C++ infrastructure for abstracting the communications layer when writing applications that scale on multiple nodes and across multiple GPUs. This infrastructure assumes OPG (one-process per GPU) architectures where multiple physical parallel units (processes, ranks, or workers) might be executing code concurrently but where each parallel unit is communicating with only a single GPU and is the only process communicating with each GPU. The comms layer in RAFT is intended to provide a facade API for barrier synchronous collective communications, allowing users to write algorithms using a single abstraction layer and deploy in many different types of systems. Currently, RAFT communications code has been deployed in MPI, Dask, and Spark clusters. .. role:: py(code) :language: c++ :class: highlight Common Types ------------ ``#include <raft/core/comms.hpp>`` namespace *raft::comms* .. doxygengroup:: comms_types :project: RAFT :members: :content-only: Comms Interface --------------- .. doxygengroup:: comms_t :project: RAFT :members: :content-only: MPI Comms --------- .. doxygengroup:: mpi_comms_factory :project: RAFT :members: :content-only: NCCL+UCX Comms -------------- .. doxygengroup:: std_comms_factory :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/linalg_map_reduce.rst
Mapping and Reduction ===================== .. role:: py(code) :language: c++ :class: highlight Coalesced Reduction ------------------- ``#include <raft/linalg/coalesced_reduction.cuh>`` namespace *raft::linalg* .. doxygengroup:: coalesced_reduction :project: RAFT :members: :content-only: Map --- ``#include <raft/linalg/map.cuh>`` namespace *raft::linalg* .. doxygengroup:: map :project: RAFT :members: :content-only: Map Reduce ---------- ``#include <raft/linalg/map_reduce.cuh>`` namespace *raft::linalg* .. doxygengroup:: map_reduce :project: RAFT :members: :content-only: Mean Squared Error ------------------ ``#include <raft/linalg/mean_squared_error.cuh>`` namespace *raft::linalg* .. doxygengroup:: mean_squared_error :project: RAFT :members: :content-only: Norm ---- ``#include <raft/linalg/norm.cuh>`` namespace *raft::linalg* .. doxygengroup:: norm :project: RAFT :members: :content-only: Normalize --------- ``#include <raft/linalg/normalize.cuh>`` namespace *raft::linalg* .. doxygengroup:: normalize :project: RAFT :members: :content-only: Reduction --------- ``#include <raft/linalg/reduce.cuh>`` namespace *raft::linalg* .. doxygengroup:: reduction :project: RAFT :members: :content-only: Reduce Cols By Key ------------------ ``#include <raft/linalg/reduce_cols_by_key.cuh>`` namespace *raft::linalg* .. doxygengroup:: reduce_cols_by_key :project: RAFT :members: :content-only: Reduce Rows By Key ------------------ ``#include <raft/linalg/reduce_rows_by_key.cuh>`` namespace *raft::linalg* .. doxygengroup:: reduce_rows_by_key :project: RAFT :members: :content-only: Strided Reduction ----------------- ``#include <raft/linalg/strided_reduction.cuh>`` namespace *raft::linalg* .. doxygengroup:: strided_reduction :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/sparse_types_csr_matrix.rst
CSR Matrix ========== .. role:: py(code) :language: c++ :class: highlight Basic Vocabulary ---------------- ``#include <raft/core/csr_matrix.hpp>`` .. doxygengroup:: csr_matrix :project: RAFT :members: :content-only: Device CSR Matrix ----------------- ``#include <raft/core/device_csr_matrix.hpp>`` .. doxygengroup:: device_csr_matrix :project: RAFT :members: :content-only: Host CSR Matrix ----------------- ``#include <raft/core/host_csr_matrix.hpp>`` .. doxygengroup:: host_csr_matrix :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/mdspan_representation.rst
Multi-dimensional Representation ================================ .. role:: py(code) :language: c++ :class: highlight Data Layouts ------------- ``#include <raft/core/mdspan_types.hpp>`` .. doxygengroup:: mdspan_layout :project: RAFT :members: :content-only: Shapes ------ ``#include <raft/core/mdspan.hpp>`` .. doxygentypedef:: raft::matrix_extent :project: RAFT .. doxygentypedef:: raft::vector_extent :project: RAFT .. doxygentypedef:: raft::scalar_extent :project: RAFT .. doxygentypedef:: raft::extent_3d :project: RAFT .. doxygentypedef:: raft::extent_4d :project: RAFT .. doxygentypedef:: raft::extent_5d :project: RAFT .. doxygengroup:: mdspan_reshape :project: RAFT :members: :content-only: .. doxygengroup:: mdarray_reshape :project: RAFT :members: :content-only: Accessors --------- ``#include <raft/core/host_device_accessor.hpp>`` .. doxygenstruct:: raft::host_device_accessor :project: RAFT :members: .. doxygentypedef:: raft::host_accessor :project: RAFT .. doxygentypedef:: raft::device_accessor :project: RAFT .. doxygentypedef:: raft::managed_accessor :project: RAFT
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/sparse_types.rst
Sparse Types ============ .. role:: py(code) :language: c++ :class: highlight ``#include <raft/core/sparse_types.hpp>`` .. doxygengroup:: sparse_types :project: RAFT :members: :content-only: .. toctree:: :maxdepth: 2 :caption: Contents: sparse_types_coo_matrix.rst sparse_types_csr_matrix.rst
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/sparse_matrix.rst
Sparse Matrix Operations ======================== .. doxygennamespace:: raft::sparse::op :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/solver.rst
Solvers ======= This page provides C++ class references for the publicly-exposed elements of the iterative and combinatorial solvers package. .. role:: py(code) :language: c++ :class: highlight Linear Assignment Problem ######################### ``#include <raft/solver/linear_assignment.cuh>`` .. doxygenclass:: raft::solver::LinearAssignmentProblem :project: RAFT :members: Minimum Spanning Tree ##################### ``#include <raft/sparse/solver/mst.cuh>`` .. doxygenfunction:: raft::sparse::solver::mst :project: RAFT
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/neighbors_ball_cover.rst
Random Ball Cover ================= .. role:: py(code) :language: c++ :class: highlight ``#include <raft/neighbors/ball_cover.cuh>`` namespace *raft::neighbors::ball_cover* .. doxygengroup:: random_ball_cover :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/matrix.rst
Matrix ====== This page provides C++ class references for the publicly-exposed elements of the `raft/matrix` headers. The `raft/matrix` headers cover many operations on matrices that are otherwise not covered by `raft/linalg`. .. role:: py(code) :language: c++ :class: highlight .. toctree:: :maxdepth: 2 :caption: Contents: matrix_arithmetic.rst matrix_manipulation.rst matrix_ordering.rst matrix_reduction.rst matrix_selection.rst
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/cluster.rst
Cluster ======= This page provides C++ API references for the publicly-exposed elements of the `raft/cluster` headers. RAFT provides fundamental clustering algorithms which are, themselves, considered reusable building blocks for other algorithms. .. role:: py(code) :language: c++ :class: highlight .. toctree:: :maxdepth: 2 :caption: Contents: cluster_kmeans.rst cluster_kmeans_balanced.rst cluster_slhc.rst cluster_spectral.rst
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/distance_pairwise.rst
Pairwise Distance ================= .. role:: py(code) :language: c++ :class: highlight ``#include <raft/distance/distance.cuh>`` namespace *raft::distance* .. doxygengroup:: distance_mdspan :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/matrix_manipulation.rst
Matrix Manipulation =================== .. role:: py(code) :language: c++ :class: highlight Initialization -------------- ``#include <raft/matrix/init.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_init :project: RAFT :members: :content-only: Reverse ------- ``#include <raft/matrix/reverse.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_reverse :project: RAFT :members: :content-only: Threshold --------- ``#include <raft/matrix/threshold.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_threshold :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/utils.rst
Utilities ========= RAFT contains numerous utility functions and primitives that are easily usable. This page provides C++ API references for the publicly-exposed utility functions. .. role:: py(code) :language: c++ :class: highlight Memory Pool ----------- ``#include <raft/utils/memory_pool.cuh>`` namespace *raft* .. doxygengroup:: memory_pool :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/mdspan_mdarray.rst
mdarray: Multi-dimensional Owning Container =========================================== .. role:: py(code) :language: c++ :class: highlight ``#include <raft/core/mdarray.hpp>`` .. doxygengroup:: mdarray_apis :project: RAFT :members: :content-only: Device Vocabulary ----------------- ``#include <raft/core/device_mdarray.hpp>`` .. doxygentypedef:: raft::device_mdarray :project: RAFT .. doxygentypedef:: raft::device_matrix :project: RAFT .. doxygentypedef:: raft::device_vector :project: RAFT .. doxygentypedef:: raft::device_scalar :project: RAFT Device Factories ---------------- ``#include <raft/core/device_mdarray.hpp>`` .. doxygenfunction:: raft::make_device_matrix :project: RAFT .. doxygenfunction:: raft::make_device_vector :project: RAFT .. doxygenfunction:: raft::make_device_scalar :project: RAFT Host Vocabulary --------------- ``#include <raft/core/host_mdarray.hpp>`` .. doxygentypedef:: raft::host_matrix :project: RAFT .. doxygentypedef:: raft::host_vector :project: RAFT .. doxygentypedef:: raft::host_scalar :project: RAFT Host Factories -------------- ``#include <raft/core/host_mdarray.hpp>`` .. doxygengroup:: host_mdarray_factories :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/stats_classification.rst
Classification Model Scoring ============================ .. role:: py(code) :language: c++ :class: highlight Accuracy -------- ``#include <raft/stats/accuracy.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_accuracy :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/stats_probability.rst
Probability & Information Theory ================================ .. role:: py(code) :language: c++ :class: highlight Contingency Matrix ------------------ ``#include <raft/stats/contingency_matrix.cuh>`` namespace *raft::stats* .. doxygengroup:: contingency_matrix :project: RAFT :members: :content-only: Entropy ------- ``#include <raft/stats/entropy.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_entropy :project: RAFT :members: :content-only: KL-Divergence ------------- ``#include <raft/stats/kl_divergence.cuh>`` namespace *raft::stats* .. doxygengroup:: kl_divergence :project: RAFT :members: :content-only: Mutual Information ------------------ ``#include <raft/stats/mutual_info_score.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_mutual_info :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/matrix_selection.rst
Matrix Selection ================ .. role:: py(code) :language: c++ :class: highlight Copy ---- ``#include <raft/matrix/copy.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_copy :project: RAFT :members: :content-only: Diagonal -------- ``#include <raft/matrix/diagonal.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_diagonal :project: RAFT :members: :content-only: Gather ------ ``#include <raft/matrix/gather.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_gather :project: RAFT :members: :content-only: Slicing ------- ``#include <raft/matrix/slice.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_slice :project: RAFT :members: :content-only: Triangular ---------- ``#include <raft/matrix/triangular.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_triangular :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/sparse_types_coo_matrix.rst
COO Matrix ========== .. role:: py(code) :language: c++ :class: highlight Basic Vocabulary ---------------- ``#include <raft/core/coo_matrix.hpp>`` .. doxygengroup:: coo_matrix :project: RAFT :members: :content-only: Device COO Matrix ----------------- ``#include <raft/core/device_coo_matrix.hpp>`` .. doxygengroup:: device_coo_matrix :project: RAFT :members: :content-only: Host COO Matrix ----------------- ``#include <raft/core/host_coo_matrix.hpp>`` .. doxygengroup:: host_coo_matrix :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/core_logger.rst
logger ====== .. role:: py(code) :language: c++ :class: highlight ``#include <raft/core/logger.hpp>`` namespace *raft::core* .. doxygenclass:: raft::logger :project: RAFT :members:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/matrix_reduction.rst
Matrix Reductions ================= .. role:: py(code) :language: c++ :class: highlight Matrix Norm ----------- ``#include <raft/matrix/norm.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_norm :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/neighbors_cagra.rst
CAGRA ===== CAGRA is a graph-based nearest neighbors implementation with state-of-the art query performance for both small- and large-batch sized search. Please note that the CAGRA implementation is currently experimental and the API is subject to change from release to release. We are currently working on promoting CAGRA to a top-level stable API within RAFT. .. role:: py(code) :language: c++ :class: highlight ``#include <raft/neighbors/cagra.cuh>`` namespace *raft::neighbors::cagra* .. doxygengroup:: cagra :project: RAFT :members: :content-only: Serializer Methods ------------------ ``#include <raft/neighbors/cagra_serialize.cuh>`` namespace *raft::neighbors::cagra* .. doxygengroup:: cagra_serialize :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/sparse_neighbors.rst
Sparse Neighbors ================ .. doxygennamespace:: raft::sparse::neighbors :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/sparse_solver.rst
Sparse Solvers ============== .. doxygennamespace:: raft::sparse::solver :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/random_datagen.rst
Data Generation =============== .. role:: py(code) :language: c++ :class: highlight make_blobs ---------- ``#include <raft/random/make_blobs.cuh>`` namespace *raft::random* .. doxygengroup:: make_blobs :project: RAFT :members: :content-only: make_regression --------------- ``#include <raft/random/make_regression.cuh>`` namespace *raft::random* .. doxygengroup:: make_regression :project: RAFT :members: :content-only: rmat ---- ``#include <raft/random/rmat_rectangular_generator.cuh>`` namespace *raft::random* .. doxygengroup:: rmat :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/linalg.rst
Linear Algebra ============== This page provides C++ class references for the publicly-exposed elements of the `raft/linalg` (dense) linear algebra headers. In addition to providing highly optimized arithmetic and matrix/vector operations, RAFT provides a consistent user experience by providing common BLAS routines, standard linear system solvers, factorization and eigenvalue solvers. Some of these routines hide the complexities of lower-level C-based libraries provided in the CUDA toolkit .. role:: py(code) :language: c++ :class: highlight .. toctree:: :maxdepth: 2 :caption: Contents: linalg_arithmetic.rst linalg_blas.rst linalg_map_reduce.rst linalg_matrix.rst linalg_matrix_vector.rst linalg_solver.rst
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/sparse_distance.rst
Sparse Distance =============== .. doxygennamespace:: raft::sparse::distance :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/cluster_spectral.rst
Spectral Clustering =================== .. role:: py(code) :language: c++ :class: highlight ``#include <raft/spectral/partition.cuh>`` .. doxygennamespace:: raft::spectral :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/stats_summary.rst
Summary Statistics ================== .. role:: py(code) :language: c++ :class: highlight Covariance ---------- ``#include <raft/stats/cov.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_cov :project: RAFT :members: :content-only: Histogram --------- ``#include <raft/stats/histogram.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_histogram :project: RAFT :members: :content-only: Mean ---- ``#include <raft/stats/mean.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_mean :project: RAFT :members: :content-only: Mean Center ----------- ``#include <raft/stats/mean_center.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_mean_center :project: RAFT :members: :content-only: Mean Variance ------------- ``#include <raft/stats/mean_var.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_mean_var :project: RAFT :members: :content-only: Min/Max ------- ``#include <raft/stats/minmax.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_minmax :project: RAFT :members: :content-only: Standard Deviation ------------------ ``#include <raft/stats/stddev.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_stddev :project: RAFT :members: :content-only: Sum --- ``#include <raft/stats/sum.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_sum :project: RAFT :members: :content-only: Weighted Average ---------------- ``#include <raft/stats/weighted_mean.cuh>`` namespace *raft::stats* .. doxygengroup:: stats_weighted_mean :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/random_sampling_univariate.rst
Univariate Random Sampling ========================== .. role:: py(code) :language: c++ :class: highlight ``#include <raft/random/rng.cuh>`` namespace *raft::random* .. doxygengroup:: univariate_random_sampling :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/distance_1nn.rst
1-Nearest Neighbors =================== .. role:: py(code) :language: c++ :class: highlight ``#include <raft/distance/fused_l2_nn.cuh>`` namespace *raft::distance* .. doxygengroup:: fused_l2_nn :project: RAFT :members: :content-only: ``#include <raft/distance/fused_l2_nn.cuh>`` namespace *raft::distance* .. doxygengroup:: masked_nn :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/core_math.rst
Mathematical Functions ====================== .. role:: py(code) :language: c++ :class: highlight The math functions APIs guarantee both CUDA and CPU compatibility, making it more straightforward to write `__host__ __device__` functions without being concerned whether the underlying intrinsics will build and work. ``#include <raft/core/math.hpp>`` namespace *raft::core* .. doxygengroup:: math_functions :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/neighbors_brute_force.rst
Brute-Force =========== .. role:: py(code) :language: c++ :class: highlight ``#include <raft/neighbors/brute_force.cuh>`` namespace *raft::neighbors::brute_force* .. doxygengroup:: brute_force_knn :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/neighbors_ivf_flat.rst
IVF-Flat ======== .. role:: py(code) :language: c++ :class: highlight ``#include <raft/neighbors/ivf_flat.cuh>`` namespace *raft::neighbors::ivf_flat* .. doxygengroup:: ivf_flat :project: RAFT :members: :content-only: Serializer Methods ------------------ ``#include <raft/neighbors/ivf_flat_serialize.cuh>`` namespace *raft::neighbors::ivf_flat* .. doxygengroup:: ivf_flat_serialize :project: RAFT :members: :content-only: Helper Methods -------------- ``#include <raft/neighbors/ivf_flat_helpers.cuh>`` namespace *raft::neighbors::ivf_flat::helpers* .. doxygengroup:: ivf_flat_helpers :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/core_operators.rst
Operators and Functors ====================== .. role:: py(code) :language: c++ :class: highlight ``#include <raft/core/operators.hpp>`` namespace *raft::core* .. doxygengroup:: operators :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/core_kvp.rst
Key-Value Pair ============== .. role:: py(code) :language: c++ :class: highlight ``#include <raft/core/kvp.hpp>`` namespace *raft::core* .. doxygenstruct:: raft::KeyValuePair :project: RAFT :members:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/matrix_arithmetic.rst
Matrix Arithmetic ================= .. role:: py(code) :language: c++ :class: highlight Line-wise Operation ------------------- ``#include <raft/matrix/linewise_op.cuh>`` namespace *raft::matrix* .. doxygengroup:: linewise_op :project: RAFT :members: :content-only: Power ----- ``#include <raft/matrix/power.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_power :project: RAFT :members: :content-only: Ratio ----- ``#include <raft/matrix/ratio.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_ratio :project: RAFT :members: :content-only: Reciprocal ---------- ``#include <raft/matrix/reciprocal.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_reciprocal :project: RAFT :members: :content-only: Sign-flip --------- ``#include <raft/matrix/sign_flip.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_sign_flip :project: RAFT :members: :content-only: Square Root ----------- ``#include <raft/matrix/sqrt.cuh>`` namespace *raft::matrix* .. doxygengroup:: matrix_sqrt :project: RAFT :members: :content-only:
0
rapidsai_public_repos/raft/docs/source
rapidsai_public_repos/raft/docs/source/cpp_api/core.rst
Core ==== This page provides C++ class references for the publicly-exposed elements of the `raft/core` package. The `raft/core` headers require minimal dependencies, can be compiled without `nvcc`, and thus are safe to expose on your own public APIs. Aside from the headers in the `raft/core` include directory, any headers in the codebase with the suffix `_types.hpp` are also safe to expose in public APIs. .. role:: py(code) :language: c++ :class: highlight .. toctree:: :maxdepth: 2 :caption: Contents: core_resources.rst core_logger.rst core_kvp.rst core_nvtx.rst core_interruptible.rst core_operators.rst core_math.rst core_bitset.rst
0