repo_id
stringlengths
21
96
file_path
stringlengths
31
155
content
stringlengths
1
92.9M
__index_level_0__
int64
0
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/distance/dist_l2_exp.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "distance_base.cuh" namespace raft { namespace distance { template <typename DataType> class DistanceEucExpTest : public DistanceTest<cuvs::distance::DistanceType::L2Expanded, DataType> { }; template <typename DataType> class DistanceEucExpTestXequalY : public DistanceTestSameBuffer<cuvs::distance::DistanceType::L2Expanded, DataType> {}; const std::vector<DistanceInputs<float>> inputsf = { {0.001f, 2048, 4096, 128, true, 1234ULL}, {0.001f, 1024, 1024, 32, true, 1234ULL}, {0.001f, 1024, 32, 1024, true, 1234ULL}, {0.001f, 32, 1024, 1024, true, 1234ULL}, {0.003f, 1024, 1024, 1024, true, 1234ULL}, {0.003f, 1021, 1021, 1021, true, 1234ULL}, {0.001f, 1024, 1024, 32, false, 1234ULL}, {0.001f, 1024, 32, 1024, false, 1234ULL}, {0.001f, 32, 1024, 1024, false, 1234ULL}, {0.003f, 1024, 1024, 1024, false, 1234ULL}, {0.003f, 1021, 1021, 1021, false, 1234ULL}, }; const std::vector<DistanceInputs<float>> inputsXeqYf = { {0.01f, 2048, 4096, 128, true, 1234ULL}, {0.01f, 1024, 1024, 32, true, 1234ULL}, {0.01f, 1024, 32, 1024, true, 1234ULL}, {0.01f, 32, 1024, 1024, true, 1234ULL}, {0.03f, 1024, 1024, 1024, true, 1234ULL}, {0.03f, 1021, 1021, 1021, true, 1234ULL}, {0.01f, 1024, 1024, 32, false, 1234ULL}, {0.01f, 1024, 32, 1024, false, 1234ULL}, {0.01f, 32, 1024, 1024, false, 1234ULL}, {0.03f, 1024, 1024, 1024, false, 1234ULL}, {0.03f, 1021, 1021, 1021, false, 
1234ULL}, }; typedef DistanceEucExpTest<float> DistanceEucExpTestF; TEST_P(DistanceEucExpTestF, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceEucExpTestF, ::testing::ValuesIn(inputsf)); typedef DistanceEucExpTestXequalY<float> DistanceEucExpTestXequalYF; TEST_P(DistanceEucExpTestXequalYF, Result) { int m = params.m; ASSERT_TRUE(raft::devArrMatch(dist_ref[0].data(), dist[0].data(), m, m, raft::CompareApprox<float>(params.tolerance), stream)); ASSERT_TRUE(raft::devArrMatch(dist_ref[1].data(), dist[1].data(), m / 2, m, raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceEucExpTestXequalYF, ::testing::ValuesIn(inputsXeqYf)); const std::vector<DistanceInputs<double>> inputsd = { {0.001, 1024, 1024, 32, true, 1234ULL}, {0.001, 1024, 32, 1024, true, 1234ULL}, {0.001, 32, 1024, 1024, true, 1234ULL}, {0.003, 1024, 1024, 1024, true, 1234ULL}, {0.001, 1024, 1024, 32, false, 1234ULL}, {0.001, 1024, 32, 1024, false, 1234ULL}, {0.001, 32, 1024, 1024, false, 1234ULL}, {0.003, 1024, 1024, 1024, false, 1234ULL}, }; typedef DistanceEucExpTest<double> DistanceEucExpTestD; TEST_P(DistanceEucExpTestD, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceEucExpTestD, ::testing::ValuesIn(inputsd)); class BigMatrixEucExp : public BigMatrixDistanceTest<cuvs::distance::DistanceType::L2Expanded> {}; TEST_F(BigMatrixEucExp, Result) {} } // end namespace distance } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/distance/dist_hamming.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "distance_base.cuh" namespace raft { namespace distance { template <typename DataType> class DistanceHamming : public DistanceTest<cuvs::distance::DistanceType::HammingUnexpanded, DataType> {}; const std::vector<DistanceInputs<float>> inputsf = { {0.001f, 1024, 1024, 32, true, 1234ULL}, {0.001f, 1024, 32, 1024, true, 1234ULL}, {0.001f, 32, 1024, 1024, true, 1234ULL}, {0.003f, 1024, 1024, 1024, true, 1234ULL}, {0.001f, 1024, 1024, 32, false, 1234ULL}, {0.001f, 1024, 32, 1024, false, 1234ULL}, {0.001f, 32, 1024, 1024, false, 1234ULL}, {0.003f, 1024, 1024, 1024, false, 1234ULL}, }; typedef DistanceHamming<float> DistanceHammingF; TEST_P(DistanceHammingF, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? 
params.n : params.m; ASSERT_TRUE(raft::devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceHammingF, ::testing::ValuesIn(inputsf)); const std::vector<DistanceInputs<double>> inputsd = { {0.001, 1024, 1024, 32, true, 1234ULL}, {0.001, 1024, 32, 1024, true, 1234ULL}, {0.001, 32, 1024, 1024, true, 1234ULL}, {0.003, 1024, 1024, 1024, true, 1234ULL}, {0.001, 1024, 1024, 32, false, 1234ULL}, {0.001, 1024, 32, 1024, false, 1234ULL}, {0.001, 32, 1024, 1024, false, 1234ULL}, {0.003, 1024, 1024, 1024, false, 1234ULL}, }; typedef DistanceHamming<double> DistanceHammingD; TEST_P(DistanceHammingD, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(raft::devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceHammingD, ::testing::ValuesIn(inputsd)); class BigMatrixHamming : public BigMatrixDistanceTest<cuvs::distance::DistanceType::HammingUnexpanded> {}; TEST_F(BigMatrixHamming, Result) {} } // end namespace distance } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/distance/dist_lp_unexp.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "distance_base.cuh" namespace raft { namespace distance { template <typename DataType> class DistanceLpUnexp : public DistanceTest<cuvs::distance::DistanceType::LpUnexpanded, DataType> { }; const std::vector<DistanceInputs<float>> inputsf = { {0.001f, 1024, 1024, 32, true, 1234ULL, 4.0f}, {0.001f, 1024, 32, 1024, true, 1234ULL, 3.0f}, {0.001f, 32, 1024, 1024, true, 1234ULL, 4.0f}, {0.003f, 1024, 1024, 1024, true, 1234ULL, 3.0f}, {0.001f, 1024, 1024, 32, false, 1234ULL, 4.0f}, {0.001f, 1024, 32, 1024, false, 1234ULL, 3.0f}, {0.001f, 32, 1024, 1024, false, 1234ULL, 4.0f}, {0.003f, 1024, 1024, 1024, false, 1234ULL, 3.0f}, }; typedef DistanceLpUnexp<float> DistanceLpUnexpF; TEST_P(DistanceLpUnexpF, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? 
params.n : params.m; ASSERT_TRUE(raft::devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceLpUnexpF, ::testing::ValuesIn(inputsf)); const std::vector<DistanceInputs<double>> inputsd = { {0.001, 1024, 1024, 32, true, 1234ULL, 4.0}, {0.001, 1024, 32, 1024, true, 1234ULL, 3.0}, {0.001, 32, 1024, 1024, true, 1234ULL, 4.0}, {0.003, 1024, 1024, 1024, true, 1234ULL, 3.0}, {0.001, 1024, 1024, 32, false, 1234ULL, 4.0}, {0.001, 1024, 32, 1024, false, 1234ULL, 3.0}, {0.001, 32, 1024, 1024, false, 1234ULL, 4.0}, {0.003, 1024, 1024, 1024, false, 1234ULL, 3.0}, }; typedef DistanceLpUnexp<double> DistanceLpUnexpD; TEST_P(DistanceLpUnexpD, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(raft::devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceLpUnexpD, ::testing::ValuesIn(inputsd)); class BigMatrixLpUnexp : public BigMatrixDistanceTest<cuvs::distance::DistanceType::LpUnexpanded> { }; TEST_F(BigMatrixLpUnexp, Result) {} } // end namespace distance } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/distance/dist_l2_sqrt_exp.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "distance_base.cuh" namespace raft { namespace distance { template <typename DataType> class DistanceEucSqrtExpTest : public DistanceTest<cuvs::distance::DistanceType::L2SqrtExpanded, DataType> {}; const std::vector<DistanceInputs<float>> inputsf = { {0.001f, 2048, 4096, 128, true, 1234ULL}, {0.001f, 1024, 1024, 32, true, 1234ULL}, {0.001f, 1024, 32, 1024, true, 1234ULL}, {0.001f, 32, 1024, 1024, true, 1234ULL}, {0.003f, 1024, 1024, 1024, true, 1234ULL}, {0.003f, 1021, 1021, 1021, true, 1234ULL}, {0.001f, 1024, 1024, 32, false, 1234ULL}, {0.001f, 1024, 32, 1024, false, 1234ULL}, {0.001f, 32, 1024, 1024, false, 1234ULL}, {0.003f, 1024, 1024, 1024, false, 1234ULL}, {0.003f, 1021, 1021, 1021, false, 1234ULL}, }; typedef DistanceEucSqrtExpTest<float> DistanceEucSqrtExpTestF; TEST_P(DistanceEucSqrtExpTestF, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? 
params.n : params.m; ASSERT_TRUE(devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceEucSqrtExpTestF, ::testing::ValuesIn(inputsf)); const std::vector<DistanceInputs<double>> inputsd = { {0.001, 1024, 1024, 32, true, 1234ULL}, {0.001, 1024, 32, 1024, true, 1234ULL}, {0.001, 32, 1024, 1024, true, 1234ULL}, {0.003, 1024, 1024, 1024, true, 1234ULL}, {0.001, 1024, 1024, 32, false, 1234ULL}, {0.001, 1024, 32, 1024, false, 1234ULL}, {0.001, 32, 1024, 1024, false, 1234ULL}, {0.003, 1024, 1024, 1024, false, 1234ULL}, }; typedef DistanceEucSqrtExpTest<double> DistanceEucSqrtExpTestD; TEST_P(DistanceEucSqrtExpTestD, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceEucSqrtExpTestD, ::testing::ValuesIn(inputsd)); class BigMatrixEucSqrtExp : public BigMatrixDistanceTest<cuvs::distance::DistanceType::L2SqrtExpanded> {}; TEST_F(BigMatrixEucSqrtExp, Result) {} } // end namespace distance } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/distance/dist_hellinger.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "distance_base.cuh" namespace raft { namespace distance { template <typename DataType> class DistanceHellingerExp : public DistanceTest<cuvs::distance::DistanceType::HellingerExpanded, DataType> {}; const std::vector<DistanceInputs<float>> inputsf = { {0.001f, 1024, 1024, 32, true, 1234ULL}, {0.001f, 1024, 32, 1024, true, 1234ULL}, {0.001f, 32, 1024, 1024, true, 1234ULL}, {0.003f, 1024, 1024, 1024, true, 1234ULL}, {0.001f, 1024, 1024, 32, false, 1234ULL}, {0.001f, 1024, 32, 1024, false, 1234ULL}, {0.001f, 32, 1024, 1024, false, 1234ULL}, {0.003f, 1024, 1024, 1024, false, 1234ULL}, }; typedef DistanceHellingerExp<float> DistanceHellingerExpF; TEST_P(DistanceHellingerExpF, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? 
params.n : params.m; ASSERT_TRUE(raft::devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceHellingerExpF, ::testing::ValuesIn(inputsf)); const std::vector<DistanceInputs<double>> inputsd = { {0.001, 1024, 1024, 32, true, 1234ULL}, {0.001, 1024, 32, 1024, true, 1234ULL}, {0.001, 32, 1024, 1024, true, 1234ULL}, {0.003, 1024, 1024, 1024, true, 1234ULL}, {0.001, 1024, 1024, 32, false, 1234ULL}, {0.001, 1024, 32, 1024, false, 1234ULL}, {0.001, 32, 1024, 1024, false, 1234ULL}, {0.003, 1024, 1024, 1024, false, 1234ULL}, }; typedef DistanceHellingerExp<double> DistanceHellingerExpD; TEST_P(DistanceHellingerExpD, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(raft::devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceHellingerExpD, ::testing::ValuesIn(inputsd)); class BigMatrixHellingerExp : public BigMatrixDistanceTest<cuvs::distance::DistanceType::HellingerExpanded> {}; TEST_F(BigMatrixHellingerExp, Result) {} } // end namespace distance } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/distance/masked_nn_compress_to_bits.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "../test_utils.h" #include <cstdio> #include <cuvs/distance/detail/compress_to_bits.cuh> #include <gtest/gtest.h> #include <iostream> #include <raft/core/device_mdarray.hpp> #include <raft/core/device_mdspan.hpp> #include <raft/core/handle.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/matrix/init.cuh> #include <raft/random/rng.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <raft/util/integer_utils.hpp> #include <raft/util/itertools.hpp> namespace cuvs::distance::masked_nn::compress_to_bits { /** * @brief Transpose and decompress 2D bitfield to boolean matrix * * Inverse operation of compress_to_bits * * @tparam T * * @parameter[in] in An `m x n` bitfield matrix. Row major. * @parameter in_rows The number of rows of `in`, i.e. `m`. * @parameter in_cols The number of cols of `in`, i.e. `n`. * * @parameter[out] out An `(m * bits_per_elem) x n` boolean matrix. 
*/ template <typename T = uint64_t, typename = std::enable_if_t<std::is_integral<T>::value>> RAFT_KERNEL decompress_bits_kernel(const T* in, int in_rows, int in_cols, bool* out) { constexpr int bits_per_element = 8 * sizeof(T); const size_t i = threadIdx.y + blockIdx.y * blockDim.y; const size_t j = threadIdx.x + blockIdx.x * blockDim.x; if (in_rows <= i || in_cols <= j) { return; } const size_t out_rows = in_rows * bits_per_element; const size_t out_cols = in_cols; const size_t out_i = i * bits_per_element; const size_t out_j = j; if (out_rows <= out_i && out_cols <= out_j) { return; } T bitfield = in[i * in_cols + j]; for (int bitpos = 0; bitpos < bits_per_element; ++bitpos) { bool bit = ((T(1) << bitpos) & bitfield) != 0; out[(out_i + bitpos) * out_cols + out_j] = bit; } } /** * @brief Transpose and decompress 2D bitfield to boolean matrix * * Inverse operation of compress_to_bits * * @tparam T * * @parameter[in] in An `m x n` bitfield matrix. Row major. * @parameter in_rows The number of rows of `in`, i.e. `m`. * @parameter in_cols The number of cols of `in`, i.e. `n`. * * @parameter[out] out An `n x (m * bits_per_elem)` boolean matrix. */ template <typename T = uint64_t, typename = std::enable_if_t<std::is_integral<T>::value>> void decompress_bits(const raft::handle_t& handle, const T* in, int in_rows, int in_cols, bool* out) { auto stream = resource::get_cuda_stream(handle); dim3 grid(raft::ceildiv(in_cols, 32), raft::ceildiv(in_rows, 32)); dim3 block(32, 32); decompress_bits_kernel<<<grid, block, 0, stream>>>(in, in_rows, in_cols, out); RAFT_CUDA_TRY(cudaGetLastError()); } // Params holds parameters for test case struct Params { int m, n; }; inline auto operator<<(std::ostream& os, const Params& p) -> std::ostream& { return os << "m: " << p.m << ", n: " << p.n; } // Check that the following holds // // decompress(compress(x)) == x // // for 2D boolean matrices x. 
template <typename T> void check_invertible(const Params& p) { using cuvs::distance::detail::compress_to_bits; constexpr int bits_per_elem = sizeof(T) * 8; // Make m and n that are safe to ceildiv. int m = raft::round_up_safe(p.m, bits_per_elem); int n = p.n; // Generate random input raft::handle_t handle{}; raft::random::RngState r(1ULL); auto in = raft::make_device_matrix<bool, int>(handle, m, n); raft::random::bernoulli(handle, r, in.data_handle(), m * n, 0.5f); int tmp_m = raft::ceildiv(m, bits_per_elem); int out_m = tmp_m * bits_per_elem; auto tmp = raft::make_device_matrix<T, int>(handle, tmp_m, n); auto out = raft::make_device_matrix<bool, int>(handle, out_m, n); resource::sync_stream(handle); RAFT_CUDA_TRY(cudaGetLastError()); ASSERT_EQ(in.extent(0), out.extent(0)) << "M does not match"; ASSERT_EQ(in.extent(1), out.extent(1)) << "N does not match"; compress_to_bits(handle, in.view(), tmp.view()); resource::sync_stream(handle); RAFT_CUDA_TRY(cudaGetLastError()); decompress_bits(handle, tmp.data_handle(), tmp.extent(0), tmp.extent(1), out.data_handle()); resource::sync_stream(handle); RAFT_CUDA_TRY(cudaGetLastError()); // Check for differences. ASSERT_TRUE(raft::devArrMatch(in.data_handle(), out.data_handle(), in.extent(0) * in.extent(1), raft::Compare<bool>(), resource::get_cuda_stream(handle))); resource::sync_stream(handle); RAFT_CUDA_TRY(cudaGetLastError()); } void check_all_true(const Params& p) { using cuvs::distance::detail::compress_to_bits; using T = uint64_t; constexpr int bits_per_elem = sizeof(T) * 8; // Make m and n that are safe to ceildiv. 
int m = raft::round_up_safe(p.m, bits_per_elem); int n = p.n; raft::handle_t handle{}; raft::random::RngState r(1ULL); auto in = raft::make_device_matrix<bool, int>(handle, m, n); raft::matrix::fill(handle, in.view(), true); int tmp_m = raft::ceildiv(m, bits_per_elem); auto tmp = raft::make_device_matrix<T, int>(handle, tmp_m, n); resource::sync_stream(handle); RAFT_CUDA_TRY(cudaGetLastError()); compress_to_bits(handle, in.view(), tmp.view()); resource::sync_stream(handle); RAFT_CUDA_TRY(cudaGetLastError()); auto expected = raft::make_device_matrix<T, int>(handle, tmp_m, n); raft::matrix::fill(handle, expected.view(), ~T(0)); // Check for differences. ASSERT_TRUE(raft::devArrMatch(expected.data_handle(), tmp.data_handle(), tmp.extent(0) * tmp.extent(1), raft::Compare<T>(), resource::get_cuda_stream(handle))); resource::sync_stream(handle); RAFT_CUDA_TRY(cudaGetLastError()); } class CompressToBitsTest : public ::testing::TestWithParam<Params> { // Empty. }; TEST_P(CompressToBitsTest, CheckTrue64) { check_all_true(GetParam()); } TEST_P(CompressToBitsTest, CheckInvertible64) { using T = uint64_t; check_invertible<T>(GetParam()); } TEST_P(CompressToBitsTest, CheckInvertible32) { using T = uint32_t; check_invertible<T>(GetParam()); } std::vector<Params> params = raft::util::itertools::product<Params>( {1, 3, 32, 33, 63, 64, 65, 128, 10013}, {1, 3, 32, 33, 63, 64, 65, 13001}); INSTANTIATE_TEST_CASE_P(CompressToBits, CompressToBitsTest, ::testing::ValuesIn(params)); } // namespace cuvs::distance::masked_nn::compress_to_bits
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/distance/fused_l2_nn.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <cuvs/distance/detail/fused_l2_nn.cuh> #include <cuvs/distance/fused_l2_nn.cuh> #include <gtest/gtest.h> #include <raft/core/kvp.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/linalg/norm.cuh> #include <raft/random/rng.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> namespace raft { namespace distance { template <typename LabelT, typename DataT> struct RaftKVPMinReduce { typedef raft::KeyValuePair<LabelT, DataT> KVP; DI KVP operator()(LabelT rit, const KVP& a, const KVP& b) { return b.value < a.value ? b : a; } DI KVP operator()(const KVP& a, const KVP& b) { return b.value < a.value ? b : a; } }; // KVPMinReduce template <typename DataT, bool Sqrt, typename ReduceOpT, int NWARPS> RAFT_KERNEL naiveKernel(raft::KeyValuePair<int, DataT>* min, DataT* x, DataT* y, int m, int n, int k, int* workspace, DataT maxVal) { int midx = threadIdx.y + blockIdx.y * blockDim.y; int nidx = threadIdx.x + blockIdx.x * blockDim.x; DataT acc = DataT(0); for (int i = 0; i < k; ++i) { int xidx = i + midx * k; int yidx = i + nidx * k; auto diff = midx >= m || nidx >= n ? 
DataT(0) : x[xidx] - y[yidx]; acc += diff * diff; } if (Sqrt) { acc = raft::sqrt(acc); } ReduceOpT redOp; typedef cub::WarpReduce<raft::KeyValuePair<int, DataT>> WarpReduce; __shared__ typename WarpReduce::TempStorage temp[NWARPS]; int warpId = threadIdx.x / raft::WarpSize; raft::KeyValuePair<int, DataT> tmp; tmp.key = nidx; tmp.value = midx >= m || nidx >= n ? maxVal : acc; tmp = WarpReduce(temp[warpId]).Reduce(tmp, RaftKVPMinReduce<int, DataT>()); if (threadIdx.x % raft::WarpSize == 0 && midx < m) { while (atomicCAS(workspace + midx, 0, 1) == 1) ; __threadfence(); redOp(midx, min + midx, tmp); __threadfence(); atomicCAS(workspace + midx, 1, 0); } } template <typename DataT, bool Sqrt> void naive(raft::KeyValuePair<int, DataT>* min, DataT* x, DataT* y, int m, int n, int k, int* workspace, cudaStream_t stream) { static const dim3 TPB(32, 16, 1); dim3 nblks(raft::ceildiv(n, (int)TPB.x), raft::ceildiv(m, (int)TPB.y), 1); RAFT_CUDA_TRY(cudaMemsetAsync(workspace, 0, sizeof(int) * m, stream)); auto blks = raft::ceildiv(m, 256); MinAndDistanceReduceOp<int, DataT> op; detail::initKernel<DataT, raft::KeyValuePair<int, DataT>, int> <<<blks, 256, 0, stream>>>(min, m, std::numeric_limits<DataT>::max(), op); RAFT_CUDA_TRY(cudaGetLastError()); naiveKernel<DataT, Sqrt, MinAndDistanceReduceOp<int, DataT>, 16> <<<nblks, TPB, 0, stream>>>(min, x, y, m, n, k, workspace, std::numeric_limits<DataT>::max()); RAFT_CUDA_TRY(cudaGetLastError()); } template <typename DataT> struct Inputs { DataT tolerance; int m, n, k; unsigned long long int seed; friend std::ostream& operator<<(std::ostream& os, const Inputs& p) { return os << "m: " << p.m << ", " "n: " << p.n << ", " "k: " << p.k << ", " "seed: " << p.seed << ", " "tol: " << p.tolerance; } }; template <typename DataT, bool Sqrt> class FusedL2NNTest : public ::testing::TestWithParam<Inputs<DataT>> { public: FusedL2NNTest() : params(::testing::TestWithParam<Inputs<DataT>>::GetParam()), stream(resource::get_cuda_stream(handle)), x(params.m 
* params.k, stream), y(params.n * params.k, stream), xn(params.m, stream), yn(params.n, stream), min(params.m, stream), min_ref(params.m, stream), workspace(params.m * sizeof(int), stream) { } protected: void SetUp() override { raft::random::RngState r(params.seed); int m = params.m; int n = params.n; int k = params.k; uniform(handle, r, x.data(), m * k, DataT(-1.0), DataT(1.0)); uniform(handle, r, y.data(), n * k, DataT(-1.0), DataT(1.0)); generateGoldenResult(); raft::linalg::rowNorm(xn.data(), x.data(), k, m, raft::linalg::L2Norm, true, stream); raft::linalg::rowNorm(yn.data(), y.data(), k, n, raft::linalg::L2Norm, true, stream); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); } protected: raft::resources handle; cudaStream_t stream; Inputs<DataT> params; rmm::device_uvector<DataT> x; rmm::device_uvector<DataT> y; rmm::device_uvector<DataT> xn; rmm::device_uvector<DataT> yn; rmm::device_uvector<raft::KeyValuePair<int, DataT>> min; rmm::device_uvector<raft::KeyValuePair<int, DataT>> min_ref; rmm::device_uvector<char> workspace; virtual void generateGoldenResult() { int m = params.m; int n = params.n; int k = params.k; naive<DataT, Sqrt>(min_ref.data(), x.data(), y.data(), m, n, k, (int*)workspace.data(), stream); } void runTest(raft::KeyValuePair<int, DataT>* out) { int m = params.m; int n = params.n; int k = params.k; const bool init_out_buffer = true; fusedL2NNMinReduce<DataT, raft::KeyValuePair<int, DataT>, int>(out, x.data(), y.data(), xn.data(), yn.data(), m, n, k, (void*)workspace.data(), Sqrt, init_out_buffer, stream); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); } }; template <typename T> struct CompareApproxAbsKVP { typedef typename raft::KeyValuePair<int, T> KVP; CompareApproxAbsKVP(T eps_) : eps(eps_) {} bool operator()(const KVP& a, const KVP& b) const { T diff = std::abs(std::abs(a.value) - std::abs(b.value)); T m = std::max(std::abs(a.value), std::abs(b.value)); T ratio = m >= eps ? 
diff / m : diff; return (ratio <= eps); } private: T eps; }; template <typename T> struct CompareExactKVP { typedef typename raft::KeyValuePair<int, T> KVP; bool operator()(const KVP& a, const KVP& b) const { if (a.value != b.value) return false; return true; } }; template <typename K, typename V, typename L> ::testing::AssertionResult devArrMatch(const raft::KeyValuePair<K, V>* expected, const raft::KeyValuePair<K, V>* actual, size_t size, L eq_compare, cudaStream_t stream = 0) { typedef typename raft::KeyValuePair<K, V> KVP; std::shared_ptr<KVP> exp_h(new KVP[size]); std::shared_ptr<KVP> act_h(new KVP[size]); raft::update_host<KVP>(exp_h.get(), expected, size, stream); raft::update_host<KVP>(act_h.get(), actual, size, stream); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); for (size_t i(0); i < size; ++i) { auto exp = exp_h.get()[i]; auto act = act_h.get()[i]; if (!eq_compare(exp, act)) { return ::testing::AssertionFailure() << "actual=" << act.key << "," << act.value << " != expected=" << exp.key << "," << exp.value << " @" << i; } } return ::testing::AssertionSuccess(); } const std::vector<Inputs<float>> inputsf = { {0.001f, 32, 32, 32, 1234ULL}, {0.001f, 32, 64, 32, 1234ULL}, {0.001f, 64, 32, 32, 1234ULL}, {0.001f, 64, 64, 32, 1234ULL}, {0.001f, 128, 32, 32, 1234ULL}, {0.001f, 128, 64, 32, 1234ULL}, {0.001f, 128, 128, 64, 1234ULL}, {0.001f, 64, 128, 128, 1234ULL}, {0.001f, 32, 32, 34, 1234ULL}, {0.001f, 32, 64, 34, 1234ULL}, {0.001f, 64, 32, 34, 1234ULL}, {0.001f, 64, 64, 34, 1234ULL}, {0.001f, 128, 32, 34, 1234ULL}, {0.001f, 128, 64, 34, 1234ULL}, {0.001f, 128, 128, 66, 1234ULL}, {0.001f, 64, 128, 130, 1234ULL}, {0.001f, 32, 32, 33, 1234ULL}, {0.001f, 32, 64, 33, 1234ULL}, {0.001f, 64, 32, 33, 1234ULL}, {0.001f, 64, 64, 33, 1234ULL}, {0.001f, 128, 32, 33, 1234ULL}, {0.001f, 128, 64, 33, 1234ULL}, {0.001f, 128, 128, 65, 1234ULL}, {0.001f, 64, 128, 129, 1234ULL}, {0.006f, 1805, 134, 2, 1234ULL}, {0.006f, 8192, 1024, 64, 1234ULL}, {0.006f, 8192, 1025, 64, 
1234ULL},
  // Repeat with smaller values of k.
  // NOTE(review): these initializers have 5 fields ({tolerance, m, n, k, seed}?) while the
  // DistanceInputs entries at the top of this chunk have 6 (incl. isRowMajor) — this section
  // appears to belong to a FusedL2NN test file (Inputs struct); confirm against the real repo.
  {0.006f, 32, 32, 1, 1234ULL},
  {0.001f, 32, 64, 2, 1234ULL},
  {0.001f, 64, 32, 3, 1234ULL},
  {0.001f, 64, 64, 4, 1234ULL},
  {0.001f, 128, 32, 5, 1234ULL},
  {0.001f, 128, 64, 6, 1234ULL},
  {0.001f, 128, 128, 7, 1234ULL},
  {0.001f, 64, 128, 8, 1234ULL},
  {0.001f, 32, 32, 9, 1234ULL},
  {0.001f, 32, 64, 10, 1234ULL},
  {0.001f, 64, 32, 11, 1234ULL},
  {0.001f, 64, 64, 12, 1234ULL},
  {0.001f, 128, 32, 13, 1234ULL},
  {0.001f, 128, 64, 14, 1234ULL},
  {0.001f, 128, 128, 15, 1234ULL},
  {0.001f, 64, 128, 16, 1234ULL},
  {0.001f, 32, 32, 17, 1234ULL},
  {0.001f, 32, 64, 18, 1234ULL},
  {0.001f, 64, 32, 19, 1234ULL},
  {0.001f, 64, 64, 20, 1234ULL},
  {0.001f, 128, 32, 21, 1234ULL},
  {0.001f, 128, 64, 22, 1234ULL},
  {0.001f, 128, 128, 23, 1234ULL},
  {0.00001, 64, 128, 24, 1234ULL},  // double literal among floats — narrowed on init; harmless
  {0.001f, 1805, 134, 25, 1234ULL},
  {0.006f, 8192, 1024, 25, 1234ULL},
  {0.006f, 8192, 1024, 66, 1234ULL},
};

// Fused L2 nearest-neighbor, squared-distance variant (Sqrt = false), float32.
// runTest() fills `min` with the computed argmin KV pairs; they are compared against
// the golden `min_ref` within an absolute tolerance on the distance component.
typedef FusedL2NNTest<float, false> FusedL2NNTestF_Sq;
TEST_P(FusedL2NNTestF_Sq, Result)
{
  runTest(min.data());
  ASSERT_TRUE(devArrMatch(
    min_ref.data(), min.data(), params.m, CompareApproxAbsKVP<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestF_Sq, ::testing::ValuesIn(inputsf));

// Same as above but with the square-root applied to distances (Sqrt = true).
typedef FusedL2NNTest<float, true> FusedL2NNTestF_Sqrt;
TEST_P(FusedL2NNTestF_Sqrt, Result)
{
  runTest(min.data());
  ASSERT_TRUE(devArrMatch(
    min_ref.data(), min.data(), params.m, CompareApproxAbsKVP<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestF_Sqrt, ::testing::ValuesIn(inputsf));

// Double-precision parameter sets. Fields presumably {tolerance, m, n, k, seed} — defined in
// the test fixture header; confirm there.
const std::vector<Inputs<double>> inputsd = {
  {0.00001, 32, 32, 32, 1234ULL},
  {0.00001, 32, 64, 32, 1234ULL},
  {0.00001, 64, 32, 32, 1234ULL},
  {0.00001, 64, 64, 32, 1234ULL},
  {0.00001, 128, 32, 32, 1234ULL},
  {0.00001, 128, 64, 32, 1234ULL},
  {0.00001, 128, 128, 64, 1234ULL},
  {0.00001, 64, 128, 128, 1234ULL},
  {0.00001, 32, 32, 34, 1234ULL},
  {0.00001, 32, 64, 34, 1234ULL},
  {0.00001, 64, 32, 34, 1234ULL},
  {0.00001, 64, 64, 34, 1234ULL},
  {0.00001, 128, 32, 34, 1234ULL},
  {0.00001, 128, 64, 34, 1234ULL},
  {0.00001, 128, 128, 66, 1234ULL},
  {0.00001, 64, 128, 130, 1234ULL},
  {0.00001, 32, 32, 33, 1234ULL},
  {0.00001, 32, 64, 33, 1234ULL},
  {0.00001, 64, 32, 33, 1234ULL},
  {0.00001, 64, 64, 33, 1234ULL},
  {0.00001, 128, 32, 33, 1234ULL},
  {0.00001, 128, 64, 33, 1234ULL},
  {0.00001, 128, 128, 65, 1234ULL},
  {0.00001, 64, 128, 129, 1234ULL},
  {0.00001, 1805, 134, 2, 1234ULL},
  // Disabled case (kept for reference):
  //{0.00001, 8192, 1024, 25, 1234ULL},
};

// float64, squared-distance variant.
typedef FusedL2NNTest<double, false> FusedL2NNTestD_Sq;
TEST_P(FusedL2NNTestD_Sq, Result)
{
  runTest(min.data());
  ASSERT_TRUE(devArrMatch(
    min_ref.data(), min.data(), params.m, CompareApproxAbsKVP<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestD_Sq, ::testing::ValuesIn(inputsd));

// float64, sqrt variant.
typedef FusedL2NNTest<double, true> FusedL2NNTestD_Sqrt;
TEST_P(FusedL2NNTestD_Sqrt, Result)
{
  runTest(min.data());
  ASSERT_TRUE(devArrMatch(
    min_ref.data(), min.data(), params.m, CompareApproxAbsKVP<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestD_Sqrt, ::testing::ValuesIn(inputsd));

/// This is to test output determinism of the prim: the first run is taken as golden and
/// NumRepeats further runs must match it bit-exactly (CompareExactKVP below).
template <typename DataT, bool Sqrt>
class FusedL2NNDetTest : public FusedL2NNTest<DataT, Sqrt> {
 public:
  FusedL2NNDetTest() : stream(resource::get_cuda_stream(handle)), min1(0, stream) {}

  // Allocates the secondary output buffer (min1) sized to the current param set.
  void SetUp() override
  {
    FusedL2NNTest<DataT, Sqrt>::SetUp();
    int m = this->params.m;
    min1.resize(m, stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

  void TearDown() override { FusedL2NNTest<DataT, Sqrt>::TearDown(); }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  // Second output buffer, compared exactly against the golden `min` from the base fixture.
  rmm::device_uvector<raft::KeyValuePair<int, DataT>> min1;

  static const int NumRepeats = 3;

  // Determinism tests do not need a CPU reference result, so the golden-result
  // computation from the base fixture is disabled.
  void generateGoldenResult() override {}
};

typedef FusedL2NNDetTest<float, false> FusedL2NNDetTestF_Sq;
TEST_P(FusedL2NNDetTestF_Sq, Result)
{
  runTest(min.data());  // assumed to be golden
  for (int i = 0; i < NumRepeats; ++i) {
    runTest(min1.data());
    ASSERT_TRUE(devArrMatch(min.data(), min1.data(), params.m, CompareExactKVP<float>(), stream));
  }
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestF_Sq, ::testing::ValuesIn(inputsf));

typedef FusedL2NNDetTest<float, true> FusedL2NNDetTestF_Sqrt;
TEST_P(FusedL2NNDetTestF_Sqrt, Result)
{
  runTest(min.data());  // assumed to be golden
  for (int i = 0; i < NumRepeats; ++i) {
    runTest(min1.data());
    ASSERT_TRUE(devArrMatch(min.data(), min1.data(), params.m, CompareExactKVP<float>(), stream));
  }
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestF_Sqrt, ::testing::ValuesIn(inputsf));

typedef FusedL2NNDetTest<double, false> FusedL2NNDetTestD_Sq;
TEST_P(FusedL2NNDetTestD_Sq, Result)
{
  runTest(min.data());  // assumed to be golden
  for (int i = 0; i < NumRepeats; ++i) {
    runTest(min1.data());
    ASSERT_TRUE(devArrMatch(min.data(), min1.data(), params.m, CompareExactKVP<double>(), stream));
  }
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestD_Sq, ::testing::ValuesIn(inputsd));

typedef FusedL2NNDetTest<double, true> FusedL2NNDetTestD_Sqrt;
TEST_P(FusedL2NNDetTestD_Sqrt, Result)
{
  runTest(min.data());  // assumed to be golden
  for (int i = 0; i < NumRepeats; ++i) {
    runTest(min1.data());
    ASSERT_TRUE(devArrMatch(min.data(), min1.data(), params.m, CompareExactKVP<double>(), stream));
  }
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestD_Sqrt, ::testing::ValuesIn(inputsd));

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/distance/dist_cos.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Parameterized GTest cases for the expanded cosine distance primitive.
// The fixtures (DistanceTest / DistanceTestSameBuffer / BigMatrixDistanceTest) live in
// distance_base.cuh; they compute both a device result (`dist`) and a reference (`dist_ref`)
// which the TEST_P bodies compare within the per-case tolerance.

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Standard X-vs-Y cosine distance test.
template <typename DataType>
class DistanceExpCos : public DistanceTest<cuvs::distance::DistanceType::CosineExpanded, DataType> {
};

// Variant where the query and index buffers alias the same memory (X == Y).
template <typename DataType>
class DistanceExpCosXequalY
  : public DistanceTestSameBuffer<cuvs::distance::DistanceType::CosineExpanded, DataType> {};

// Cases: {tolerance, m, n, k, isRowMajor, seed}. Larger problems get a looser tolerance.
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 1024, 1024, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};

// X == Y cases use a 10x looser tolerance than inputsf.
const std::vector<DistanceInputs<float>> inputsXeqYf = {
  {0.01f, 1024, 1024, 32, true, 1234ULL},
  {0.01f, 1024, 32, 1024, true, 1234ULL},
  {0.01f, 32, 1024, 1024, true, 1234ULL},
  {0.03f, 1024, 1024, 1024, true, 1234ULL},
  {0.01f, 1024, 1024, 32, false, 1234ULL},
  {0.01f, 1024, 32, 1024, false, 1234ULL},
  {0.01f, 32, 1024, 1024, false, 1234ULL},
  {0.03f, 1024, 1024, 1024, false, 1234ULL},
};

typedef DistanceExpCos<float> DistanceExpCosF;
TEST_P(DistanceExpCosF, Result)
{
  // For column-major inputs the fixture's output is transposed, hence the m/n swap.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceExpCosF, ::testing::ValuesIn(inputsf));

typedef DistanceExpCosXequalY<float> DistanceExpCosXequalYF;
TEST_P(DistanceExpCosXequalYF, Result)
{
  // First comparison: full m x m distance matrix.
  int m = params.m;
  int n = params.m;
  ASSERT_TRUE(raft::devArrMatch(dist_ref[0].data(),
                                dist[0].data(),
                                m,
                                n,
                                raft::CompareApprox<float>(params.tolerance),
                                stream));
  // Second comparison: half-sized sub-problem (m/2 rows or columns depending on layout).
  n = params.isRowMajor ? m : m / 2;
  m = params.isRowMajor ? m / 2 : m;
  ASSERT_TRUE(raft::devArrMatch(dist_ref[1].data(),
                                dist[1].data(),
                                m,
                                n,
                                raft::CompareApprox<float>(params.tolerance),
                                stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceExpCosXequalYF, ::testing::ValuesIn(inputsXeqYf));

// Double-precision cases. NOTE(review): the last four entries use float literals (0.001f)
// in a DistanceInputs<double> — harmless (implicit widening) but inconsistent.
const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};

typedef DistanceExpCos<double> DistanceExpCosD;
TEST_P(DistanceExpCosD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceExpCosD, ::testing::ValuesIn(inputsd));

// Smoke test: the work happens in the fixture; the empty body just checks it doesn't throw.
class BigMatrixCos : public BigMatrixDistanceTest<cuvs::distance::DistanceType::CosineExpanded> {};
TEST_F(BigMatrixCos, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/distance/dist_correlation.cu
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Parameterized GTest cases for the expanded correlation distance primitive.
// Fixtures come from distance_base.cuh; each test compares the device-computed
// distances (`dist`) against a reference (`dist_ref`) within a tolerance.

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Standard X-vs-Y correlation distance test.
template <typename DataType>
class DistanceCorrelation
  : public DistanceTest<cuvs::distance::DistanceType::CorrelationExpanded, DataType> {};

// Variant where query and index share the same buffer (X == Y).
template <typename DataType>
class DistanceCorrelationXequalY
  : public DistanceTestSameBuffer<cuvs::distance::DistanceType::CorrelationExpanded, DataType> {};

// Cases: {tolerance, m, n, k, isRowMajor, seed}.
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 1024, 1024, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};

typedef DistanceCorrelation<float> DistanceCorrelationF;
TEST_P(DistanceCorrelationF, Result)
{
  // Column-major output is transposed relative to row-major, hence the m/n swap.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceCorrelationF, ::testing::ValuesIn(inputsf));

typedef DistanceCorrelationXequalY<float> DistanceCorrelationXequalYF;
TEST_P(DistanceCorrelationXequalYF, Result)
{
  int m = params.m;
  // Full m x m matrix, then a half-sized (m/2 x m) sub-problem.
  ASSERT_TRUE(raft::devArrMatch(dist_ref[0].data(),
                                dist[0].data(),
                                m,
                                m,
                                raft::CompareApprox<float>(params.tolerance),
                                stream));
  ASSERT_TRUE(raft::devArrMatch(dist_ref[1].data(),
                                dist[1].data(),
                                m / 2,
                                m,
                                raft::CompareApprox<float>(params.tolerance),
                                stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceCorrelationXequalYF, ::testing::ValuesIn(inputsf));

const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001, 1024, 1024, 32, false, 1234ULL},
  {0.001, 1024, 32, 1024, false, 1234ULL},
  {0.001, 32, 1024, 1024, false, 1234ULL},
  {0.003, 1024, 1024, 1024, false, 1234ULL},
};

typedef DistanceCorrelation<double> DistanceCorrelationD;
TEST_P(DistanceCorrelationD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceCorrelationD, ::testing::ValuesIn(inputsd));

// Smoke test for very large matrices; the fixture does the work.
class BigMatrixCorrelation
  : public BigMatrixDistanceTest<cuvs::distance::DistanceType::CorrelationExpanded> {};
TEST_F(BigMatrixCorrelation, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/ball_cover.cu
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Tests for the random ball cover (RBC) nearest-neighbor index: results from
// knn_query / all_knn_query are validated against a brute-force KNN baseline.

#include "../test_utils.cuh"
#include "spatial_data.h"
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/neighbors/ball_cover.cuh>
#include <cuvs/neighbors/brute_force.cuh>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/random/make_blobs.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/count.h>
#include <thrust/fill.h>
#include <thrust/transform.h>
#include <cstdint>
#include <gtest/gtest.h>
#include <iostream>
#include <vector>

namespace cuvs::neighbors::ball_cover {

using namespace std;

// One thread per query row: counts, per row, how many of the k neighbors differ from the
// baseline. A neighbor "matches" when its distance is within `thres` of the expected one,
// OR when both indices agree and equal the row itself (a self-match). Mismatches are
// printed for debugging. NOTE(review): the printf format uses %ud/%ld for uint32_t/value_idx
// arguments — format/type agreement depends on value_idx; confirm.
template <typename value_idx, typename value_t>
RAFT_KERNEL count_discrepancies_kernel(value_idx* actual_idx,
                                       value_idx* expected_idx,
                                       value_t* actual,
                                       value_t* expected,
                                       uint32_t m,
                                       uint32_t n,
                                       uint32_t* out,
                                       float thres = 1e-3)
{
  uint32_t row = blockDim.x * blockIdx.x + threadIdx.x;
  int n_diffs  = 0;
  if (row < m) {
    for (uint32_t i = 0; i < n; i++) {
      value_t d    = actual[row * n + i] - expected[row * n + i];
      bool matches = (fabsf(d) <= thres) || (actual_idx[row * n + i] == expected_idx[row * n + i] &&
                                             actual_idx[row * n + i] == row);
      if (!matches) {
        printf(
          "row=%ud, n=%ud, actual_dist=%f, actual_ind=%ld, expected_dist=%f, expected_ind=%ld\n",
          row,
          i,
          actual[row * n + i],
          actual_idx[row * n + i],
          expected[row * n + i],
          expected_idx[row * n + i]);
      }
      n_diffs += !matches;
      out[row] = n_diffs;
    }
  }
}

// Predicate for thrust::count_if: true when a row had at least one mismatch.
struct is_nonzero {
  __host__ __device__ bool operator()(uint32_t& i) { return i > 0; }
};

// Launches the kernel above and returns the number of rows with >= 1 mismatch.
// `out` must hold m zero-initialized counters.
template <typename value_idx, typename value_t>
uint32_t count_discrepancies(value_idx* actual_idx,
                             value_idx* expected_idx,
                             value_t* actual,
                             value_t* expected,
                             uint32_t m,
                             uint32_t n,
                             uint32_t* out,
                             cudaStream_t stream)
{
  uint32_t tpb = 256;
  count_discrepancies_kernel<<<raft::ceildiv(m, tpb), tpb, 0, stream>>>(
    actual_idx, expected_idx, actual, expected, m, n, out);

  auto exec_policy = rmm::exec_policy(stream);

  uint32_t result = thrust::count_if(exec_policy, out, out + m, is_nonzero());
  return result;
}

// Brute-force KNN baseline: X1 is the index set, X2 the queries; fills dists/inds
// with the exact k nearest neighbors under `metric`.
template <typename value_t>
void compute_bfknn(const raft::resources& handle,
                   const value_t* X1,
                   const value_t* X2,
                   uint32_t n_rows,
                   uint32_t n_query_rows,
                   uint32_t d,
                   uint32_t k,
                   const cuvs::distance::DistanceType metric,
                   value_t* dists,
                   int64_t* inds)
{
  std::vector<raft::device_matrix_view<const value_t, uint32_t>> input_vec = {
    make_device_matrix_view(X1, n_rows, d)};

  cuvs::neighbors::brute_force::knn(handle,
                                    input_vec,
                                    make_device_matrix_view(X2, n_query_rows, d),
                                    make_device_matrix_view(inds, n_query_rows, k),
                                    make_device_matrix_view(dists, n_query_rows, k),
                                    metric);
}

// Degrees -> radians, used before Haversine distance computations.
struct ToRadians {
  __device__ __host__ float operator()(float a) { return a * (CUDART_PI_F / 180.0); }
};

// Test parameters: k neighbors, index size, dimensionality, RBC weight,
// number of queries, and the distance metric.
template <typename value_int = std::uint32_t>
struct BallCoverInputs {
  value_int k;
  value_int n_rows;
  value_int n_cols;
  float weight;
  value_int n_query;
  cuvs::distance::DistanceType metric;
};

// Builds an RBC index over a blob dataset and queries it with a disjoint query set;
// results must match brute-force exactly (0 discrepant rows).
template <typename value_idx, typename value_t, typename value_int = std::uint32_t>
class BallCoverKNNQueryTest : public ::testing::TestWithParam<BallCoverInputs<value_int>> {
 protected:
  void basicTest()
  {
    params = ::testing::TestWithParam<BallCoverInputs<value_int>>::GetParam();
    raft::resources handle;

    uint32_t k        = params.k;
    uint32_t n_centers = 25;
    float weight      = params.weight;
    auto metric       = params.metric;

    rmm::device_uvector<value_t> X(params.n_rows * params.n_cols,
                                   resource::get_cuda_stream(handle));
    rmm::device_uvector<uint32_t> Y(params.n_rows, resource::get_cuda_stream(handle));

    // Make sure the train and query sets are completely disjoint
    rmm::device_uvector<value_t> X2(params.n_query * params.n_cols,
                                    resource::get_cuda_stream(handle));
    rmm::device_uvector<uint32_t> Y2(params.n_query, resource::get_cuda_stream(handle));

    raft::random::make_blobs(X.data(),
                             Y.data(),
                             params.n_rows,
                             params.n_cols,
                             n_centers,
                             resource::get_cuda_stream(handle));

    raft::random::make_blobs(X2.data(),
                             Y2.data(),
                             params.n_query,
                             params.n_cols,
                             n_centers,
                             resource::get_cuda_stream(handle));

    rmm::device_uvector<value_idx> d_ref_I(params.n_query * k,
                                           resource::get_cuda_stream(handle));
    rmm::device_uvector<value_t> d_ref_D(params.n_query * k, resource::get_cuda_stream(handle));

    // Haversine expects radians; convert both sets in place.
    if (metric == cuvs::distance::DistanceType::Haversine) {
      thrust::transform(
        resource::get_thrust_policy(handle), X.data(), X.data() + X.size(), X.data(), ToRadians());
      thrust::transform(resource::get_thrust_policy(handle),
                        X2.data(),
                        X2.data() + X2.size(),
                        X2.data(),
                        ToRadians());
    }

    compute_bfknn(handle,
                  X.data(),
                  X2.data(),
                  params.n_rows,
                  params.n_query,
                  params.n_cols,
                  k,
                  metric,
                  d_ref_D.data(),
                  d_ref_I.data());

    resource::sync_stream(handle);

    // Allocate predicted arrays
    rmm::device_uvector<value_idx> d_pred_I(params.n_query * k,
                                            resource::get_cuda_stream(handle));
    rmm::device_uvector<value_t> d_pred_D(params.n_query * k, resource::get_cuda_stream(handle));

    auto X_view =
      raft::make_device_matrix_view<value_t, value_int>(X.data(), params.n_rows, params.n_cols);
    auto X2_view = raft::make_device_matrix_view<const value_t, value_int>(
      (const value_t*)X2.data(), params.n_query, params.n_cols);

    auto d_pred_I_view =
      raft::make_device_matrix_view<value_idx, value_int>(d_pred_I.data(), params.n_query, k);
    auto d_pred_D_view =
      raft::make_device_matrix_view<value_t, value_int>(d_pred_D.data(), params.n_query, k);

    BallCoverIndex<value_idx, value_t, value_int, value_int> index(handle, X_view, metric);

    build_index(handle, index);

    knn_query(handle, index, X2_view, d_pred_I_view, d_pred_D_view, k, true);

    resource::sync_stream(handle);
    // What we really want are for the distances to match exactly. The
    // indices may or may not match exactly, depending upon the ordering which
    // can be nondeterministic.

    rmm::device_uvector<uint32_t> discrepancies(params.n_query,
                                                resource::get_cuda_stream(handle));
    thrust::fill(resource::get_thrust_policy(handle),
                 discrepancies.data(),
                 discrepancies.data() + discrepancies.size(),
                 0);
    //
    int res = count_discrepancies(d_ref_I.data(),
                                  d_pred_I.data(),
                                  d_ref_D.data(),
                                  d_pred_D.data(),
                                  params.n_query,
                                  k,
                                  discrepancies.data(),
                                  resource::get_cuda_stream(handle));

    // Separate query path must agree with brute force on every row.
    ASSERT_TRUE(res == 0);
  }

  void SetUp() override {}

  void TearDown() override {}

 protected:
  uint32_t d = 2;
  BallCoverInputs<value_int> params;
};

// Builds an RBC index and runs the all-neighbors (self) query; small numbers of
// discrepancies are tolerated (see the note before the final assertion).
template <typename value_idx, typename value_t, typename value_int = std::uint32_t>
class BallCoverAllKNNTest : public ::testing::TestWithParam<BallCoverInputs<value_int>> {
 protected:
  void basicTest()
  {
    params = ::testing::TestWithParam<BallCoverInputs<value_int>>::GetParam();
    raft::resources handle;

    uint32_t k        = params.k;
    uint32_t n_centers = 25;
    float weight      = params.weight;
    auto metric       = params.metric;

    rmm::device_uvector<value_t> X(params.n_rows * params.n_cols,
                                   resource::get_cuda_stream(handle));
    rmm::device_uvector<uint32_t> Y(params.n_rows, resource::get_cuda_stream(handle));

    raft::random::make_blobs(X.data(),
                             Y.data(),
                             params.n_rows,
                             params.n_cols,
                             n_centers,
                             resource::get_cuda_stream(handle));

    rmm::device_uvector<value_idx> d_ref_I(params.n_rows * k, resource::get_cuda_stream(handle));
    rmm::device_uvector<value_t> d_ref_D(params.n_rows * k, resource::get_cuda_stream(handle));

    auto X_view = raft::make_device_matrix_view<const value_t, value_int>(
      (const value_t*)X.data(), params.n_rows, params.n_cols);

    if (metric == cuvs::distance::DistanceType::Haversine) {
      thrust::transform(
        resource::get_thrust_policy(handle), X.data(), X.data() + X.size(), X.data(), ToRadians());
    }

    // Baseline: query the dataset against itself.
    compute_bfknn(handle,
                  X.data(),
                  X.data(),
                  params.n_rows,
                  params.n_rows,
                  params.n_cols,
                  k,
                  metric,
                  d_ref_D.data(),
                  d_ref_I.data());

    resource::sync_stream(handle);

    // Allocate predicted arrays
    rmm::device_uvector<value_idx> d_pred_I(params.n_rows * k,
                                            resource::get_cuda_stream(handle));
    rmm::device_uvector<value_t> d_pred_D(params.n_rows * k, resource::get_cuda_stream(handle));

    auto d_pred_I_view =
      raft::make_device_matrix_view<value_idx, value_int>(d_pred_I.data(), params.n_rows, k);
    auto d_pred_D_view =
      raft::make_device_matrix_view<value_t, value_int>(d_pred_D.data(), params.n_rows, k);

    BallCoverIndex<value_idx, value_t> index(handle, X_view, metric);

    all_knn_query(handle, index, d_pred_I_view, d_pred_D_view, k, true);

    resource::sync_stream(handle);
    // What we really want are for the distances to match exactly. The
    // indices may or may not match exactly, depending upon the ordering which
    // can be nondeterministic.

    rmm::device_uvector<uint32_t> discrepancies(params.n_rows,
                                                resource::get_cuda_stream(handle));
    thrust::fill(resource::get_thrust_policy(handle),
                 discrepancies.data(),
                 discrepancies.data() + discrepancies.size(),
                 0);
    //
    uint32_t res = count_discrepancies(d_ref_I.data(),
                                       d_pred_I.data(),
                                       d_ref_D.data(),
                                       d_pred_D.data(),
                                       params.n_rows,
                                       k,
                                       discrepancies.data(),
                                       resource::get_cuda_stream(handle));

    // TODO: There seem to be discrepancies here only when
    // the entire test suite is executed.
    // Ref: https://github.com/rapidsai/raft/issues/
    // 1-5 mismatches in 8000 samples is 0.0125% - 0.0625%
    ASSERT_TRUE(res <= 5);
  }

  void SetUp() override {}

  void TearDown() override {}

 protected:
  BallCoverInputs<value_int> params;
};

typedef BallCoverAllKNNTest<int64_t, float> BallCoverAllKNNTestF;
typedef BallCoverKNNQueryTest<int64_t, float> BallCoverKNNQueryTestF;

const std::vector<BallCoverInputs<std::uint32_t>> ballcover_inputs = {
  {11, 5000, 2, 1.0, 10000, cuvs::distance::DistanceType::Haversine},
  {25, 10000, 2, 1.0, 5000, cuvs::distance::DistanceType::Haversine},
  {2, 10000, 2, 1.0, 5000, cuvs::distance::DistanceType::L2SqrtUnexpanded},
  {2, 5000, 2, 1.0, 10000, cuvs::distance::DistanceType::Haversine},
  {11, 10000, 2, 1.0, 5000, cuvs::distance::DistanceType::L2SqrtUnexpanded},
  {25, 5000, 2, 1.0, 10000, cuvs::distance::DistanceType::L2SqrtUnexpanded},
  {5, 8000, 3, 1.0, 10000, cuvs::distance::DistanceType::L2SqrtUnexpanded},
  {11, 6000, 3, 1.0, 10000, cuvs::distance::DistanceType::L2SqrtUnexpanded},
  {25, 10000, 3, 1.0, 5000, cuvs::distance::DistanceType::L2SqrtUnexpanded}};

INSTANTIATE_TEST_CASE_P(BallCoverAllKNNTest,
                        BallCoverAllKNNTestF,
                        ::testing::ValuesIn(ballcover_inputs));
INSTANTIATE_TEST_CASE_P(BallCoverKNNQueryTest,
                        BallCoverKNNQueryTestF,
                        ::testing::ValuesIn(ballcover_inputs));

TEST_P(BallCoverAllKNNTestF, Fit) { basicTest(); }
TEST_P(BallCoverKNNQueryTestF, Fit) { basicTest(); }

}  // namespace cuvs::neighbors::ball_cover
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/refine.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Tests for cuvs::neighbors::refine: candidate neighbor lists are re-ranked against the
// original dataset (host- or device-resident), and the result is compared with the
// ground-truth refined lists held by the RefineHelper fixture.

#include "../test_utils.cuh"
#include "ann_utils.cuh"
#include <raft/core/resource/cuda_stream.hpp>

#include <cuvs_internal/neighbors/refine_helper.cuh>

#include <cuvs/distance/distance_types.hpp>
#include <cuvs/neighbors/detail/refine.cuh>
#include <cuvs/neighbors/refine.cuh>
#include <cuvs/spatial/knn/ann.cuh>
#include <raft/core/logger.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/itertools.hpp>
#include <rmm/cuda_stream_view.hpp>

#include <gtest/gtest.h>

#include <vector>

namespace cuvs::neighbors {

template <typename DataT, typename DistanceT, typename IdxT>
class RefineTest : public ::testing::TestWithParam<RefineInputs<IdxT>> {
 public:
  // RefineHelper generates the dataset, queries, candidate lists and the
  // ground-truth refined results for the current parameter set.
  RefineTest()
    : stream_(resource::get_cuda_stream(handle_)),
      data(handle_, ::testing::TestWithParam<RefineInputs<IdxT>>::GetParam())
  {
  }

 protected:
 public:  // tamas remove
  // Runs refine() on either the host-side or device-side mdspans (per params),
  // copies the refined indices/distances back to host, and checks recall
  // against the fixture's ground truth.
  void testRefine()
  {
    std::vector<IdxT> indices(data.p.n_queries * data.p.k);
    std::vector<DistanceT> distances(data.p.n_queries * data.p.k);

    if (data.p.host_data) {
      // Host-memory code path.
      cuvs::neighbors::refine<IdxT, DataT, DistanceT, IdxT>(handle_,
                                                            data.dataset_host.view(),
                                                            data.queries_host.view(),
                                                            data.candidates_host.view(),
                                                            data.refined_indices_host.view(),
                                                            data.refined_distances_host.view(),
                                                            data.p.metric);

      raft::copy(indices.data(),
                 data.refined_indices_host.data_handle(),
                 data.refined_indices_host.size(),
                 stream_);
      raft::copy(distances.data(),
                 data.refined_distances_host.data_handle(),
                 data.refined_distances_host.size(),
                 stream_);
    } else {
      // Device-memory code path.
      cuvs::neighbors::refine<IdxT, DataT, DistanceT, IdxT>(handle_,
                                                            data.dataset.view(),
                                                            data.queries.view(),
                                                            data.candidates.view(),
                                                            data.refined_indices.view(),
                                                            data.refined_distances.view(),
                                                            data.p.metric);

      update_host(distances.data(),
                  data.refined_distances.data_handle(),
                  data.refined_distances.size(),
                  stream_);
      update_host(
        indices.data(), data.refined_indices.data_handle(), data.refined_indices.size(), stream_);
    }
    resource::sync_stream(handle_);

    // Refinement is exact re-ranking, so full recall (1.0) is expected.
    double min_recall = 1;
    ASSERT_TRUE(cuvs::neighbors::eval_neighbours(data.true_refined_indices_host,
                                                 indices,
                                                 data.true_refined_distances_host,
                                                 distances,
                                                 data.p.n_queries,
                                                 data.p.k,
                                                 0.001,
                                                 min_recall));
  }

 public:
  raft::resources handle_;
  rmm::cuda_stream_view stream_;
  RefineHelper<DataT, DistanceT, IdxT> data;
};

// Cartesian product of parameters; field meaning is defined by RefineInputs
// (presumably {n_queries, n_rows, dim, k, candidates, metric, host_data} — confirm
// against refine_helper.cuh).
const std::vector<RefineInputs<int64_t>> inputs =
  raft::util::itertools::product<RefineInputs<int64_t>>(
    {static_cast<int64_t>(137)},
    {static_cast<int64_t>(1000)},
    {static_cast<int64_t>(16)},
    {static_cast<int64_t>(1), static_cast<int64_t>(10), static_cast<int64_t>(33)},
    {static_cast<int64_t>(33)},
    {cuvs::distance::DistanceType::L2Expanded, cuvs::distance::DistanceType::InnerProduct},
    {false, true});

typedef RefineTest<float, float, std::int64_t> RefineTestF;
TEST_P(RefineTestF, AnnRefine) { this->testRefine(); }
INSTANTIATE_TEST_CASE_P(RefineTest, RefineTestF, ::testing::ValuesIn(inputs));

typedef RefineTest<uint8_t, float, std::int64_t> RefineTestF_uint8;
TEST_P(RefineTestF_uint8, AnnRefine) { this->testRefine(); }
INSTANTIATE_TEST_CASE_P(RefineTest, RefineTestF_uint8, ::testing::ValuesIn(inputs));

typedef RefineTest<int8_t, float, std::int64_t> RefineTestF_int8;
TEST_P(RefineTestF_int8, AnnRefine) { this->testRefine(); }
INSTANTIATE_TEST_CASE_P(RefineTest, RefineTestF_int8, ::testing::ValuesIn(inputs));

}  // namespace cuvs::neighbors
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_cagra.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #undef RAFT_EXPLICIT_INSTANTIATE_ONLY // Search with filter instantiation #include "../test_utils.cuh" #include "ann_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <cuvs_internal/neighbors/naive_knn.cuh> #include <cuvs/distance/distance_types.hpp> #include <cuvs/neighbors/cagra.cuh> #include <cuvs/neighbors/cagra_serialize.cuh> #include <cuvs/neighbors/sample_filter.cuh> #include <raft/core/device_mdspan.hpp> #include <raft/core/device_resources.hpp> #include <raft/core/logger.hpp> #include <raft/linalg/add.cuh> #include <raft/random/rng.cuh> #include <raft/util/itertools.hpp> #include <rmm/device_buffer.hpp> #include <gtest/gtest.h> #include <thrust/sequence.h> #include <cstddef> #include <iostream> #include <string> #include <vector> namespace cuvs::neighbors::cagra { namespace { /* A filter that excludes all indices below `offset`. 
*/ struct test_cagra_sample_filter { static constexpr unsigned offset = 300; inline _RAFT_HOST_DEVICE auto operator()( // query index const uint32_t query_ix, // the index of the current sample inside the current inverted list const uint32_t sample_ix) const { return sample_ix >= offset; } }; // For sort_knn_graph test template <typename IdxT> void RandomSuffle(raft::host_matrix_view<IdxT, int64_t> index) { for (IdxT i = 0; i < index.extent(0); i++) { uint64_t rand = i; IdxT* const row_ptr = index.data_handle() + i * index.extent(1); for (unsigned j = 0; j < index.extent(1); j++) { // Swap two indices at random rand = cuvs::neighbors::cagra::detail::device::xorshift64(rand); const auto i0 = rand % index.extent(1); rand = cuvs::neighbors::cagra::detail::device::xorshift64(rand); const auto i1 = rand % index.extent(1); const auto tmp = row_ptr[i0]; row_ptr[i0] = row_ptr[i1]; row_ptr[i1] = tmp; } } } template <typename DistanceT, typename DatatT, typename IdxT> testing::AssertionResult CheckOrder(raft::host_matrix_view<IdxT, int64_t> index_test, raft::host_matrix_view<DatatT, int64_t> dataset) { for (IdxT i = 0; i < index_test.extent(0); i++) { const DatatT* const base_vec = dataset.data_handle() + i * dataset.extent(1); const IdxT* const index_row = index_test.data_handle() + i * index_test.extent(1); DistanceT prev_distance = 0; for (unsigned j = 0; j < index_test.extent(1) - 1; j++) { const DatatT* const target_vec = dataset.data_handle() + index_row[j] * dataset.extent(1); DistanceT distance = 0; for (unsigned l = 0; l < dataset.extent(1); l++) { const auto diff = static_cast<DistanceT>(target_vec[l]) - static_cast<DistanceT>(base_vec[l]); distance += diff * diff; } if (prev_distance > distance) { return testing::AssertionFailure() << "Wrong index order (row = " << i << ", neighbor_id = " << j << "). 
(distance[neighbor_id-1] = " << prev_distance << "should be larger than distance[neighbor_id] = " << distance << ")"; } prev_distance = distance; } } return testing::AssertionSuccess(); } // Generate dataset to ensure no rounding error occurs in the norm computation of any two vectors. // When testing the CAGRA index sorting function, rounding errors can affect the norm and alter the // order of the index. To ensure the accuracy of the test, we utilize the dataset. The generation // method is based on the error-free transformation (EFT) method. RAFT_KERNEL GenerateRoundingErrorFreeDataset_kernel(float* const ptr, const uint32_t size, const uint32_t resolution) { const auto tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= size) { return; } const float u32 = *reinterpret_cast<const uint32_t*>(ptr + tid); ptr[tid] = u32 / resolution; } void GenerateRoundingErrorFreeDataset(const raft::resources& handle, float* const ptr, const uint32_t n_row, const uint32_t dim, raft::random::RngState& rng) { auto cuda_stream = resource::get_cuda_stream(handle); const uint32_t size = n_row * dim; const uint32_t block_size = 256; const uint32_t grid_size = (size + block_size - 1) / block_size; const uint32_t resolution = 1u << static_cast<unsigned>(std::floor((24 - std::log2(dim)) / 2)); raft::random::uniformInt(handle, rng, reinterpret_cast<uint32_t*>(ptr), size, 0u, resolution - 1); GenerateRoundingErrorFreeDataset_kernel<<<grid_size, block_size, 0, cuda_stream>>>( ptr, size, resolution); } } // namespace struct AnnCagraInputs { int n_queries; int n_rows; int dim; int k; graph_build_algo build_algo; search_algo algo; int max_queries; int team_size; int itopk_size; int search_width; cuvs::distance::DistanceType metric; bool host_dataset; bool include_serialized_dataset; // std::optional<double> double min_recall; // = std::nullopt; }; inline ::std::ostream& operator<<(::std::ostream& os, const AnnCagraInputs& p) { std::vector<std::string> algo = {"single-cta", "multi_cta", 
"multi_kernel", "auto"}; std::vector<std::string> build_algo = {"IVF_PQ", "NN_DESCENT"}; os << "{n_queries=" << p.n_queries << ", dataset shape=" << p.n_rows << "x" << p.dim << ", k=" << p.k << ", " << algo.at((int)p.algo) << ", max_queries=" << p.max_queries << ", itopk_size=" << p.itopk_size << ", search_width=" << p.search_width << ", metric=" << static_cast<int>(p.metric) << (p.host_dataset ? ", host" : ", device") << ", build_algo=" << build_algo.at((int)p.build_algo) << '}' << std::endl; return os; } template <typename DistanceT, typename DataT, typename IdxT> class AnnCagraTest : public ::testing::TestWithParam<AnnCagraInputs> { public: AnnCagraTest() : stream_(resource::get_cuda_stream(handle_)), ps(::testing::TestWithParam<AnnCagraInputs>::GetParam()), database(0, stream_), search_queries(0, stream_) { } protected: void testCagra() { size_t queries_size = ps.n_queries * ps.k; std::vector<IdxT> indices_Cagra(queries_size); std::vector<IdxT> indices_naive(queries_size); std::vector<DistanceT> distances_Cagra(queries_size); std::vector<DistanceT> distances_naive(queries_size); { rmm::device_uvector<DistanceT> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); naive_knn<DistanceT, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database.data(), ps.n_queries, ps.n_rows, ps.dim, ps.k, ps.metric); update_host(distances_naive.data(), distances_naive_dev.data(), queries_size, stream_); update_host(indices_naive.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } { rmm::device_uvector<DistanceT> distances_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_dev(queries_size, stream_); { cagra::index_params index_params; index_params.metric = ps.metric; // Note: currently ony the cagra::index_params metric is // not used for knn_graph building. 
index_params.build_algo = ps.build_algo; cagra::search_params search_params; search_params.algo = ps.algo; search_params.max_queries = ps.max_queries; search_params.team_size = ps.team_size; auto database_view = raft::make_device_matrix_view<const DataT, int64_t>( (const DataT*)database.data(), ps.n_rows, ps.dim); { cagra::index<DataT, IdxT> index(handle_); if (ps.host_dataset) { auto database_host = raft::make_host_matrix<DataT, int64_t>(ps.n_rows, ps.dim); raft::copy(database_host.data_handle(), database.data(), database.size(), stream_); auto database_host_view = raft::make_host_matrix_view<const DataT, int64_t>( (const DataT*)database_host.data_handle(), ps.n_rows, ps.dim); index = cagra::build<DataT, IdxT>(handle_, index_params, database_host_view); } else { index = cagra::build<DataT, IdxT>(handle_, index_params, database_view); }; cagra::serialize(handle_, "cagra_index", index, ps.include_serialized_dataset); } auto index = cagra::deserialize<DataT, IdxT>(handle_, "cagra_index"); if (!ps.include_serialized_dataset) { index.update_dataset(handle_, database_view); } auto search_queries_view = raft::make_device_matrix_view<const DataT, int64_t>( search_queries.data(), ps.n_queries, ps.dim); auto indices_out_view = raft::make_device_matrix_view<IdxT, int64_t>(indices_dev.data(), ps.n_queries, ps.k); auto dists_out_view = raft::make_device_matrix_view<DistanceT, int64_t>( distances_dev.data(), ps.n_queries, ps.k); cagra::search( handle_, search_params, index, search_queries_view, indices_out_view, dists_out_view); update_host(distances_Cagra.data(), distances_dev.data(), queries_size, stream_); update_host(indices_Cagra.data(), indices_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } // for (int i = 0; i < min(ps.n_queries, 10); i++) { // // std::cout << "query " << i << std::end; // print_vector("T", indices_naive.data() + i * ps.k, ps.k, std::cout); // print_vector("C", indices_Cagra.data() + i * ps.k, ps.k, std::cout); // print_vector("T", 
distances_naive.data() + i * ps.k, ps.k, std::cout); // print_vector("C", distances_Cagra.data() + i * ps.k, ps.k, std::cout); // } double min_recall = ps.min_recall; EXPECT_TRUE(eval_neighbours(indices_naive, indices_Cagra, distances_naive, distances_Cagra, ps.n_queries, ps.k, 0.003, min_recall)); EXPECT_TRUE(eval_distances(handle_, database.data(), search_queries.data(), indices_dev.data(), distances_dev.data(), ps.n_rows, ps.dim, ps.n_queries, ps.k, ps.metric, 1.0e-4)); } } void SetUp() override { database.resize(((size_t)ps.n_rows) * ps.dim, stream_); search_queries.resize(ps.n_queries * ps.dim, stream_); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { raft::random::normal(handle_, r, database.data(), ps.n_rows * ps.dim, DataT(0.1), DataT(2.0)); raft::random::normal( handle_, r, search_queries.data(), ps.n_queries * ps.dim, DataT(0.1), DataT(2.0)); } else { raft::random::uniformInt( handle_, r, database.data(), ps.n_rows * ps.dim, DataT(1), DataT(20)); raft::random::uniformInt( handle_, r, search_queries.data(), ps.n_queries * ps.dim, DataT(1), DataT(20)); } resource::sync_stream(handle_); } void TearDown() override { resource::sync_stream(handle_); database.resize(0, stream_); search_queries.resize(0, stream_); } private: raft::resources handle_; rmm::cuda_stream_view stream_; AnnCagraInputs ps; rmm::device_uvector<DataT> database; rmm::device_uvector<DataT> search_queries; }; template <typename DistanceT, typename DataT, typename IdxT> class AnnCagraSortTest : public ::testing::TestWithParam<AnnCagraInputs> { public: AnnCagraSortTest() : ps(::testing::TestWithParam<AnnCagraInputs>::GetParam()), database(0, handle_.get_stream()) { } protected: void testCagraSort() { { // Step 1: Build a sorted KNN graph by CAGRA knn build auto database_view = raft::make_device_matrix_view<const DataT, int64_t>( (const DataT*)database.data(), ps.n_rows, ps.dim); auto database_host = raft::make_host_matrix<DataT, int64_t>(ps.n_rows, ps.dim); 
raft::copy( database_host.data_handle(), database.data(), database.size(), handle_.get_stream()); auto database_host_view = raft::make_host_matrix_view<const DataT, int64_t>( (const DataT*)database_host.data_handle(), ps.n_rows, ps.dim); cagra::index_params index_params; auto knn_graph = raft::make_host_matrix<IdxT, int64_t>(ps.n_rows, index_params.intermediate_graph_degree); if (ps.build_algo == graph_build_algo::IVF_PQ) { if (ps.host_dataset) { cagra::build_knn_graph<DataT, IdxT>(handle_, database_host_view, knn_graph.view()); } else { cagra::build_knn_graph<DataT, IdxT>(handle_, database_view, knn_graph.view()); } } else { auto nn_descent_idx_params = experimental::nn_descent::index_params{}; nn_descent_idx_params.graph_degree = index_params.intermediate_graph_degree; nn_descent_idx_params.intermediate_graph_degree = index_params.intermediate_graph_degree; if (ps.host_dataset) { cagra::build_knn_graph<DataT, IdxT>( handle_, database_host_view, knn_graph.view(), nn_descent_idx_params); } else { cagra::build_knn_graph<DataT, IdxT>( handle_, database_host_view, knn_graph.view(), nn_descent_idx_params); } } handle_.sync_stream(); ASSERT_TRUE(CheckOrder<DistanceT>(knn_graph.view(), database_host.view())); RandomSuffle(knn_graph.view()); cagra::sort_knn_graph(handle_, database_view, knn_graph.view()); handle_.sync_stream(); ASSERT_TRUE(CheckOrder<DistanceT>(knn_graph.view(), database_host.view())); } } void SetUp() override { database.resize(((size_t)ps.n_rows) * ps.dim, handle_.get_stream()); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { GenerateRoundingErrorFreeDataset(handle_, database.data(), ps.n_rows, ps.dim, r); } else { raft::random::uniformInt( handle_, r, database.data(), ps.n_rows * ps.dim, DataT(1), DataT(20)); } handle_.sync_stream(); } void TearDown() override { handle_.sync_stream(); database.resize(0, handle_.get_stream()); } private: raft::device_resources handle_; AnnCagraInputs ps; rmm::device_uvector<DataT> 
database; }; template <typename DistanceT, typename DataT, typename IdxT> class AnnCagraFilterTest : public ::testing::TestWithParam<AnnCagraInputs> { public: AnnCagraFilterTest() : stream_(resource::get_cuda_stream(handle_)), ps(::testing::TestWithParam<AnnCagraInputs>::GetParam()), database(0, stream_), search_queries(0, stream_) { } protected: void testCagraFilter() { size_t queries_size = ps.n_queries * ps.k; std::vector<IdxT> indices_Cagra(queries_size); std::vector<IdxT> indices_naive(queries_size); std::vector<DistanceT> distances_Cagra(queries_size); std::vector<DistanceT> distances_naive(queries_size); { rmm::device_uvector<DistanceT> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); auto* database_filtered_ptr = database.data() + test_cagra_sample_filter::offset * ps.dim; naive_knn<DistanceT, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database_filtered_ptr, ps.n_queries, ps.n_rows - test_cagra_sample_filter::offset, ps.dim, ps.k, ps.metric); raft::linalg::addScalar(indices_naive_dev.data(), indices_naive_dev.data(), IdxT(test_cagra_sample_filter::offset), queries_size, stream_); update_host(distances_naive.data(), distances_naive_dev.data(), queries_size, stream_); update_host(indices_naive.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } { rmm::device_uvector<DistanceT> distances_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_dev(queries_size, stream_); { cagra::index_params index_params; index_params.metric = ps.metric; // Note: currently ony the cagra::index_params metric is // not used for knn_graph building. 
index_params.nn_descent_niter = 50; cagra::search_params search_params; search_params.algo = ps.algo; search_params.max_queries = ps.max_queries; search_params.team_size = ps.team_size; search_params.hashmap_mode = cagra::hash_mode::HASH; auto database_view = raft::make_device_matrix_view<const DataT, int64_t>( (const DataT*)database.data(), ps.n_rows, ps.dim); cagra::index<DataT, IdxT> index(handle_); if (ps.host_dataset) { auto database_host = raft::make_host_matrix<DataT, int64_t>(ps.n_rows, ps.dim); raft::copy(database_host.data_handle(), database.data(), database.size(), stream_); auto database_host_view = raft::make_host_matrix_view<const DataT, int64_t>( (const DataT*)database_host.data_handle(), ps.n_rows, ps.dim); index = cagra::build<DataT, IdxT>(handle_, index_params, database_host_view); } else { index = cagra::build<DataT, IdxT>(handle_, index_params, database_view); } if (!ps.include_serialized_dataset) { index.update_dataset(handle_, database_view); } auto search_queries_view = raft::make_device_matrix_view<const DataT, int64_t>( search_queries.data(), ps.n_queries, ps.dim); auto indices_out_view = raft::make_device_matrix_view<IdxT, int64_t>(indices_dev.data(), ps.n_queries, ps.k); auto dists_out_view = raft::make_device_matrix_view<DistanceT, int64_t>( distances_dev.data(), ps.n_queries, ps.k); cagra::search_with_filtering(handle_, search_params, index, search_queries_view, indices_out_view, dists_out_view, test_cagra_sample_filter()); update_host(distances_Cagra.data(), distances_dev.data(), queries_size, stream_); update_host(indices_Cagra.data(), indices_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } // Test filter bool unacceptable_node = false; for (int q = 0; q < ps.n_queries; q++) { for (int i = 0; i < ps.k; i++) { const auto n = indices_Cagra[q * ps.k + i]; unacceptable_node = unacceptable_node | !test_cagra_sample_filter()(q, n); } } EXPECT_FALSE(unacceptable_node); double min_recall = ps.min_recall; 
EXPECT_TRUE(eval_neighbours(indices_naive, indices_Cagra, distances_naive, distances_Cagra, ps.n_queries, ps.k, 0.003, min_recall)); EXPECT_TRUE(eval_distances(handle_, database.data(), search_queries.data(), indices_dev.data(), distances_dev.data(), ps.n_rows, ps.dim, ps.n_queries, ps.k, ps.metric, 1.0e-4)); } } void testCagraRemoved() { size_t queries_size = ps.n_queries * ps.k; std::vector<IdxT> indices_Cagra(queries_size); std::vector<IdxT> indices_naive(queries_size); std::vector<DistanceT> distances_Cagra(queries_size); std::vector<DistanceT> distances_naive(queries_size); { rmm::device_uvector<DistanceT> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); auto* database_filtered_ptr = database.data() + test_cagra_sample_filter::offset * ps.dim; naive_knn<DistanceT, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database_filtered_ptr, ps.n_queries, ps.n_rows - test_cagra_sample_filter::offset, ps.dim, ps.k, ps.metric); raft::linalg::addScalar(indices_naive_dev.data(), indices_naive_dev.data(), IdxT(test_cagra_sample_filter::offset), queries_size, stream_); update_host(distances_naive.data(), distances_naive_dev.data(), queries_size, stream_); update_host(indices_naive.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } { rmm::device_uvector<DistanceT> distances_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_dev(queries_size, stream_); { cagra::index_params index_params; index_params.metric = ps.metric; // Note: currently ony the cagra::index_params metric is // not used for knn_graph building. 
index_params.nn_descent_niter = 50; cagra::search_params search_params; search_params.algo = ps.algo; search_params.max_queries = ps.max_queries; search_params.team_size = ps.team_size; search_params.hashmap_mode = cagra::hash_mode::HASH; auto database_view = raft::make_device_matrix_view<const DataT, int64_t>( (const DataT*)database.data(), ps.n_rows, ps.dim); cagra::index<DataT, IdxT> index(handle_); if (ps.host_dataset) { auto database_host = raft::make_host_matrix<DataT, int64_t>(ps.n_rows, ps.dim); raft::copy(database_host.data_handle(), database.data(), database.size(), stream_); auto database_host_view = raft::make_host_matrix_view<const DataT, int64_t>( (const DataT*)database_host.data_handle(), ps.n_rows, ps.dim); index = cagra::build<DataT, IdxT>(handle_, index_params, database_host_view); } else { index = cagra::build<DataT, IdxT>(handle_, index_params, database_view); } if (!ps.include_serialized_dataset) { index.update_dataset(handle_, database_view); } auto search_queries_view = raft::make_device_matrix_view<const DataT, int64_t>( search_queries.data(), ps.n_queries, ps.dim); auto indices_out_view = raft::make_device_matrix_view<IdxT, int64_t>(indices_dev.data(), ps.n_queries, ps.k); auto dists_out_view = raft::make_device_matrix_view<DistanceT, int64_t>( distances_dev.data(), ps.n_queries, ps.k); auto removed_indices = raft::make_device_vector<IdxT, int64_t>(handle_, test_cagra_sample_filter::offset); thrust::sequence( resource::get_thrust_policy(handle_), thrust::device_pointer_cast(removed_indices.data_handle()), thrust::device_pointer_cast(removed_indices.data_handle() + removed_indices.extent(0))); resource::sync_stream(handle_); raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset( handle_, removed_indices.view(), ps.n_rows); cagra::search_with_filtering( handle_, search_params, index, search_queries_view, indices_out_view, dists_out_view, cuvs::neighbors::filtering::bitset_filter(removed_indices_bitset.view())); 
update_host(distances_Cagra.data(), distances_dev.data(), queries_size, stream_); update_host(indices_Cagra.data(), indices_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } double min_recall = ps.min_recall; EXPECT_TRUE(eval_neighbours(indices_naive, indices_Cagra, distances_naive, distances_Cagra, ps.n_queries, ps.k, 0.003, min_recall)); EXPECT_TRUE(eval_distances(handle_, database.data(), search_queries.data(), indices_dev.data(), distances_dev.data(), ps.n_rows, ps.dim, ps.n_queries, ps.k, ps.metric, 1.0e-4)); } } void SetUp() override { database.resize(((size_t)ps.n_rows) * ps.dim, stream_); search_queries.resize(ps.n_queries * ps.dim, stream_); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { raft::random::normal(handle_, r, database.data(), ps.n_rows * ps.dim, DataT(0.1), DataT(2.0)); raft::random::normal( handle_, r, search_queries.data(), ps.n_queries * ps.dim, DataT(0.1), DataT(2.0)); } else { raft::random::uniformInt( handle_, r, database.data(), ps.n_rows * ps.dim, DataT(1), DataT(20)); raft::random::uniformInt( handle_, r, search_queries.data(), ps.n_queries * ps.dim, DataT(1), DataT(20)); } resource::sync_stream(handle_); } void TearDown() override { resource::sync_stream(handle_); database.resize(0, stream_); search_queries.resize(0, stream_); } private: raft::resources handle_; rmm::cuda_stream_view stream_; AnnCagraInputs ps; rmm::device_uvector<DataT> database; rmm::device_uvector<DataT> search_queries; }; inline std::vector<AnnCagraInputs> generate_inputs() { // TODO(tfeher): test MULTI_CTA kernel with search_width > 1 to allow multiple CTA per queries std::vector<AnnCagraInputs> inputs = raft::util::itertools::product<AnnCagraInputs>( {100}, {1000}, {1, 8, 17}, {1, 16}, // k {graph_build_algo::IVF_PQ, graph_build_algo::NN_DESCENT}, {search_algo::SINGLE_CTA, search_algo::MULTI_CTA, search_algo::MULTI_KERNEL}, {0, 1, 10, 100}, // query size {0}, {256}, {1}, 
{cuvs::distance::DistanceType::L2Expanded}, {false}, {true}, {0.995}); auto inputs2 = raft::util::itertools::product<AnnCagraInputs>( {100}, {1000}, {1, 3, 5, 7, 8, 17, 64, 128, 137, 192, 256, 512, 619, 1024}, // dim {16}, // k {graph_build_algo::IVF_PQ, graph_build_algo::NN_DESCENT}, {search_algo::AUTO}, {10}, {0}, {64}, {1}, {cuvs::distance::DistanceType::L2Expanded}, {false}, {true}, {0.995}); inputs.insert(inputs.end(), inputs2.begin(), inputs2.end()); inputs2 = raft::util::itertools::product<AnnCagraInputs>( {100}, {1000}, {64}, {16}, {graph_build_algo::IVF_PQ, graph_build_algo::NN_DESCENT}, {search_algo::AUTO}, {10}, {0, 4, 8, 16, 32}, // team_size {64}, {1}, {cuvs::distance::DistanceType::L2Expanded}, {false}, {false}, {0.995}); inputs.insert(inputs.end(), inputs2.begin(), inputs2.end()); inputs2 = raft::util::itertools::product<AnnCagraInputs>( {100}, {1000}, {64}, {16}, {graph_build_algo::IVF_PQ, graph_build_algo::NN_DESCENT}, {search_algo::AUTO}, {10}, {0}, // team_size {32, 64, 128, 256, 512, 768}, {1}, {cuvs::distance::DistanceType::L2Expanded}, {false}, {true}, {0.995}); inputs.insert(inputs.end(), inputs2.begin(), inputs2.end()); inputs2 = raft::util::itertools::product<AnnCagraInputs>( {100}, {10000, 20000}, {32}, {10}, {graph_build_algo::IVF_PQ, graph_build_algo::NN_DESCENT}, {search_algo::AUTO}, {10}, {0}, // team_size {64}, {1}, {cuvs::distance::DistanceType::L2Expanded}, {false, true}, {false}, {0.995}); inputs.insert(inputs.end(), inputs2.begin(), inputs2.end()); return inputs; } const std::vector<AnnCagraInputs> inputs = generate_inputs(); } // namespace cuvs::neighbors::cagra
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_ivf_flat.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "../test_utils.cuh" #include "ann_utils.cuh" #include <cuvs/neighbors/ivf_flat_types.hpp> #include <cuvs/neighbors/ivf_list.hpp> #include <cuvs/neighbors/sample_filter.cuh> #include <raft/core/device_mdarray.hpp> #include <raft/core/host_mdarray.hpp> #include <raft/core/mdspan.hpp> #include <raft/core/mdspan_types.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resource/thrust_policy.hpp> #include <raft/linalg/map.cuh> #include <raft/util/cudart_utils.hpp> #include <raft/util/fast_int_div.cuh> #include <thrust/functional.h> #include <cuvs_internal/neighbors/naive_knn.cuh> #include <cuvs/distance/distance_types.hpp> #include <cuvs/neighbors/ivf_flat.cuh> #include <cuvs/neighbors/ivf_flat_helpers.cuh> #include <cuvs/spatial/knn/ann.cuh> #include <cuvs/spatial/knn/knn.cuh> #include <raft/core/device_mdspan.hpp> #include <raft/core/logger.hpp> #include <raft/matrix/gather.cuh> #include <raft/random/rng.cuh> #include <raft/stats/mean.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <gtest/gtest.h> #include <rmm/device_uvector.hpp> #include <thrust/sequence.h> #include <cstddef> #include <iostream> #include <vector> namespace cuvs::neighbors::ivf_flat { struct test_ivf_sample_filter { static constexpr unsigned offset = 300; }; template <typename IdxT> struct AnnIvfFlatInputs { IdxT num_queries; IdxT 
num_db_vecs; IdxT dim; IdxT k; IdxT nprobe; IdxT nlist; cuvs::distance::DistanceType metric; bool adaptive_centers; }; template <typename IdxT> ::std::ostream& operator<<(::std::ostream& os, const AnnIvfFlatInputs<IdxT>& p) { os << "{ " << p.num_queries << ", " << p.num_db_vecs << ", " << p.dim << ", " << p.k << ", " << p.nprobe << ", " << p.nlist << ", " << static_cast<int>(p.metric) << ", " << p.adaptive_centers << '}' << std::endl; return os; } template <typename T, typename DataT, typename IdxT> class AnnIVFFlatTest : public ::testing::TestWithParam<AnnIvfFlatInputs<IdxT>> { public: AnnIVFFlatTest() : stream_(resource::get_cuda_stream(handle_)), ps(::testing::TestWithParam<AnnIvfFlatInputs<IdxT>>::GetParam()), database(0, stream_), search_queries(0, stream_) { } void testIVFFlat() { size_t queries_size = ps.num_queries * ps.k; std::vector<IdxT> indices_ivfflat(queries_size); std::vector<IdxT> indices_naive(queries_size); std::vector<T> distances_ivfflat(queries_size); std::vector<T> distances_naive(queries_size); { rmm::device_uvector<T> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); naive_knn<T, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database.data(), ps.num_queries, ps.num_db_vecs, ps.dim, ps.k, ps.metric); update_host(distances_naive.data(), distances_naive_dev.data(), queries_size, stream_); update_host(indices_naive.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } { // unless something is really wrong with clustering, this could serve as a lower bound on // recall double min_recall = static_cast<double>(ps.nprobe) / static_cast<double>(ps.nlist); rmm::device_uvector<T> distances_ivfflat_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_ivfflat_dev(queries_size, stream_); { // legacy interface cuvs::spatial::knn::IVFFlatParam ivfParams; ivfParams.nprobe = ps.nprobe; ivfParams.nlist 
= ps.nlist; cuvs::spatial::knn::knnIndex index; approx_knn_build_index(handle_, &index, dynamic_cast<cuvs::spatial::knn::knnIndexParam*>(&ivfParams), ps.metric, (IdxT)0, database.data(), ps.num_db_vecs, ps.dim); resource::sync_stream(handle_); approx_knn_search(handle_, distances_ivfflat_dev.data(), indices_ivfflat_dev.data(), &index, ps.k, search_queries.data(), ps.num_queries); update_host(distances_ivfflat.data(), distances_ivfflat_dev.data(), queries_size, stream_); update_host(indices_ivfflat.data(), indices_ivfflat_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } ASSERT_TRUE(eval_neighbours(indices_naive, indices_ivfflat, distances_naive, distances_ivfflat, ps.num_queries, ps.k, 0.001, min_recall)); { ivf_flat::index_params index_params; ivf_flat::search_params search_params; index_params.n_lists = ps.nlist; index_params.metric = ps.metric; index_params.adaptive_centers = ps.adaptive_centers; search_params.n_probes = ps.nprobe; index_params.add_data_on_build = false; index_params.kmeans_trainset_fraction = 0.5; index_params.metric_arg = 0; auto database_view = raft::make_device_matrix_view<const DataT, IdxT>( (const DataT*)database.data(), ps.num_db_vecs, ps.dim); auto idx = ivf_flat::build(handle_, index_params, database_view); rmm::device_uvector<IdxT> vector_indices(ps.num_db_vecs, stream_); thrust::sequence(resource::get_thrust_policy(handle_), thrust::device_pointer_cast(vector_indices.data()), thrust::device_pointer_cast(vector_indices.data() + ps.num_db_vecs)); resource::sync_stream(handle_); IdxT half_of_data = ps.num_db_vecs / 2; auto half_of_data_view = raft::make_device_matrix_view<const DataT, IdxT>( (const DataT*)database.data(), half_of_data, ps.dim); const std::optional<raft::device_vector_view<const IdxT, IdxT>> no_opt = std::nullopt; index<DataT, IdxT> index_2 = ivf_flat::extend(handle_, half_of_data_view, no_opt, idx); auto new_half_of_data_view = raft::make_device_matrix_view<const DataT, IdxT>( database.data() + 
half_of_data * ps.dim, IdxT(ps.num_db_vecs) - half_of_data, ps.dim); auto new_half_of_data_indices_view = raft::make_device_vector_view<const IdxT, IdxT>( vector_indices.data() + half_of_data, IdxT(ps.num_db_vecs) - half_of_data); ivf_flat::extend(handle_, new_half_of_data_view, std::make_optional<raft::device_vector_view<const IdxT, IdxT>>( new_half_of_data_indices_view), &index_2); auto search_queries_view = raft::make_device_matrix_view<const DataT, IdxT>( search_queries.data(), ps.num_queries, ps.dim); auto indices_out_view = raft::make_device_matrix_view<IdxT, IdxT>( indices_ivfflat_dev.data(), ps.num_queries, ps.k); auto dists_out_view = raft::make_device_matrix_view<T, IdxT>( distances_ivfflat_dev.data(), ps.num_queries, ps.k); ivf_flat::detail::serialize(handle_, "ivf_flat_index", index_2); auto index_loaded = ivf_flat::detail::deserialize<DataT, IdxT>(handle_, "ivf_flat_index"); ASSERT_EQ(index_2.size(), index_loaded.size()); ivf_flat::search(handle_, search_params, index_loaded, search_queries_view, indices_out_view, dists_out_view); update_host(distances_ivfflat.data(), distances_ivfflat_dev.data(), queries_size, stream_); update_host(indices_ivfflat.data(), indices_ivfflat_dev.data(), queries_size, stream_); resource::sync_stream(handle_); // Test the centroid invariants if (index_2.adaptive_centers()) { // The centers must be up-to-date with the corresponding data std::vector<uint32_t> list_sizes(index_2.n_lists()); std::vector<IdxT*> list_indices(index_2.n_lists()); rmm::device_uvector<float> centroid(ps.dim, stream_); raft::copy( list_sizes.data(), index_2.list_sizes().data_handle(), index_2.n_lists(), stream_); raft::copy( list_indices.data(), index_2.inds_ptrs().data_handle(), index_2.n_lists(), stream_); resource::sync_stream(handle_); for (uint32_t l = 0; l < index_2.n_lists(); l++) { if (list_sizes[l] == 0) continue; rmm::device_uvector<float> cluster_data(list_sizes[l] * ps.dim, stream_); 
cuvs::spatial::knn::detail::utils::copy_selected<float>((IdxT)list_sizes[l], (IdxT)ps.dim, database.data(), list_indices[l], (IdxT)ps.dim, cluster_data.data(), (IdxT)ps.dim, stream_); raft::stats::mean<float, uint32_t>( centroid.data(), cluster_data.data(), ps.dim, list_sizes[l], false, true, stream_); ASSERT_TRUE(raft::devArrMatch(index_2.centers().data_handle() + ps.dim * l, centroid.data(), ps.dim, raft::CompareApprox<float>(0.001), stream_)); } } else { // The centers must be immutable ASSERT_TRUE(raft::devArrMatch(index_2.centers().data_handle(), idx.centers().data_handle(), index_2.centers().size(), raft::Compare<float>(), stream_)); } } ASSERT_TRUE(eval_neighbours(indices_naive, indices_ivfflat, distances_naive, distances_ivfflat, ps.num_queries, ps.k, 0.001, min_recall)); } } void testPacker() { ivf_flat::index_params index_params; ivf_flat::search_params search_params; index_params.n_lists = ps.nlist; index_params.metric = ps.metric; index_params.adaptive_centers = false; search_params.n_probes = ps.nprobe; index_params.add_data_on_build = false; index_params.kmeans_trainset_fraction = 1.0; index_params.metric_arg = 0; auto database_view = raft::make_device_matrix_view<const DataT, IdxT>( (const DataT*)database.data(), ps.num_db_vecs, ps.dim); auto idx = ivf_flat::build(handle_, index_params, database_view); const std::optional<raft::device_vector_view<const IdxT, IdxT>> no_opt = std::nullopt; index<DataT, IdxT> extend_index = ivf_flat::extend(handle_, database_view, no_opt, idx); auto list_sizes = raft::make_host_vector<uint32_t>(idx.n_lists()); update_host(list_sizes.data_handle(), extend_index.list_sizes().data_handle(), extend_index.n_lists(), stream_); resource::sync_stream(handle_); auto& lists = idx.lists(); // conservative memory allocation for codepacking auto list_device_spec = list_spec<uint32_t, DataT, IdxT>{idx.dim(), false}; for (uint32_t label = 0; label < idx.n_lists(); label++) { uint32_t list_size = list_sizes.data_handle()[label]; 
ivf::resize_list(handle_, lists[label], list_device_spec, list_size, 0); } idx.recompute_internal_state(handle_); using interleaved_group = Pow2<kIndexGroupSize>; for (uint32_t label = 0; label < idx.n_lists(); label++) { uint32_t list_size = list_sizes.data_handle()[label]; if (list_size > 0) { uint32_t padded_list_size = interleaved_group::roundUp(list_size); uint32_t n_elems = padded_list_size * idx.dim(); auto list_data = lists[label]->data; auto list_inds = extend_index.lists()[label]->indices; // fetch the flat codes auto flat_codes = make_device_matrix<DataT, uint32_t>(handle_, list_size, idx.dim()); matrix::gather( handle_, make_device_matrix_view<const DataT, uint32_t>( (const DataT*)database.data(), static_cast<uint32_t>(ps.num_db_vecs), idx.dim()), make_device_vector_view<const IdxT, uint32_t>((const IdxT*)list_inds.data_handle(), list_size), flat_codes.view()); helpers::codepacker::pack<DataT, IdxT>( handle_, make_const_mdspan(flat_codes.view()), idx.veclen(), 0, list_data.view()); { auto mask = make_device_vector<bool>(handle_, n_elems); linalg::map_offset(handle_, mask.view(), [dim = idx.dim(), list_size, padded_list_size, chunk_size = util::FastIntDiv(idx.veclen())] __device__(auto i) { uint32_t max_group_offset = interleaved_group::roundDown(list_size); if (i < max_group_offset * dim) { return true; } uint32_t surplus = (i - max_group_offset * dim); uint32_t ingroup_id = interleaved_group::mod(surplus / chunk_size); return ingroup_id < (list_size - max_group_offset); }); // ensure that the correct number of indices are masked out ASSERT_TRUE(thrust::reduce(resource::get_thrust_policy(handle_), mask.data_handle(), mask.data_handle() + n_elems, 0) == list_size * ps.dim); auto packed_list_data = make_device_vector<DataT, uint32_t>(handle_, n_elems); linalg::map_offset(handle_, packed_list_data.view(), [mask = mask.data_handle(), list_data = list_data.data_handle()] __device__(uint32_t i) { if (mask[i]) return list_data[i]; return DataT{0}; }); auto 
extend_data = extend_index.lists()[label]->data; auto extend_data_filtered = make_device_vector<DataT, uint32_t>(handle_, n_elems); linalg::map_offset(handle_, extend_data_filtered.view(), [mask = mask.data_handle(), extend_data = extend_data.data_handle()] __device__(uint32_t i) { if (mask[i]) return extend_data[i]; return DataT{0}; }); ASSERT_TRUE(raft::devArrMatch(packed_list_data.data_handle(), extend_data_filtered.data_handle(), n_elems, raft::Compare<DataT>(), stream_)); } auto unpacked_flat_codes = make_device_matrix<DataT, uint32_t>(handle_, list_size, idx.dim()); helpers::codepacker::unpack<DataT, IdxT>( handle_, list_data.view(), idx.veclen(), 0, unpacked_flat_codes.view()); ASSERT_TRUE(raft::devArrMatch(flat_codes.data_handle(), unpacked_flat_codes.data_handle(), list_size * ps.dim, raft::Compare<DataT>(), stream_)); } } } void testFilter() { size_t queries_size = ps.num_queries * ps.k; std::vector<IdxT> indices_ivfflat(queries_size); std::vector<IdxT> indices_naive(queries_size); std::vector<T> distances_ivfflat(queries_size); std::vector<T> distances_naive(queries_size); { rmm::device_uvector<T> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); auto* database_filtered_ptr = database.data() + test_ivf_sample_filter::offset * ps.dim; naive_knn<T, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database_filtered_ptr, ps.num_queries, ps.num_db_vecs - test_ivf_sample_filter::offset, ps.dim, ps.k, ps.metric); raft::linalg::addScalar(indices_naive_dev.data(), indices_naive_dev.data(), IdxT(test_ivf_sample_filter::offset), queries_size, stream_); update_host(distances_naive.data(), distances_naive_dev.data(), queries_size, stream_); update_host(indices_naive.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } { // unless something is really wrong with clustering, this could serve as a lower bound on // recall 
double min_recall = static_cast<double>(ps.nprobe) / static_cast<double>(ps.nlist); auto distances_ivfflat_dev = raft::make_device_matrix<T, IdxT>(handle_, ps.num_queries, ps.k); auto indices_ivfflat_dev = raft::make_device_matrix<IdxT, IdxT>(handle_, ps.num_queries, ps.k); { ivf_flat::index_params index_params; ivf_flat::search_params search_params; index_params.n_lists = ps.nlist; index_params.metric = ps.metric; index_params.adaptive_centers = ps.adaptive_centers; search_params.n_probes = ps.nprobe; index_params.add_data_on_build = true; index_params.kmeans_trainset_fraction = 0.5; index_params.metric_arg = 0; // Create IVF Flat index auto database_view = raft::make_device_matrix_view<const DataT, IdxT>( (const DataT*)database.data(), ps.num_db_vecs, ps.dim); auto index = ivf_flat::build(handle_, index_params, database_view); // Create Bitset filter auto removed_indices = raft::make_device_vector<IdxT, int64_t>(handle_, test_ivf_sample_filter::offset); thrust::sequence(resource::get_thrust_policy(handle_), thrust::device_pointer_cast(removed_indices.data_handle()), thrust::device_pointer_cast(removed_indices.data_handle() + test_ivf_sample_filter::offset)); resource::sync_stream(handle_); raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset( handle_, removed_indices.view(), ps.num_db_vecs); // Search with the filter auto search_queries_view = raft::make_device_matrix_view<const DataT, IdxT>( search_queries.data(), ps.num_queries, ps.dim); ivf_flat::search_with_filtering( handle_, search_params, index, search_queries_view, indices_ivfflat_dev.view(), distances_ivfflat_dev.view(), cuvs::neighbors::filtering::bitset_filter(removed_indices_bitset.view())); update_host( distances_ivfflat.data(), distances_ivfflat_dev.data_handle(), queries_size, stream_); update_host( indices_ivfflat.data(), indices_ivfflat_dev.data_handle(), queries_size, stream_); resource::sync_stream(handle_); } ASSERT_TRUE(eval_neighbours(indices_naive, indices_ivfflat, 
distances_naive, distances_ivfflat, ps.num_queries, ps.k, 0.001, min_recall)); } } void SetUp() override { database.resize(ps.num_db_vecs * ps.dim, stream_); search_queries.resize(ps.num_queries * ps.dim, stream_); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { raft::random::uniform( handle_, r, database.data(), ps.num_db_vecs * ps.dim, DataT(0.1), DataT(2.0)); raft::random::uniform( handle_, r, search_queries.data(), ps.num_queries * ps.dim, DataT(0.1), DataT(2.0)); } else { raft::random::uniformInt( handle_, r, database.data(), ps.num_db_vecs * ps.dim, DataT(1), DataT(20)); raft::random::uniformInt( handle_, r, search_queries.data(), ps.num_queries * ps.dim, DataT(1), DataT(20)); } resource::sync_stream(handle_); } void TearDown() override { resource::sync_stream(handle_); database.resize(0, stream_); search_queries.resize(0, stream_); } private: raft::resources handle_; rmm::cuda_stream_view stream_; AnnIvfFlatInputs<IdxT> ps; rmm::device_uvector<DataT> database; rmm::device_uvector<DataT> search_queries; }; const std::vector<AnnIvfFlatInputs<int64_t>> inputs = { // test various dims (aligned and not aligned to vector sizes) {1000, 10000, 1, 16, 40, 1024, cuvs::distance::DistanceType::L2Expanded, true}, {1000, 10000, 2, 16, 40, 1024, cuvs::distance::DistanceType::L2Expanded, false}, {1000, 10000, 3, 16, 40, 1024, cuvs::distance::DistanceType::L2Expanded, true}, {1000, 10000, 4, 16, 40, 1024, cuvs::distance::DistanceType::L2Expanded, false}, {1000, 10000, 5, 16, 40, 1024, cuvs::distance::DistanceType::InnerProduct, false}, {1000, 10000, 8, 16, 40, 1024, cuvs::distance::DistanceType::InnerProduct, true}, {1000, 10000, 5, 16, 40, 1024, cuvs::distance::DistanceType::L2SqrtExpanded, false}, {1000, 10000, 8, 16, 40, 1024, cuvs::distance::DistanceType::L2SqrtExpanded, true}, // test dims that do not fit into kernel shared memory limits {1000, 10000, 2048, 16, 40, 1024, cuvs::distance::DistanceType::L2Expanded, false}, {1000, 10000, 
2049, 16, 40, 1024, cuvs::distance::DistanceType::L2Expanded, false}, {1000, 10000, 2050, 16, 40, 1024, cuvs::distance::DistanceType::InnerProduct, false}, {1000, 10000, 2051, 16, 40, 1024, cuvs::distance::DistanceType::InnerProduct, true}, {1000, 10000, 2052, 16, 40, 1024, cuvs::distance::DistanceType::InnerProduct, false}, {1000, 10000, 2053, 16, 40, 1024, cuvs::distance::DistanceType::L2Expanded, true}, {1000, 10000, 2056, 16, 40, 1024, cuvs::distance::DistanceType::L2Expanded, true}, // various random combinations {1000, 10000, 16, 10, 40, 1024, cuvs::distance::DistanceType::L2Expanded, false}, {1000, 10000, 16, 10, 50, 1024, cuvs::distance::DistanceType::L2Expanded, false}, {1000, 10000, 16, 10, 70, 1024, cuvs::distance::DistanceType::L2Expanded, false}, {100, 10000, 16, 10, 20, 512, cuvs::distance::DistanceType::L2Expanded, false}, {20, 100000, 16, 10, 20, 1024, cuvs::distance::DistanceType::L2Expanded, true}, {1000, 100000, 16, 10, 20, 1024, cuvs::distance::DistanceType::L2Expanded, true}, {10000, 131072, 8, 10, 20, 1024, cuvs::distance::DistanceType::L2Expanded, false}, {1000, 10000, 16, 10, 40, 1024, cuvs::distance::DistanceType::InnerProduct, true}, {1000, 10000, 16, 10, 50, 1024, cuvs::distance::DistanceType::InnerProduct, true}, {1000, 10000, 16, 10, 70, 1024, cuvs::distance::DistanceType::InnerProduct, false}, {100, 10000, 16, 10, 20, 512, cuvs::distance::DistanceType::InnerProduct, true}, {20, 100000, 16, 10, 20, 1024, cuvs::distance::DistanceType::InnerProduct, true}, {1000, 100000, 16, 10, 20, 1024, cuvs::distance::DistanceType::InnerProduct, false}, {10000, 131072, 8, 10, 50, 1024, cuvs::distance::DistanceType::InnerProduct, true}, {1000, 10000, 4096, 20, 50, 1024, cuvs::distance::DistanceType::InnerProduct, false}, // test splitting the big query batches (> max gridDim.y) into smaller batches {100000, 1024, 32, 10, 64, 64, cuvs::distance::DistanceType::InnerProduct, false}, {1000000, 1024, 32, 10, 256, 256, 
cuvs::distance::DistanceType::InnerProduct, false}, {98306, 1024, 32, 10, 64, 64, cuvs::distance::DistanceType::InnerProduct, true}, // test radix_sort for getting the cluster selection {1000, 10000, 16, 10, raft::matrix::detail::select::warpsort::kMaxCapacity * 2, raft::matrix::detail::select::warpsort::kMaxCapacity * 4, cuvs::distance::DistanceType::L2Expanded, false}, {1000, 10000, 16, 10, raft::matrix::detail::select::warpsort::kMaxCapacity * 4, raft::matrix::detail::select::warpsort::kMaxCapacity * 4, cuvs::distance::DistanceType::InnerProduct, false}, // The following two test cases should show very similar recall. // num_queries, num_db_vecs, dim, k, nprobe, nlist, metric, adaptive_centers {20000, 8712, 3, 10, 51, 66, cuvs::distance::DistanceType::L2Expanded, false}, {100000, 8712, 3, 10, 51, 66, cuvs::distance::DistanceType::L2Expanded, false}}; } // namespace cuvs::neighbors::ivf_flat
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/haversine.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <cuvs/distance/distance_types.hpp> #include <cuvs/spatial/knn/detail/haversine_distance.cuh> #include <gtest/gtest.h> #include <iostream> #include <raft/core/resource/cuda_stream.hpp> #include <rmm/device_uvector.hpp> #include <vector> namespace raft { namespace spatial { namespace knn { template <typename value_idx, typename value_t> class HaversineKNNTest : public ::testing::Test { public: HaversineKNNTest() : stream(resource::get_cuda_stream(handle)), d_train_inputs(0, stream), d_ref_I(0, stream), d_ref_D(0, stream), d_pred_I(0, stream), d_pred_D(0, stream) { } protected: void basicTest() { // Allocate input d_train_inputs.resize(n * d, stream); // Allocate reference arrays d_ref_I.resize(n * n, stream); d_ref_D.resize(n * n, stream); // Allocate predicted arrays d_pred_I.resize(n * n, stream); d_pred_D.resize(n * n, stream); // make testdata on host std::vector<value_t> h_train_inputs = {0.71113885, -1.29215058, 0.59613176, -2.08048115, 0.74932804, -1.33634042, 0.51486728, -1.65962873, 0.53154002, -1.47049808, 0.72891737, -1.54095137}; h_train_inputs.resize(d_train_inputs.size()); raft::update_device( d_train_inputs.data(), h_train_inputs.data(), d_train_inputs.size(), stream); std::vector<value_t> h_res_D = {0., 0.05041587, 0.18767063, 0.23048252, 0.35749438, 0.62925595, 0., 0.36575755, 0.44288665, 0.5170737, 0.59501296, 0.62925595, 0., 
0.05041587, 0.152463, 0.2426416, 0.34925285, 0.59501296, 0., 0.16461092, 0.2345792, 0.34925285, 0.35749438, 0.36575755, 0., 0.16461092, 0.20535265, 0.23048252, 0.2426416, 0.5170737, 0., 0.152463, 0.18767063, 0.20535265, 0.2345792, 0.44288665}; h_res_D.resize(n * n); raft::update_device(d_ref_D.data(), h_res_D.data(), n * n, stream); std::vector<value_idx> h_res_I = {0, 2, 5, 4, 3, 1, 1, 3, 5, 4, 2, 0, 2, 0, 5, 4, 3, 1, 3, 4, 5, 2, 0, 1, 4, 3, 5, 0, 2, 1, 5, 2, 0, 4, 3, 1}; h_res_I.resize(n * n); raft::update_device<value_idx>(d_ref_I.data(), h_res_I.data(), n * n, stream); cuvs::spatial::knn::detail::haversine_knn(d_pred_I.data(), d_pred_D.data(), d_train_inputs.data(), d_train_inputs.data(), n, n, k, stream); resource::sync_stream(handle, stream); } void SetUp() override { basicTest(); } protected: raft::resources handle; cudaStream_t stream; rmm::device_uvector<value_t> d_train_inputs; int n = 6; int d = 2; int k = 6; rmm::device_uvector<value_idx> d_pred_I; rmm::device_uvector<value_t> d_pred_D; rmm::device_uvector<value_idx> d_ref_I; rmm::device_uvector<value_t> d_ref_D; }; typedef HaversineKNNTest<int, float> HaversineKNNTestF; TEST_F(HaversineKNNTestF, Fit) { ASSERT_TRUE(raft::devArrMatch( d_ref_D.data(), d_pred_D.data(), n * n, raft::CompareApprox<float>(1e-3), stream)); ASSERT_TRUE( raft::devArrMatch(d_ref_I.data(), d_pred_I.data(), n * n, raft::Compare<int>(), stream)); } } // namespace knn } // namespace spatial } // namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_nn_descent.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "../test_utils.cuh" #include "ann_utils.cuh" #include <cuvs_internal/neighbors/naive_knn.cuh> #include <cuvs/neighbors/nn_descent.cuh> #include <raft/core/resource/cuda_stream.hpp> #include <raft/util/itertools.hpp> #include <gtest/gtest.h> #include <cstddef> #include <iostream> #include <string> #include <vector> namespace cuvs::neighbors::experimental::nn_descent { struct AnnNNDescentInputs { int n_rows; int dim; int graph_degree; cuvs::distance::DistanceType metric; bool host_dataset; double min_recall; }; inline ::std::ostream& operator<<(::std::ostream& os, const AnnNNDescentInputs& p) { os << "dataset shape=" << p.n_rows << "x" << p.dim << ", graph_degree=" << p.graph_degree << ", metric=" << static_cast<int>(p.metric) << (p.host_dataset ? 
", host" : ", device") << std::endl; return os; } template <typename DistanceT, typename DataT, typename IdxT> class AnnNNDescentTest : public ::testing::TestWithParam<AnnNNDescentInputs> { public: AnnNNDescentTest() : stream_(resource::get_cuda_stream(handle_)), ps(::testing::TestWithParam<AnnNNDescentInputs>::GetParam()), database(0, stream_) { } protected: void testNNDescent() { size_t queries_size = ps.n_rows * ps.graph_degree; std::vector<IdxT> indices_NNDescent(queries_size); std::vector<IdxT> indices_naive(queries_size); { rmm::device_uvector<DistanceT> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); naive_knn<DistanceT, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), database.data(), database.data(), ps.n_rows, ps.n_rows, ps.dim, ps.graph_degree, ps.metric); update_host(indices_naive.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } { { nn_descent::index_params index_params; index_params.metric = ps.metric; index_params.graph_degree = ps.graph_degree; index_params.intermediate_graph_degree = 2 * ps.graph_degree; index_params.max_iterations = 100; auto database_view = raft::make_device_matrix_view<const DataT, int64_t>( (const DataT*)database.data(), ps.n_rows, ps.dim); { if (ps.host_dataset) { auto database_host = raft::make_host_matrix<DataT, int64_t>(ps.n_rows, ps.dim); raft::copy(database_host.data_handle(), database.data(), database.size(), stream_); auto database_host_view = raft::make_host_matrix_view<const DataT, int64_t>( (const DataT*)database_host.data_handle(), ps.n_rows, ps.dim); auto index = nn_descent::build<DataT, IdxT>(handle_, index_params, database_host_view); update_host( indices_NNDescent.data(), index.graph().data_handle(), queries_size, stream_); } else { auto index = nn_descent::build<DataT, IdxT>(handle_, index_params, database_view); update_host( indices_NNDescent.data(), index.graph().data_handle(), 
queries_size, stream_); }; } resource::sync_stream(handle_); } double min_recall = ps.min_recall; EXPECT_TRUE(eval_recall( indices_naive, indices_NNDescent, ps.n_rows, ps.graph_degree, 0.001, min_recall)); } } void SetUp() override { database.resize(((size_t)ps.n_rows) * ps.dim, stream_); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { raft::random::normal(handle_, r, database.data(), ps.n_rows * ps.dim, DataT(0.1), DataT(2.0)); } else { raft::random::uniformInt( handle_, r, database.data(), ps.n_rows * ps.dim, DataT(1), DataT(20)); } resource::sync_stream(handle_); } void TearDown() override { resource::sync_stream(handle_); database.resize(0, stream_); } private: raft::resources handle_; rmm::cuda_stream_view stream_; AnnNNDescentInputs ps; rmm::device_uvector<DataT> database; }; const std::vector<AnnNNDescentInputs> inputs = raft::util::itertools::product<AnnNNDescentInputs>( {1000, 2000}, // n_rows {3, 5, 7, 8, 17, 64, 128, 137, 192, 256, 512, 619, 1024}, // dim {32, 64}, // graph_degree {cuvs::distance::DistanceType::L2Expanded}, {false, true}, {0.90}); } // namespace cuvs::neighbors::experimental::nn_descent
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_ivf_pq.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "../test_utils.cuh" #include "ann_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <cuvs_internal/neighbors/naive_knn.cuh> #include <cuvs/distance/distance_types.hpp> #include <cuvs/neighbors/ivf_pq.cuh> #include <cuvs/neighbors/ivf_pq_helpers.cuh> #include <cuvs/neighbors/ivf_pq_serialize.cuh> #include <cuvs/neighbors/sample_filter.cuh> #include <raft/core/logger.hpp> #include <raft/linalg/map.cuh> #include <raft/linalg/map_reduce.cuh> #include <raft/matrix/gather.cuh> #include <raft/random/rng.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_vector.hpp> #include <rmm/mr/device/managed_memory_resource.hpp> #include <gtest/gtest.h> #include <cub/cub.cuh> #include <thrust/sequence.h> #include <algorithm> #include <cstddef> #include <iostream> #include <optional> #include <vector> namespace cuvs::neighbors::ivf_pq { struct test_ivf_sample_filter { static constexpr unsigned offset = 1500; }; struct ivf_pq_inputs { uint32_t num_db_vecs = 4096; uint32_t num_queries = 1024; uint32_t dim = 64; uint32_t k = 32; std::optional<double> min_recall = std::nullopt; ivf_pq::index_params index_params; ivf_pq::search_params search_params; // Set some default parameters for tests ivf_pq_inputs() { index_params.n_lists = max(32u, min(1024u, num_db_vecs / 128u)); index_params.kmeans_trainset_fraction = 1.0; } }; 
inline auto operator<<(std::ostream& os, const ivf_pq::codebook_gen& p) -> std::ostream& { switch (p) { case ivf_pq::codebook_gen::PER_CLUSTER: os << "codebook_gen::PER_CLUSTER"; break; case ivf_pq::codebook_gen::PER_SUBSPACE: os << "codebook_gen::PER_SUBSPACE"; break; default: RAFT_FAIL("unreachable code"); } return os; } inline auto operator<<(std::ostream& os, const ivf_pq_inputs& p) -> std::ostream& { ivf_pq_inputs dflt; bool need_comma = false; #define PRINT_DIFF_V(spec, val) \ do { \ if (dflt spec != p spec) { \ if (need_comma) { os << ", "; } \ os << #spec << " = " << val; \ need_comma = true; \ } \ } while (0) #define PRINT_DIFF(spec) PRINT_DIFF_V(spec, p spec) os << "ivf_pq_inputs {"; PRINT_DIFF(.num_db_vecs); PRINT_DIFF(.num_queries); PRINT_DIFF(.dim); PRINT_DIFF(.k); PRINT_DIFF_V(.min_recall, p.min_recall.value_or(0)); PRINT_DIFF_V(.index_params.metric, print_metric{p.index_params.metric}); PRINT_DIFF(.index_params.metric_arg); PRINT_DIFF(.index_params.add_data_on_build); PRINT_DIFF(.index_params.n_lists); PRINT_DIFF(.index_params.kmeans_n_iters); PRINT_DIFF(.index_params.kmeans_trainset_fraction); PRINT_DIFF(.index_params.pq_bits); PRINT_DIFF(.index_params.pq_dim); PRINT_DIFF(.index_params.codebook_kind); PRINT_DIFF(.index_params.force_random_rotation); PRINT_DIFF(.search_params.n_probes); PRINT_DIFF_V(.search_params.lut_dtype, print_dtype{p.search_params.lut_dtype}); PRINT_DIFF_V(.search_params.internal_distance_dtype, print_dtype{p.search_params.internal_distance_dtype}); os << "}"; return os; } template <typename T> void compare_vectors_l2( const raft::resources& res, T a, T b, uint32_t label, double compression_ratio, double eps) { auto n_rows = a.extent(0); auto dim = a.extent(1); rmm::mr::managed_memory_resource managed_memory; auto dist = make_device_mdarray<double>(res, &managed_memory, make_extents<uint32_t>(n_rows)); linalg::map_offset(res, dist.view(), [a, b, dim] __device__(uint32_t i) { spatial::knn::detail::utils::mapping<float> f{}; 
double d = 0.0f; for (uint32_t j = 0; j < dim; j++) { double t = f(a(i, j)) - f(b(i, j)); d += t * t; } return sqrt(d / double(dim)); }); resource::sync_stream(res); for (uint32_t i = 0; i < n_rows; i++) { double d = dist(i); // The theoretical estimate of the error is hard to come up with, // the estimate below is based on experimentation + curse of dimensionality ASSERT_LE(d, 1.2 * eps * std::pow(2.0, compression_ratio)) << " (label = " << label << ", ix = " << i << ", eps = " << eps << ")"; } } template <typename IdxT> auto min_output_size(const raft::resources& handle, const ivf_pq::index<IdxT>& index, uint32_t n_probes) -> IdxT { auto acc_sizes = index.accum_sorted_sizes(); uint32_t last_nonzero = index.n_lists(); while (last_nonzero > 0 && acc_sizes(last_nonzero - 1) == acc_sizes(last_nonzero)) { last_nonzero--; } return acc_sizes(last_nonzero) - acc_sizes(last_nonzero - std::min(last_nonzero, n_probes)); } template <typename EvalT, typename DataT, typename IdxT> class ivf_pq_test : public ::testing::TestWithParam<ivf_pq_inputs> { public: ivf_pq_test() : stream_(resource::get_cuda_stream(handle_)), ps(::testing::TestWithParam<ivf_pq_inputs>::GetParam()), database(0, stream_), search_queries(0, stream_) { } void gen_data() { database.resize(size_t{ps.num_db_vecs} * size_t{ps.dim}, stream_); search_queries.resize(size_t{ps.num_queries} * size_t{ps.dim}, stream_); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { raft::random::uniform( handle_, r, database.data(), ps.num_db_vecs * ps.dim, DataT(0.1), DataT(2.0)); raft::random::uniform( handle_, r, search_queries.data(), ps.num_queries * ps.dim, DataT(0.1), DataT(2.0)); } else { raft::random::uniformInt( handle_, r, database.data(), ps.num_db_vecs * ps.dim, DataT(1), DataT(20)); raft::random::uniformInt( handle_, r, search_queries.data(), ps.num_queries * ps.dim, DataT(1), DataT(20)); } resource::sync_stream(handle_); } void calc_ref() { size_t queries_size = size_t{ps.num_queries} 
* size_t{ps.k}; rmm::device_uvector<EvalT> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); naive_knn<EvalT, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database.data(), ps.num_queries, ps.num_db_vecs, ps.dim, ps.k, ps.index_params.metric); distances_ref.resize(queries_size); update_host(distances_ref.data(), distances_naive_dev.data(), queries_size, stream_); indices_ref.resize(queries_size); update_host(indices_ref.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } auto build_only() { auto ipams = ps.index_params; ipams.add_data_on_build = true; auto index_view = raft::make_device_matrix_view<DataT, IdxT>(database.data(), ps.num_db_vecs, ps.dim); return ivf_pq::build<DataT, IdxT>(handle_, ipams, index_view); } auto build_2_extends() { auto db_indices = make_device_vector<IdxT>(handle_, ps.num_db_vecs); linalg::map_offset(handle_, db_indices.view(), identity_op{}); resource::sync_stream(handle_); auto size_1 = IdxT(ps.num_db_vecs) / 2; auto size_2 = IdxT(ps.num_db_vecs) - size_1; auto vecs_1 = database.data(); auto vecs_2 = database.data() + size_t(size_1) * size_t(ps.dim); auto inds_1 = db_indices.data_handle(); auto inds_2 = db_indices.data_handle() + size_t(size_1); auto ipams = ps.index_params; ipams.add_data_on_build = false; auto database_view = raft::make_device_matrix_view<DataT, IdxT>(database.data(), ps.num_db_vecs, ps.dim); auto idx = ivf_pq::build<DataT, IdxT>(handle_, ipams, database_view); auto vecs_2_view = raft::make_device_matrix_view<DataT, IdxT>(vecs_2, size_2, ps.dim); auto inds_2_view = raft::make_device_vector_view<IdxT, IdxT>(inds_2, size_2); ivf_pq::extend<DataT, IdxT>(handle_, vecs_2_view, inds_2_view, &idx); auto vecs_1_view = raft::make_device_matrix_view<DataT, IdxT, row_major>(vecs_1, size_1, ps.dim); auto inds_1_view = raft::make_device_vector_view<const IdxT, IdxT>(inds_1, 
size_1); ivf_pq::extend<DataT, IdxT>(handle_, vecs_1_view, inds_1_view, &idx); return idx; } auto build_serialize() { ivf_pq::serialize<IdxT>(handle_, "ivf_pq_index", build_only()); return ivf_pq::deserialize<IdxT>(handle_, "ivf_pq_index"); } void check_reconstruction(const index<IdxT>& index, double compression_ratio, uint32_t label, uint32_t n_take, uint32_t n_skip) { auto& rec_list = index.lists()[label]; auto dim = index.dim(); n_take = std::min<uint32_t>(n_take, rec_list->size.load()); n_skip = std::min<uint32_t>(n_skip, rec_list->size.load() - n_take); if (n_take == 0) { return; } auto rec_data = make_device_matrix<DataT>(handle_, n_take, dim); auto orig_data = make_device_matrix<DataT>(handle_, n_take, dim); ivf_pq::helpers::reconstruct_list_data(handle_, index, rec_data.view(), label, n_skip); matrix::gather(database.data(), IdxT{dim}, IdxT{n_take}, rec_list->indices.data_handle() + n_skip, IdxT{n_take}, orig_data.data_handle(), stream_); compare_vectors_l2(handle_, rec_data.view(), orig_data.view(), label, compression_ratio, 0.06); } void check_reconstruct_extend(index<IdxT>* index, double compression_ratio, uint32_t label) { // NB: this is not reference, the list is retained; the index will have to create a new list on // `erase_list` op. 
auto old_list = index->lists()[label]; auto n_rows = old_list->size.load(); if (n_rows == 0) { return; } auto vectors_1 = make_device_matrix<EvalT>(handle_, n_rows, index->dim()); auto indices = make_device_vector<IdxT>(handle_, n_rows); copy(indices.data_handle(), old_list->indices.data_handle(), n_rows, stream_); ivf_pq::helpers::reconstruct_list_data(handle_, *index, vectors_1.view(), label, 0); ivf_pq::helpers::erase_list(handle_, index, label); // NB: passing the type parameter because const->non-const implicit conversion of the mdspans // breaks type inference ivf_pq::helpers::extend_list<EvalT, IdxT>( handle_, index, vectors_1.view(), indices.view(), label); auto& new_list = index->lists()[label]; ASSERT_NE(old_list.get(), new_list.get()) << "The old list should have been shared and retained after ivf_pq index has erased the " "corresponding cluster."; auto vectors_2 = make_device_matrix<EvalT>(handle_, n_rows, index->dim()); ivf_pq::helpers::reconstruct_list_data(handle_, *index, vectors_2.view(), label, 0); // The code search is unstable, and there's high chance of repeating values of the lvl-2 codes. // Hence, encoding-decoding chain often leads to altering both the PQ codes and the // reconstructed data. 
compare_vectors_l2( handle_, vectors_1.view(), vectors_2.view(), label, compression_ratio, 0.04); // 0.025); } void check_packing(index<IdxT>* index, uint32_t label) { auto old_list = index->lists()[label]; auto n_rows = old_list->size.load(); if (n_rows == 0) { return; } auto codes = make_device_matrix<uint8_t>(handle_, n_rows, index->pq_dim()); auto indices = make_device_vector<IdxT>(handle_, n_rows); copy(indices.data_handle(), old_list->indices.data_handle(), n_rows, stream_); ivf_pq::helpers::unpack_list_data(handle_, *index, codes.view(), label, 0); ivf_pq::helpers::erase_list(handle_, index, label); ivf_pq::helpers::extend_list_with_codes<IdxT>( handle_, index, codes.view(), indices.view(), label); auto& new_list = index->lists()[label]; ASSERT_NE(old_list.get(), new_list.get()) << "The old list should have been shared and retained after ivf_pq index has erased the " "corresponding cluster."; auto list_data_size = (n_rows / ivf_pq::kIndexGroupSize) * new_list->data.extent(1) * new_list->data.extent(2) * new_list->data.extent(3); ASSERT_TRUE(old_list->data.size() >= list_data_size); ASSERT_TRUE(new_list->data.size() >= list_data_size); ASSERT_TRUE(devArrMatch(old_list->data.data_handle(), new_list->data.data_handle(), list_data_size, Compare<uint8_t>{})); // Pack a few vectors back to the list. 
int row_offset = 9; int n_vec = 3; ASSERT_TRUE(row_offset + n_vec < n_rows); size_t offset = row_offset * index->pq_dim(); auto codes_to_pack = make_device_matrix_view<const uint8_t, uint32_t>( codes.data_handle() + offset, n_vec, index->pq_dim()); ivf_pq::helpers::pack_list_data(handle_, index, codes_to_pack, label, row_offset); ASSERT_TRUE(devArrMatch(old_list->data.data_handle(), new_list->data.data_handle(), list_data_size, Compare<uint8_t>{})); // Another test with the API that take list_data directly auto list_data = index->lists()[label]->data.view(); uint32_t n_take = 4; ASSERT_TRUE(row_offset + n_take < n_rows); auto codes2 = raft::make_device_matrix<uint8_t>(handle_, n_take, index->pq_dim()); ivf_pq::helpers::codepacker::unpack( handle_, list_data, index->pq_bits(), row_offset, codes2.view()); // Write it back ivf_pq::helpers::codepacker::pack( handle_, make_const_mdspan(codes2.view()), index->pq_bits(), row_offset, list_data); ASSERT_TRUE(devArrMatch(old_list->data.data_handle(), new_list->data.data_handle(), list_data_size, Compare<uint8_t>{})); } void check_packing_contiguous(index<IdxT>* index, uint32_t label) { auto old_list = index->lists()[label]; auto n_rows = old_list->size.load(); if (n_rows == 0) { return; } auto codes = make_device_matrix<uint8_t>(handle_, n_rows, index->pq_dim()); auto indices = make_device_vector<IdxT>(handle_, n_rows); copy(indices.data_handle(), old_list->indices.data_handle(), n_rows, stream_); uint32_t code_size = ceildiv<uint32_t>(index->pq_dim() * index->pq_bits(), 8); auto codes_compressed = make_device_matrix<uint8_t>(handle_, n_rows, code_size); ivf_pq::helpers::unpack_contiguous_list_data( handle_, *index, codes_compressed.data_handle(), n_rows, label, 0); ivf_pq::helpers::erase_list(handle_, index, label); ivf_pq::detail::extend_list_prepare(handle_, index, make_const_mdspan(indices.view()), label); ivf_pq::helpers::pack_contiguous_list_data<IdxT>( handle_, index, codes_compressed.data_handle(), n_rows, label, 0); 
ivf_pq::helpers::recompute_internal_state(handle_, index); auto& new_list = index->lists()[label]; ASSERT_NE(old_list.get(), new_list.get()) << "The old list should have been shared and retained after ivf_pq index has erased the " "corresponding cluster."; auto list_data_size = (n_rows / ivf_pq::kIndexGroupSize) * new_list->data.extent(1) * new_list->data.extent(2) * new_list->data.extent(3); ASSERT_TRUE(old_list->data.size() >= list_data_size); ASSERT_TRUE(new_list->data.size() >= list_data_size); ASSERT_TRUE(devArrMatch(old_list->data.data_handle(), new_list->data.data_handle(), list_data_size, Compare<uint8_t>{})); // Pack a few vectors back to the list. uint32_t row_offset = 9; uint32_t n_vec = 3; ASSERT_TRUE(row_offset + n_vec < n_rows); size_t offset = row_offset * code_size; auto codes_to_pack = make_device_matrix_view<uint8_t, uint32_t>( codes_compressed.data_handle() + offset, n_vec, index->pq_dim()); ivf_pq::helpers::pack_contiguous_list_data( handle_, index, codes_to_pack.data_handle(), n_vec, label, row_offset); ASSERT_TRUE(devArrMatch(old_list->data.data_handle(), new_list->data.data_handle(), list_data_size, Compare<uint8_t>{})); // // Another test with the API that take list_data directly auto list_data = index->lists()[label]->data.view(); uint32_t n_take = 4; ASSERT_TRUE(row_offset + n_take < n_rows); auto codes2 = raft::make_device_matrix<uint8_t>(handle_, n_take, code_size); ivf_pq::helpers::codepacker::unpack_contiguous(handle_, list_data, index->pq_bits(), row_offset, n_take, index->pq_dim(), codes2.data_handle()); // Write it back ivf_pq::helpers::codepacker::pack_contiguous(handle_, codes2.data_handle(), n_vec, index->pq_dim(), index->pq_bits(), row_offset, list_data); ASSERT_TRUE(devArrMatch(old_list->data.data_handle(), new_list->data.data_handle(), list_data_size, Compare<uint8_t>{})); } template <typename BuildIndex> void run(BuildIndex build_index) { index<IdxT> index = build_index(); double compression_ratio = static_cast<double>(ps.dim 
* 8) / static_cast<double>(index.pq_dim() * index.pq_bits()); for (uint32_t label = 0; label < index.n_lists(); label++) { switch (label % 3) { case 0: { // Reconstruct and re-write vectors for one label check_reconstruct_extend(&index, compression_ratio, label); } break; case 1: { // Dump and re-write codes for one label check_packing(&index, label); check_packing_contiguous(&index, label); } break; default: { // check a small subset of data in a randomly chosen cluster to see if the data // reconstruction works well. check_reconstruction(index, compression_ratio, label, 100, 7); } } } size_t queries_size = ps.num_queries * ps.k; std::vector<IdxT> indices_ivf_pq(queries_size); std::vector<EvalT> distances_ivf_pq(queries_size); rmm::device_uvector<EvalT> distances_ivf_pq_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_ivf_pq_dev(queries_size, stream_); auto query_view = raft::make_device_matrix_view<DataT, uint32_t>(search_queries.data(), ps.num_queries, ps.dim); auto inds_view = raft::make_device_matrix_view<IdxT, uint32_t>( indices_ivf_pq_dev.data(), ps.num_queries, ps.k); auto dists_view = raft::make_device_matrix_view<EvalT, uint32_t>( distances_ivf_pq_dev.data(), ps.num_queries, ps.k); ivf_pq::search<DataT, IdxT>( handle_, ps.search_params, index, query_view, inds_view, dists_view); update_host(distances_ivf_pq.data(), distances_ivf_pq_dev.data(), queries_size, stream_); update_host(indices_ivf_pq.data(), indices_ivf_pq_dev.data(), queries_size, stream_); resource::sync_stream(handle_); // A very conservative lower bound on recall double min_recall = static_cast<double>(ps.search_params.n_probes) / static_cast<double>(ps.index_params.n_lists); // Using a heuristic to lower the required recall due to code-packing errors min_recall = std::min(std::erfc(0.05 * compression_ratio / std::max(min_recall, 0.5)), min_recall); // Use explicit per-test min recall value if provided. 
min_recall = ps.min_recall.value_or(min_recall); ASSERT_TRUE(eval_neighbours(indices_ref, indices_ivf_pq, distances_ref, distances_ivf_pq, ps.num_queries, ps.k, 0.0001 * compression_ratio, min_recall)) << ps; // Test a few extra invariants IdxT min_results = min_output_size(handle_, index, ps.search_params.n_probes); IdxT max_oob = ps.k <= min_results ? 0 : ps.k - min_results; IdxT found_oob = 0; for (uint32_t query_ix = 0; query_ix < ps.num_queries; query_ix++) { for (uint32_t k = 0; k < ps.k; k++) { auto flat_i = query_ix * ps.k + k; auto found_ix = indices_ivf_pq[flat_i]; if (found_ix == ivf_pq::kOutOfBoundsRecord<IdxT>) { found_oob++; continue; } ASSERT_NE(found_ix, ivf::kInvalidRecord<IdxT>) << "got an invalid record at query_ix = " << query_ix << ", k = " << k << " (distance = " << distances_ivf_pq[flat_i] << ")"; ASSERT_LT(found_ix, ps.num_db_vecs) << "got an impossible index = " << found_ix << " at query_ix = " << query_ix << ", k = " << k << " (distance = " << distances_ivf_pq[flat_i] << ")"; } } ASSERT_LE(found_oob, max_oob) << "got too many records out-of-bounds (see ivf_pq::kOutOfBoundsRecord<IdxT>)."; if (found_oob > 0) { RAFT_LOG_WARN( "Got %zu results out-of-bounds because of large top-k (%zu) and small n_probes (%u) and " "small DB size/n_lists ratio (%zu / %u)", size_t(found_oob), size_t(ps.k), ps.search_params.n_probes, size_t(ps.num_db_vecs), ps.index_params.n_lists); } } void SetUp() override // NOLINT { gen_data(); calc_ref(); } void TearDown() override // NOLINT { cudaGetLastError(); resource::sync_stream(handle_); database.resize(0, stream_); search_queries.resize(0, stream_); } private: raft::resources handle_; rmm::cuda_stream_view stream_; ivf_pq_inputs ps; // NOLINT rmm::device_uvector<DataT> database; // NOLINT rmm::device_uvector<DataT> search_queries; // NOLINT std::vector<IdxT> indices_ref; // NOLINT std::vector<EvalT> distances_ref; // NOLINT }; template <typename EvalT, typename DataT, typename IdxT> class ivf_pq_filter_test : 
public ::testing::TestWithParam<ivf_pq_inputs> { public: ivf_pq_filter_test() : stream_(resource::get_cuda_stream(handle_)), ps(::testing::TestWithParam<ivf_pq_inputs>::GetParam()), database(0, stream_), search_queries(0, stream_) { } void gen_data() { database.resize(size_t{ps.num_db_vecs} * size_t{ps.dim}, stream_); search_queries.resize(size_t{ps.num_queries} * size_t{ps.dim}, stream_); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { raft::random::uniform( handle_, r, database.data(), ps.num_db_vecs * ps.dim, DataT(0.1), DataT(2.0)); raft::random::uniform( handle_, r, search_queries.data(), ps.num_queries * ps.dim, DataT(0.1), DataT(2.0)); } else { raft::random::uniformInt( handle_, r, database.data(), ps.num_db_vecs * ps.dim, DataT(1), DataT(20)); raft::random::uniformInt( handle_, r, search_queries.data(), ps.num_queries * ps.dim, DataT(1), DataT(20)); } resource::sync_stream(handle_); } void calc_ref() { size_t queries_size = size_t{ps.num_queries} * size_t{ps.k}; rmm::device_uvector<EvalT> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); naive_knn<EvalT, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database.data() + test_ivf_sample_filter::offset * ps.dim, ps.num_queries, ps.num_db_vecs - test_ivf_sample_filter::offset, ps.dim, ps.k, ps.index_params.metric); raft::linalg::addScalar(indices_naive_dev.data(), indices_naive_dev.data(), IdxT(test_ivf_sample_filter::offset), queries_size, stream_); distances_ref.resize(queries_size); update_host(distances_ref.data(), distances_naive_dev.data(), queries_size, stream_); indices_ref.resize(queries_size); update_host(indices_ref.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } auto build_only() { auto ipams = ps.index_params; ipams.add_data_on_build = true; auto index_view = raft::make_device_matrix_view<DataT, 
IdxT>(database.data(), ps.num_db_vecs, ps.dim); return ivf_pq::build<DataT, IdxT>(handle_, ipams, index_view); } template <typename BuildIndex> void run(BuildIndex build_index) { index<IdxT> index = build_index(); double compression_ratio = static_cast<double>(ps.dim * 8) / static_cast<double>(index.pq_dim() * index.pq_bits()); size_t queries_size = ps.num_queries * ps.k; std::vector<IdxT> indices_ivf_pq(queries_size); std::vector<EvalT> distances_ivf_pq(queries_size); rmm::device_uvector<EvalT> distances_ivf_pq_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_ivf_pq_dev(queries_size, stream_); auto query_view = raft::make_device_matrix_view<DataT, uint32_t>(search_queries.data(), ps.num_queries, ps.dim); auto inds_view = raft::make_device_matrix_view<IdxT, uint32_t>( indices_ivf_pq_dev.data(), ps.num_queries, ps.k); auto dists_view = raft::make_device_matrix_view<EvalT, uint32_t>( distances_ivf_pq_dev.data(), ps.num_queries, ps.k); // Create Bitset filter auto removed_indices = raft::make_device_vector<IdxT, int64_t>(handle_, test_ivf_sample_filter::offset); thrust::sequence( resource::get_thrust_policy(handle_), thrust::device_pointer_cast(removed_indices.data_handle()), thrust::device_pointer_cast(removed_indices.data_handle() + test_ivf_sample_filter::offset)); resource::sync_stream(handle_); raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset( handle_, removed_indices.view(), ps.num_db_vecs); ivf_pq::search_with_filtering<DataT, IdxT>( handle_, ps.search_params, index, query_view, inds_view, dists_view, cuvs::neighbors::filtering::bitset_filter(removed_indices_bitset.view())); update_host(distances_ivf_pq.data(), distances_ivf_pq_dev.data(), queries_size, stream_); update_host(indices_ivf_pq.data(), indices_ivf_pq_dev.data(), queries_size, stream_); resource::sync_stream(handle_); // A very conservative lower bound on recall double min_recall = static_cast<double>(ps.search_params.n_probes) / 
static_cast<double>(ps.index_params.n_lists); // Using a heuristic to lower the required recall due to code-packing errors min_recall = std::min(std::erfc(0.05 * compression_ratio / std::max(min_recall, 0.5)), min_recall); // Use explicit per-test min recall value if provided. min_recall = ps.min_recall.value_or(min_recall); ASSERT_TRUE(eval_neighbours(indices_ref, indices_ivf_pq, distances_ref, distances_ivf_pq, ps.num_queries, ps.k, 0.0001 * compression_ratio, min_recall)) << ps; } void SetUp() override // NOLINT { gen_data(); calc_ref(); } void TearDown() override // NOLINT { cudaGetLastError(); resource::sync_stream(handle_); database.resize(0, stream_); search_queries.resize(0, stream_); } private: raft::resources handle_; rmm::cuda_stream_view stream_; ivf_pq_inputs ps; // NOLINT rmm::device_uvector<DataT> database; // NOLINT rmm::device_uvector<DataT> search_queries; // NOLINT std::vector<IdxT> indices_ref; // NOLINT std::vector<EvalT> distances_ref; // NOLINT }; /* Test cases */ using test_cases_t = std::vector<ivf_pq_inputs>; // concatenate parameter sets for different type template <typename T> auto operator+(const std::vector<T>& a, const std::vector<T>& b) -> std::vector<T> { std::vector<T> res = a; res.insert(res.end(), b.begin(), b.end()); return res; } inline auto defaults() -> test_cases_t { return {ivf_pq_inputs{}}; } template <typename B, typename A, typename F> auto map(const std::vector<A>& xs, F f) -> std::vector<B> { std::vector<B> ys(xs.size()); std::transform(xs.begin(), xs.end(), ys.begin(), f); return ys; } inline auto with_dims(const std::vector<uint32_t>& dims) -> test_cases_t { return map<ivf_pq_inputs>(dims, [](uint32_t d) { ivf_pq_inputs x; x.dim = d; return x; }); } /** These will surely trigger the fastest kernel available. 
*/ inline auto small_dims() -> test_cases_t { return with_dims({1, 2, 3, 4, 5, 8, 15, 16, 17}); } inline auto small_dims_per_cluster() -> test_cases_t { return map<ivf_pq_inputs>(small_dims(), [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); y.index_params.codebook_kind = ivf_pq::codebook_gen::PER_CLUSTER; return y; }); } inline auto big_dims() -> test_cases_t { // with_dims({512, 513, 1023, 1024, 1025, 2048, 2049, 2050, 2053, 6144, 8192, 12288, 16384}); auto xs = with_dims({512, 513, 1023, 1024, 1025, 2048, 2049, 2050, 2053, 6144}); return map<ivf_pq_inputs>(xs, [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); uint32_t pq_len = 2; y.index_params.pq_dim = div_rounding_up_safe(x.dim, pq_len); // This comes from pure experimentation, also the recall depens a lot on pq_len. y.min_recall = 0.48 + 0.028 * std::log2(x.dim); return y; }); } /** These will surely trigger no-smem-lut kernel. */ inline auto big_dims_moderate_lut() -> test_cases_t { return map<ivf_pq_inputs>(big_dims(), [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); uint32_t pq_len = 2; y.index_params.pq_dim = round_up_safe(div_rounding_up_safe(x.dim, pq_len), 4u); y.index_params.pq_bits = 6; y.search_params.lut_dtype = CUDA_R_16F; y.min_recall = 0.69; return y; }); } /** Some of these should trigger no-basediff kernel. */ inline auto big_dims_small_lut() -> test_cases_t { return map<ivf_pq_inputs>(big_dims(), [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); uint32_t pq_len = 8; y.index_params.pq_dim = round_up_safe(div_rounding_up_safe(x.dim, pq_len), 4u); y.index_params.pq_bits = 6; y.search_params.lut_dtype = CUDA_R_8U; y.min_recall = 0.21; return y; }); } /** * A minimal set of tests to check various enum-like parameters. 
*/ inline auto enum_variety() -> test_cases_t { test_cases_t xs; #define ADD_CASE(f) \ do { \ xs.push_back({}); \ ([](ivf_pq_inputs & x) f)(xs[xs.size() - 1]); \ } while (0); ADD_CASE({ x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_CLUSTER; x.min_recall = 0.86; }); ADD_CASE({ x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_SUBSPACE; x.min_recall = 0.86; }); ADD_CASE({ x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_CLUSTER; x.index_params.pq_bits = 4; x.min_recall = 0.79; }); ADD_CASE({ x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_CLUSTER; x.index_params.pq_bits = 5; x.min_recall = 0.83; }); ADD_CASE({ x.index_params.pq_bits = 6; x.min_recall = 0.84; }); ADD_CASE({ x.index_params.pq_bits = 7; x.min_recall = 0.85; }); ADD_CASE({ x.index_params.pq_bits = 8; x.min_recall = 0.86; }); ADD_CASE({ x.index_params.force_random_rotation = true; x.min_recall = 0.86; }); ADD_CASE({ x.index_params.force_random_rotation = false; x.min_recall = 0.86; }); ADD_CASE({ x.search_params.lut_dtype = CUDA_R_32F; x.min_recall = 0.86; }); ADD_CASE({ x.search_params.lut_dtype = CUDA_R_16F; x.min_recall = 0.86; }); ADD_CASE({ x.search_params.lut_dtype = CUDA_R_8U; x.min_recall = 0.84; }); ADD_CASE({ x.search_params.internal_distance_dtype = CUDA_R_32F; x.min_recall = 0.86; }); ADD_CASE({ x.search_params.internal_distance_dtype = CUDA_R_16F; x.search_params.lut_dtype = CUDA_R_16F; x.min_recall = 0.86; }); return xs; } inline auto enum_variety_l2() -> test_cases_t { return map<ivf_pq_inputs>(enum_variety(), [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); y.index_params.metric = distance::DistanceType::L2Expanded; return y; }); } inline auto enum_variety_ip() -> test_cases_t { return map<ivf_pq_inputs>(enum_variety(), [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); if (y.min_recall.has_value()) { if (y.search_params.lut_dtype == CUDA_R_8U) { // InnerProduct score is signed, // thus we're forced to used signed 8-bit representation, // thus we have one 
bit less precision y.min_recall = y.min_recall.value() * 0.90; } else { // In other cases it seems to perform a little bit better, still worse than L2 y.min_recall = y.min_recall.value() * 0.94; } } y.index_params.metric = distance::DistanceType::InnerProduct; return y; }); } inline auto enum_variety_l2sqrt() -> test_cases_t { return map<ivf_pq_inputs>(enum_variety(), [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); y.index_params.metric = distance::DistanceType::L2SqrtExpanded; return y; }); } /** * Try different number of n_probes, some of which may trigger the non-fused version of the search * kernel. */ inline auto var_n_probes() -> test_cases_t { ivf_pq_inputs dflt; std::vector<uint32_t> xs; for (auto x = dflt.index_params.n_lists; x >= 1; x /= 2) { xs.push_back(x); } return map<ivf_pq_inputs>(xs, [](uint32_t n_probes) { ivf_pq_inputs x; x.search_params.n_probes = n_probes; return x; }); } /** * Try different number of nearest neighbours. * Values smaller than 32 test if the code behaves well when Capacity (== 32) does not change, * but `k <= Capacity` changes. * * Values between `32 and ivf_pq::detail::kMaxCapacity` test various instantiations of the * main kernel (Capacity-templated) * * Values above ivf_pq::detail::kMaxCapacity should trigger the non-fused version of the kernel * (manage_local_topk = false). * * Also we test here various values that are close-but-not-power-of-two to catch any problems * related to rounding/alignment. * * Note, we cannot control explicitly which instance of the search kernel to choose, hence it's * important to try a variety of different values of `k` to make sure all paths are triggered. * * Set the log level to DEBUG (5) or above to inspect the selected kernel instances. 
*/ inline auto var_k() -> test_cases_t { return map<ivf_pq_inputs, uint32_t>( {1, 2, 3, 5, 8, 15, 16, 32, 63, 65, 127, 128, 256, 257, 1023, 2048, 2049}, [](uint32_t k) { ivf_pq_inputs x; x.k = k; // when there's not enough data, try more cluster probes x.search_params.n_probes = max(x.search_params.n_probes, min(x.index_params.n_lists, k)); return x; }); } /** * Cases brought up from downstream projects. */ inline auto special_cases() -> test_cases_t { test_cases_t xs; #define ADD_CASE(f) \ do { \ xs.push_back({}); \ ([](ivf_pq_inputs & x) f)(xs[xs.size() - 1]); \ } while (0); ADD_CASE({ x.num_db_vecs = 1183514; x.dim = 100; x.num_queries = 10000; x.k = 10; x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_SUBSPACE; x.index_params.pq_dim = 10; x.index_params.pq_bits = 8; x.index_params.n_lists = 1024; x.search_params.n_probes = 50; }); ADD_CASE({ x.num_db_vecs = 10000; x.dim = 16; x.num_queries = 500; x.k = 128; x.index_params.metric = distance::DistanceType::L2Expanded; x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_SUBSPACE; x.index_params.pq_bits = 8; x.index_params.n_lists = 100; x.search_params.n_probes = 100; }); ADD_CASE({ x.num_db_vecs = 10000; x.dim = 16; x.num_queries = 500; x.k = 129; x.index_params.metric = distance::DistanceType::L2Expanded; x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_SUBSPACE; x.index_params.pq_bits = 8; x.index_params.n_lists = 100; x.search_params.n_probes = 100; }); ADD_CASE({ x.num_db_vecs = 4335; x.dim = 4; x.num_queries = 100000; x.k = 12; x.index_params.metric = distance::DistanceType::L2Expanded; x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_SUBSPACE; x.index_params.pq_dim = 2; x.index_params.pq_bits = 8; x.index_params.n_lists = 69; x.search_params.n_probes = 69; }); ADD_CASE({ x.num_db_vecs = 4335; x.dim = 4; x.num_queries = 100000; x.k = 12; x.index_params.metric = distance::DistanceType::L2Expanded; x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_CLUSTER; 
x.index_params.pq_dim = 2; x.index_params.pq_bits = 8; x.index_params.n_lists = 69; x.search_params.n_probes = 69; }); return xs; } /* Test instantiations */ #define TEST_BUILD_SEARCH(type) \ TEST_P(type, build_search) /* NOLINT */ \ { \ this->run([this]() { return this->build_only(); }); \ } #define TEST_BUILD_EXTEND_SEARCH(type) \ TEST_P(type, build_extend_search) /* NOLINT */ \ { \ this->run([this]() { return this->build_2_extends(); }); \ } #define TEST_BUILD_SERIALIZE_SEARCH(type) \ TEST_P(type, build_serialize_search) /* NOLINT */ \ { \ this->run([this]() { return this->build_serialize(); }); \ } #define INSTANTIATE(type, vals) \ INSTANTIATE_TEST_SUITE_P(IvfPq, type, ::testing::ValuesIn(vals)); /* NOLINT */ } // namespace cuvs::neighbors::ivf_pq
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/knn.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <cuvs/distance/distance_types.hpp> #include <cuvs/neighbors/brute_force.cuh> #include <raft/core/device_mdspan.hpp> #include <raft/core/logger.hpp> #include <rmm/device_buffer.hpp> #include <gtest/gtest.h> #include <cstddef> #include <iostream> #include <vector> namespace cuvs::neighbors::brute_force { struct KNNInputs { std::vector<std::vector<float>> input; int k; std::vector<int> labels; }; template <typename IdxT> RAFT_KERNEL build_actual_output( int* output, int n_rows, int k, const int* idx_labels, const IdxT* indices) { int element = threadIdx.x + blockDim.x * blockIdx.x; if (element >= n_rows * k) return; output[element] = idx_labels[indices[element]]; } RAFT_KERNEL build_expected_output(int* output, int n_rows, int k, const int* labels) { int row = threadIdx.x + blockDim.x * blockIdx.x; if (row >= n_rows) return; int cur_label = labels[row]; for (int i = 0; i < k; i++) { output[row * k + i] = cur_label; } } template <typename T, typename IdxT> class KNNTest : public ::testing::TestWithParam<KNNInputs> { public: KNNTest() : params_(::testing::TestWithParam<KNNInputs>::GetParam()), stream(resource::get_cuda_stream(handle)), actual_labels_(0, stream), expected_labels_(0, stream), input_(0, stream), search_data_(0, stream), indices_(0, stream), distances_(0, stream), search_labels_(0, stream) { 
} protected: void testBruteForce() { // #if (RAFT_ACTIVE_LEVEL >= RAFT_LEVEL_DEBUG) raft::print_device_vector("Input array: ", input_.data(), rows_ * cols_, std::cout); std::cout << "K: " << k_ << std::endl; raft::print_device_vector("Labels array: ", search_labels_.data(), rows_, std::cout); // #endif std::vector<device_matrix_view<const T, IdxT, row_major>> index = { make_device_matrix_view((const T*)(input_.data()), rows_, cols_)}; auto search = raft::make_device_matrix_view<const T, IdxT, row_major>( (const T*)(search_data_.data()), rows_, cols_); auto indices = raft::make_device_matrix_view<IdxT, IdxT, row_major>(indices_.data(), rows_, k_); auto distances = raft::make_device_matrix_view<T, IdxT, row_major>(distances_.data(), rows_, k_); auto metric = cuvs::distance::DistanceType::L2Unexpanded; knn(handle, index, search, indices, distances, metric, std::make_optional<IdxT>(0)); build_actual_output<<<raft::ceildiv(rows_ * k_, 32), 32, 0, stream>>>( actual_labels_.data(), rows_, k_, search_labels_.data(), indices_.data()); build_expected_output<<<raft::ceildiv(rows_ * k_, 32), 32, 0, stream>>>( expected_labels_.data(), rows_, k_, search_labels_.data()); ASSERT_TRUE(devArrMatch( expected_labels_.data(), actual_labels_.data(), rows_ * k_, raft::Compare<int>(), stream)); } void SetUp() override { rows_ = params_.input.size(); cols_ = params_.input[0].size(); k_ = params_.k; actual_labels_.resize(rows_ * k_, stream); expected_labels_.resize(rows_ * k_, stream); input_.resize(rows_ * cols_, stream); search_data_.resize(rows_ * cols_, stream); indices_.resize(rows_ * k_, stream); distances_.resize(rows_ * k_, stream); search_labels_.resize(rows_, stream); RAFT_CUDA_TRY( cudaMemsetAsync(actual_labels_.data(), 0, actual_labels_.size() * sizeof(int), stream)); RAFT_CUDA_TRY( cudaMemsetAsync(expected_labels_.data(), 0, expected_labels_.size() * sizeof(int), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(input_.data(), 0, input_.size() * sizeof(float), stream)); RAFT_CUDA_TRY( 
cudaMemsetAsync(search_data_.data(), 0, search_data_.size() * sizeof(float), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(indices_.data(), 0, indices_.size() * sizeof(IdxT), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(distances_.data(), 0, distances_.size() * sizeof(float), stream)); RAFT_CUDA_TRY( cudaMemsetAsync(search_labels_.data(), 0, search_labels_.size() * sizeof(int), stream)); std::vector<float> row_major_input; for (std::size_t i = 0; i < params_.input.size(); ++i) { for (std::size_t j = 0; j < params_.input[i].size(); ++j) { row_major_input.push_back(params_.input[i][j]); } } rmm::device_buffer input_d = rmm::device_buffer(row_major_input.data(), row_major_input.size() * sizeof(float), stream); float* input_ptr = static_cast<float*>(input_d.data()); rmm::device_buffer labels_d = rmm::device_buffer(params_.labels.data(), params_.labels.size() * sizeof(int), stream); int* labels_ptr = static_cast<int*>(labels_d.data()); raft::copy(input_.data(), input_ptr, rows_ * cols_, stream); raft::copy(search_data_.data(), input_ptr, rows_ * cols_, stream); raft::copy(search_labels_.data(), labels_ptr, rows_, stream); resource::sync_stream(handle, stream); } private: raft::resources handle; cudaStream_t stream; KNNInputs params_; int rows_; int cols_; rmm::device_uvector<float> input_; rmm::device_uvector<float> search_data_; rmm::device_uvector<IdxT> indices_; rmm::device_uvector<float> distances_; int k_; rmm::device_uvector<int> search_labels_; rmm::device_uvector<int> actual_labels_; rmm::device_uvector<int> expected_labels_; }; const std::vector<KNNInputs> inputs = { // 2D {{ {2.7810836, 2.550537003}, {1.465489372, 2.362125076}, {3.396561688, 4.400293529}, {1.38807019, 1.850220317}, {3.06407232, 3.005305973}, {7.627531214, 2.759262235}, {5.332441248, 2.088626775}, {6.922596716, 1.77106367}, {8.675418651, -0.242068655}, {7.673756466, 3.508563011}, }, 2, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1}}}; typedef KNNTest<float, int> KNNTestFint32_t; TEST_P(KNNTestFint32_t, BruteForce) { 
this->testBruteForce(); } typedef KNNTest<float, uint32_t> KNNTestFuint32_t; TEST_P(KNNTestFuint32_t, BruteForce) { this->testBruteForce(); } INSTANTIATE_TEST_CASE_P(KNNTest, KNNTestFint32_t, ::testing::ValuesIn(inputs)); INSTANTIATE_TEST_CASE_P(KNNTest, KNNTestFuint32_t, ::testing::ValuesIn(inputs)); } // namespace cuvs::neighbors::brute_force
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_utils.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuvs/distance/distance_types.hpp> #include <cuvs/spatial/knn/detail/ann_utils.cuh> #include <raft/core/device_mdarray.hpp> // raft::make_device_matrix #include <raft/core/resource/cuda_stream.hpp> #include <raft/matrix/copy.cuh> #include <raft/matrix/detail/select_k.cuh> #include <raft/util/cuda_utils.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/mr/device/device_memory_resource.hpp> #include <cuvs_internal/neighbors/naive_knn.cuh> #include "../test_utils.cuh" #include <gtest/gtest.h> #include <iostream> namespace cuvs::neighbors { struct print_dtype { cudaDataType_t value; }; inline auto operator<<(std::ostream& os, const print_dtype& p) -> std::ostream& { switch (p.value) { case CUDA_R_16F: os << "CUDA_R_16F"; break; case CUDA_C_16F: os << "CUDA_C_16F"; break; case CUDA_R_16BF: os << "CUDA_R_16BF"; break; case CUDA_C_16BF: os << "CUDA_C_16BF"; break; case CUDA_R_32F: os << "CUDA_R_32F"; break; case CUDA_C_32F: os << "CUDA_C_32F"; break; case CUDA_R_64F: os << "CUDA_R_64F"; break; case CUDA_C_64F: os << "CUDA_C_64F"; break; case CUDA_R_4I: os << "CUDA_R_4I"; break; case CUDA_C_4I: os << "CUDA_C_4I"; break; case CUDA_R_4U: os << "CUDA_R_4U"; break; case CUDA_C_4U: os << "CUDA_C_4U"; break; case CUDA_R_8I: os << "CUDA_R_8I"; break; case CUDA_C_8I: os << "CUDA_C_8I"; break; case CUDA_R_8U: os << "CUDA_R_8U"; break; 
case CUDA_C_8U: os << "CUDA_C_8U"; break; case CUDA_R_16I: os << "CUDA_R_16I"; break; case CUDA_C_16I: os << "CUDA_C_16I"; break; case CUDA_R_16U: os << "CUDA_R_16U"; break; case CUDA_C_16U: os << "CUDA_C_16U"; break; case CUDA_R_32I: os << "CUDA_R_32I"; break; case CUDA_C_32I: os << "CUDA_C_32I"; break; case CUDA_R_32U: os << "CUDA_R_32U"; break; case CUDA_C_32U: os << "CUDA_C_32U"; break; case CUDA_R_64I: os << "CUDA_R_64I"; break; case CUDA_C_64I: os << "CUDA_C_64I"; break; case CUDA_R_64U: os << "CUDA_R_64U"; break; case CUDA_C_64U: os << "CUDA_C_64U"; break; default: RAFT_FAIL("unreachable code"); } return os; } struct print_metric { cuvs::distance::DistanceType value; }; inline auto operator<<(std::ostream& os, const print_metric& p) -> std::ostream& { switch (p.value) { case cuvs::distance::L2Expanded: os << "distance::L2Expanded"; break; case cuvs::distance::L2SqrtExpanded: os << "distance::L2SqrtExpanded"; break; case cuvs::distance::CosineExpanded: os << "distance::CosineExpanded"; break; case cuvs::distance::L1: os << "distance::L1"; break; case cuvs::distance::L2Unexpanded: os << "distance::L2Unexpanded"; break; case cuvs::distance::L2SqrtUnexpanded: os << "distance::L2SqrtUnexpanded"; break; case cuvs::distance::InnerProduct: os << "distance::InnerProduct"; break; case cuvs::distance::Linf: os << "distance::Linf"; break; case cuvs::distance::Canberra: os << "distance::Canberra"; break; case cuvs::distance::LpUnexpanded: os << "distance::LpUnexpanded"; break; case cuvs::distance::CorrelationExpanded: os << "distance::CorrelationExpanded"; break; case cuvs::distance::JaccardExpanded: os << "distance::JaccardExpanded"; break; case cuvs::distance::HellingerExpanded: os << "distance::HellingerExpanded"; break; case cuvs::distance::Haversine: os << "distance::Haversine"; break; case cuvs::distance::BrayCurtis: os << "distance::BrayCurtis"; break; case cuvs::distance::JensenShannon: os << "distance::JensenShannon"; break; case 
cuvs::distance::HammingUnexpanded: os << "distance::HammingUnexpanded"; break; case cuvs::distance::KLDivergence: os << "distance::KLDivergence"; break; case cuvs::distance::RusselRaoExpanded: os << "distance::RusselRaoExpanded"; break; case cuvs::distance::DiceExpanded: os << "distance::DiceExpanded"; break; case cuvs::distance::Precomputed: os << "distance::Precomputed"; break; default: RAFT_FAIL("unreachable code"); } return os; } template <typename IdxT, typename DistT, typename CompareDist> struct idx_dist_pair { IdxT idx; DistT dist; CompareDist eq_compare; auto operator==(const idx_dist_pair<IdxT, DistT, CompareDist>& a) const -> bool { if (idx == a.idx) return true; if (eq_compare(dist, a.dist)) return true; return false; } idx_dist_pair(IdxT x, DistT y, CompareDist op) : idx(x), dist(y), eq_compare(op) {} }; /** Calculate recall value using only neighbor indices */ template <typename T> auto calc_recall(const std::vector<T>& expected_idx, const std::vector<T>& actual_idx, size_t rows, size_t cols) { size_t match_count = 0; size_t total_count = static_cast<size_t>(rows) * static_cast<size_t>(cols); for (size_t i = 0; i < rows; ++i) { for (size_t k = 0; k < cols; ++k) { size_t idx_k = i * cols + k; // row major assumption! auto act_idx = actual_idx[idx_k]; for (size_t j = 0; j < cols; ++j) { size_t idx = i * cols + j; // row major assumption! 
auto exp_idx = expected_idx[idx]; if (act_idx == exp_idx) { match_count++; break; } } } } return std::make_tuple( static_cast<double>(match_count) / static_cast<double>(total_count), match_count, total_count); } template <typename T> auto eval_recall(const std::vector<T>& expected_idx, const std::vector<T>& actual_idx, size_t rows, size_t cols, double eps, double min_recall) -> testing::AssertionResult { auto [actual_recall, match_count, total_count] = calc_recall(expected_idx, actual_idx, rows, cols); double error_margin = (actual_recall - min_recall) / std::max(1.0 - min_recall, eps); RAFT_LOG_INFO("Recall = %f (%zu/%zu), the error is %2.1f%% %s the threshold (eps = %f).", actual_recall, match_count, total_count, std::abs(error_margin * 100.0), error_margin < 0 ? "above" : "below", eps); if (actual_recall < min_recall - eps) { return testing::AssertionFailure() << "actual recall (" << actual_recall << ") is lower than the minimum expected recall (" << min_recall << "); eps = " << eps << ". "; } return testing::AssertionSuccess(); } /** Overload of calc_recall to account for distances */ template <typename T, typename DistT> auto calc_recall(const std::vector<T>& expected_idx, const std::vector<T>& actual_idx, const std::vector<DistT>& expected_dist, const std::vector<DistT>& actual_dist, size_t rows, size_t cols, double eps) { size_t match_count = 0; size_t total_count = static_cast<size_t>(rows) * static_cast<size_t>(cols); for (size_t i = 0; i < rows; ++i) { for (size_t k = 0; k < cols; ++k) { size_t idx_k = i * cols + k; // row major assumption! auto act_idx = actual_idx[idx_k]; auto act_dist = actual_dist[idx_k]; for (size_t j = 0; j < cols; ++j) { size_t idx = i * cols + j; // row major assumption! 
auto exp_idx = expected_idx[idx]; auto exp_dist = expected_dist[idx]; idx_dist_pair exp_kvp(exp_idx, exp_dist, raft::CompareApprox<DistT>(eps)); idx_dist_pair act_kvp(act_idx, act_dist, raft::CompareApprox<DistT>(eps)); if (exp_kvp == act_kvp) { match_count++; break; } } } } return std::make_tuple( static_cast<double>(match_count) / static_cast<double>(total_count), match_count, total_count); } /** same as eval_recall, but in case indices do not match, * then check distances as well, and accept match if actual dist is equal to expected_dist */ template <typename T, typename DistT> auto eval_neighbours(const std::vector<T>& expected_idx, const std::vector<T>& actual_idx, const std::vector<DistT>& expected_dist, const std::vector<DistT>& actual_dist, size_t rows, size_t cols, double eps, double min_recall) -> testing::AssertionResult { auto [actual_recall, match_count, total_count] = calc_recall(expected_idx, actual_idx, expected_dist, actual_dist, rows, cols, eps); double error_margin = (actual_recall - min_recall) / std::max(1.0 - min_recall, eps); RAFT_LOG_INFO("Recall = %f (%zu/%zu), the error is %2.1f%% %s the threshold (eps = %f).", actual_recall, match_count, total_count, std::abs(error_margin * 100.0), error_margin < 0 ? "above" : "below", eps); if (actual_recall < min_recall - eps) { return testing::AssertionFailure() << "actual recall (" << actual_recall << ") is lower than the minimum expected recall (" << min_recall << "); eps = " << eps << ". 
"; } return testing::AssertionSuccess(); } template <typename T, typename DistT, typename IdxT> auto eval_distances(raft::resources const& handle, const T* x, // dataset, n_rows * n_cols const T* queries, // n_queries * n_cols const IdxT* neighbors, // n_queries * k const DistT* distances, // n_queries *k size_t n_rows, size_t n_cols, size_t n_queries, uint32_t k, cuvs::distance::DistanceType metric, double eps) -> testing::AssertionResult { // for each vector, we calculate the actual distance to the k neighbors for (size_t i = 0; i < n_queries; i++) { auto y = raft::make_device_matrix<T, IdxT>(handle, k, n_cols); auto naive_dist = raft::make_device_matrix<DistT, IdxT>(handle, 1, k); raft::matrix::copy_rows<T, IdxT>( handle, make_device_matrix_view<const T, IdxT>(x, k, n_cols), y.view(), make_device_vector_view<const IdxT, IdxT>(neighbors + i * k, k)); dim3 block_dim(16, 32, 1); auto grid_y = static_cast<uint16_t>(std::min<size_t>(raft::ceildiv<size_t>(k, block_dim.y), 32768)); dim3 grid_dim(raft::ceildiv<size_t>(n_rows, block_dim.x), grid_y, 1); naive_distance_kernel<DistT, T, IdxT> <<<grid_dim, block_dim, 0, resource::get_cuda_stream(handle)>>>( naive_dist.data_handle(), queries + i * n_cols, y.data_handle(), 1, k, n_cols, metric); if (!devArrMatch(distances + i * k, naive_dist.data_handle(), naive_dist.size(), CompareApprox<float>(eps))) { std::cout << n_rows << "x" << n_cols << ", " << k << std::endl; std::cout << "query " << i << std::endl; print_vector(" indices", neighbors + i * k, k, std::cout); print_vector("n dist", distances + i * k, k, std::cout); print_vector("c dist", naive_dist.data_handle(), naive_dist.size(), std::cout); return testing::AssertionFailure(); } } return testing::AssertionSuccess(); } } // namespace cuvs::neighbors
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/tiled_knn.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "./ann_utils.cuh" #include "./knn_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <cuvs/distance/distance.cuh> // cuvs::distance::pairwise_distance #include <cuvs/distance/distance_types.hpp> #include <cuvs/neighbors/brute_force.cuh> #include <cuvs/neighbors/detail/knn_brute_force.cuh> // cuvs::neighbors::detail::brute_force_knn_impl #include <cuvs/neighbors/detail/selection_faiss.cuh> // cuvs::neighbors::detail::select_k #include <raft/core/device_mdspan.hpp> #include <raft/core/logger.hpp> #include <raft/linalg/transpose.cuh> #include <raft/matrix/init.cuh> #include <rmm/device_buffer.hpp> #include <gtest/gtest.h> #include <cstddef> #include <iostream> #include <vector> namespace cuvs::neighbors::brute_force { struct TiledKNNInputs { int num_queries; int num_db_vecs; int dim; int k; int row_tiles; int col_tiles; cuvs::distance::DistanceType metric; bool row_major; }; std::ostream& operator<<(std::ostream& os, const TiledKNNInputs& input) { return os << "num_queries:" << input.num_queries << " num_vecs:" << input.num_db_vecs << " dim:" << input.dim << " k:" << input.k << " row_tiles:" << input.row_tiles << " col_tiles:" << input.col_tiles << " metric:" << print_metric{input.metric} << " row_major:" << input.row_major; } template <typename T> class TiledKNNTest : public ::testing::TestWithParam<TiledKNNInputs> { public: 
TiledKNNTest() : stream_(resource::get_cuda_stream(handle_)), params_(::testing::TestWithParam<TiledKNNInputs>::GetParam()), database(params_.num_db_vecs * params_.dim, stream_), search_queries(params_.num_queries * params_.dim, stream_), raft_indices_(params_.num_queries * params_.k, stream_), raft_distances_(params_.num_queries * params_.k, stream_), ref_indices_(params_.num_queries * params_.k, stream_), ref_distances_(params_.num_queries * params_.k, stream_) { raft::matrix::fill( handle_, raft::make_device_matrix_view(database.data(), params_.num_db_vecs, params_.dim), T{0.0}); raft::matrix::fill( handle_, raft::make_device_matrix_view(search_queries.data(), params_.num_queries, params_.dim), T{0.0}); raft::matrix::fill( handle_, raft::make_device_matrix_view(raft_indices_.data(), params_.num_queries, params_.k), 0); raft::matrix::fill( handle_, raft::make_device_matrix_view(raft_distances_.data(), params_.num_queries, params_.k), T{0.0}); raft::matrix::fill( handle_, raft::make_device_matrix_view(ref_indices_.data(), params_.num_queries, params_.k), 0); raft::matrix::fill( handle_, raft::make_device_matrix_view(ref_distances_.data(), params_.num_queries, params_.k), T{0.0}); } protected: void testBruteForce() { float metric_arg = 3.0; // calculate the naive knn, by calculating the full pairwise distances and doing a k-select rmm::device_uvector<T> temp_distances(num_db_vecs * num_queries, stream_); rmm::device_uvector<char> workspace(0, stream_); distance::pairwise_distance(handle_, search_queries.data(), database.data(), temp_distances.data(), num_queries, num_db_vecs, dim, workspace, metric, params_.row_major, metric_arg); // setting the 'isRowMajor' flag in the pairwise distances api, not only sets // the inputs as colmajor - but also the output. 
this means we have to transpose in this // case auto temp_dist = temp_distances.data(); rmm::device_uvector<T> temp_row_major_dist(num_db_vecs * num_queries, stream_); if (!params_.row_major) { raft::linalg::transpose( handle_, temp_dist, temp_row_major_dist.data(), num_queries, num_db_vecs, stream_); temp_dist = temp_row_major_dist.data(); } cuvs::neighbors::detail::select_k<int, T>(temp_dist, nullptr, num_queries, num_db_vecs, ref_distances_.data(), ref_indices_.data(), cuvs::distance::is_min_close(metric), k_, stream_); if ((params_.row_tiles == 0) && (params_.col_tiles == 0)) { std::vector<T*> input{database.data()}; std::vector<size_t> sizes{static_cast<size_t>(num_db_vecs)}; neighbors::detail::brute_force_knn_impl<size_t, int, T>(handle_, input, sizes, dim, const_cast<T*>(search_queries.data()), num_queries, raft_indices_.data(), raft_distances_.data(), k_, params_.row_major, params_.row_major, nullptr, metric, metric_arg); } else { neighbors::detail::tiled_brute_force_knn(handle_, search_queries.data(), database.data(), num_queries, num_db_vecs, dim, k_, raft_distances_.data(), raft_indices_.data(), metric, metric_arg, params_.row_tiles, params_.col_tiles); } // verify. 
ASSERT_TRUE(cuvs::spatial::knn::devArrMatchKnnPair(ref_indices_.data(), raft_indices_.data(), ref_distances_.data(), raft_distances_.data(), num_queries, k_, float(0.001), stream_, true)); // Also test out the 'index' api - where we can use precomputed norms if (params_.row_major) { auto idx = cuvs::neighbors::brute_force::build<T>(handle_, raft::make_device_matrix_view<const T, int64_t>( database.data(), params_.num_db_vecs, params_.dim), metric, metric_arg); auto query_view = raft::make_device_matrix_view<const T, int64_t>( search_queries.data(), params_.num_queries, params_.dim); cuvs::neighbors::brute_force::search<T, int>( handle_, idx, query_view, raft::make_device_matrix_view<int, int64_t>( raft_indices_.data(), params_.num_queries, params_.k), raft::make_device_matrix_view<T, int64_t>( raft_distances_.data(), params_.num_queries, params_.k)); ASSERT_TRUE(cuvs::spatial::knn::devArrMatchKnnPair(ref_indices_.data(), raft_indices_.data(), ref_distances_.data(), raft_distances_.data(), num_queries, k_, float(0.001), stream_, true)); // also test out the batch api. 
First get new reference results (all k, up to a certain // max size) auto all_size = std::min(params_.num_db_vecs, 1024); auto all_indices = raft::make_device_matrix<int, int64_t>(handle_, num_queries, all_size); auto all_distances = raft::make_device_matrix<T, int64_t>(handle_, num_queries, all_size); cuvs::neighbors::brute_force::search<T, int>( handle_, idx, query_view, all_indices.view(), all_distances.view()); int64_t offset = 0; auto query = make_batch_k_query<T, int>(handle_, idx, query_view, k_); for (auto batch : *query) { auto batch_size = batch.batch_size(); auto indices = raft::make_device_matrix<int, int64_t>(handle_, num_queries, batch_size); auto distances = raft::make_device_matrix<T, int64_t>(handle_, num_queries, batch_size); matrix::slice_coordinates<int64_t> coords{0, offset, num_queries, offset + batch_size}; matrix::slice(handle_, raft::make_const_mdspan(all_indices.view()), indices.view(), coords); matrix::slice( handle_, raft::make_const_mdspan(all_distances.view()), distances.view(), coords); ASSERT_TRUE(cuvs::spatial::knn::devArrMatchKnnPair(indices.data_handle(), batch.indices().data_handle(), distances.data_handle(), batch.distances().data_handle(), num_queries, batch_size, float(0.001), stream_, true)); offset += batch_size; if (offset + batch_size > all_size) break; } // also test out with variable batch sizes offset = 0; int64_t batch_size = k_; query = make_batch_k_query<T, int>(handle_, idx, query_view, batch_size); for (auto it = query->begin(); it != query->end(); it.advance(batch_size)) { // batch_size could be less than requested (in the case of final batch). handle. 
ASSERT_TRUE(it->indices().extent(1) <= batch_size); batch_size = it->indices().extent(1); auto indices = raft::make_device_matrix<int, int64_t>(handle_, num_queries, batch_size); auto distances = raft::make_device_matrix<T, int64_t>(handle_, num_queries, batch_size); matrix::slice_coordinates<int64_t> coords{0, offset, num_queries, offset + batch_size}; matrix::slice(handle_, raft::make_const_mdspan(all_indices.view()), indices.view(), coords); matrix::slice( handle_, raft::make_const_mdspan(all_distances.view()), distances.view(), coords); ASSERT_TRUE(cuvs::spatial::knn::devArrMatchKnnPair(indices.data_handle(), it->indices().data_handle(), distances.data_handle(), it->distances().data_handle(), num_queries, batch_size, float(0.001), stream_, true)); offset += batch_size; if (offset + batch_size > all_size) break; batch_size += 2; } } } void SetUp() override { num_queries = params_.num_queries; num_db_vecs = params_.num_db_vecs; dim = params_.dim; k_ = params_.k; metric = params_.metric; unsigned long long int seed = 1234ULL; raft::random::RngState r(seed); // JensenShannon distance requires positive values T min_val = metric == cuvs::distance::DistanceType::JensenShannon ? 
T(0.0) : T(-1.0); uniform(handle_, r, database.data(), num_db_vecs * dim, min_val, T(1.0)); uniform(handle_, r, search_queries.data(), num_queries * dim, min_val, T(1.0)); } private: raft::resources handle_; cudaStream_t stream_ = 0; TiledKNNInputs params_; int num_queries; int num_db_vecs; int dim; rmm::device_uvector<T> database; rmm::device_uvector<T> search_queries; rmm::device_uvector<int> raft_indices_; rmm::device_uvector<T> raft_distances_; rmm::device_uvector<int> ref_indices_; rmm::device_uvector<T> ref_distances_; int k_; cuvs::distance::DistanceType metric; }; const std::vector<TiledKNNInputs> random_inputs = { {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::L2Expanded, true}, {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::L2Unexpanded, true}, {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::L2SqrtExpanded, true}, {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::L2SqrtUnexpanded, true}, {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::L1, true}, {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::Linf, true}, {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::InnerProduct, true}, {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::CorrelationExpanded, true}, {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::CosineExpanded, true}, {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::LpUnexpanded, true}, {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::JensenShannon, true}, {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::L2SqrtExpanded, true}, // BrayCurtis isn't currently supported by pairwise_distance api // {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::BrayCurtis}, {256, 512, 16, 8, 16, 8, cuvs::distance::DistanceType::Canberra, true}, {10000, 40000, 32, 30, 512, 1024, cuvs::distance::DistanceType::L2Expanded, true}, {345, 1023, 16, 128, 512, 1024, cuvs::distance::DistanceType::CosineExpanded, true}, {789, 20516, 64, 256, 512, 4096, cuvs::distance::DistanceType::L2SqrtExpanded, 
true}, // Test where the final column tile has < K items: {4, 12, 32, 6, 4, 8, cuvs::distance::DistanceType::L2Expanded, true}, // Test where passing column_tiles < K {1, 40, 32, 30, 1, 8, cuvs::distance::DistanceType::L2Expanded, true}, // Passing tile sizes of 0 means to use brute_force_knn_impl (instead of the // tiled_brute_force_knn api). {1000, 500000, 128, 128, 0, 0, cuvs::distance::DistanceType::L2Expanded, true}, {1000, 500000, 128, 128, 0, 0, cuvs::distance::DistanceType::L2Expanded, false}, {1000, 5000, 128, 128, 0, 0, cuvs::distance::DistanceType::LpUnexpanded, true}, {1000, 5000, 128, 128, 0, 0, cuvs::distance::DistanceType::L2SqrtExpanded, false}, {1000, 5000, 128, 128, 0, 0, cuvs::distance::DistanceType::InnerProduct, false}}; typedef TiledKNNTest<float> TiledKNNTestF; TEST_P(TiledKNNTestF, BruteForce) { this->testBruteForce(); } INSTANTIATE_TEST_CASE_P(TiledKNNTest, TiledKNNTestF, ::testing::ValuesIn(random_inputs)); } // namespace cuvs::neighbors::brute_force
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/spatial_data.h
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <vector> namespace raft { namespace spatial { // Latitude and longitude coordinates of 51 US states / territories std::vector<float> spatial_data = { 63.588753, -154.493062, 32.318231, -86.902298, 35.20105, -91.831833, 34.048928, -111.093731, 36.778261, -119.417932, 39.550051, -105.782067, 41.603221, -73.087749, 38.905985, -77.033418, 38.910832, -75.52767, 27.664827, -81.515754, 32.157435, -82.907123, 19.898682, -155.665857, 41.878003, -93.097702, 44.068202, -114.742041, 40.633125, -89.398528, 40.551217, -85.602364, 39.011902, -98.484246, 37.839333, -84.270018, 31.244823, -92.145024, 42.407211, -71.382437, 39.045755, -76.641271, 45.253783, -69.445469, 44.314844, -85.602364, 46.729553, -94.6859, 37.964253, -91.831833, 32.354668, -89.398528, 46.879682, -110.362566, 35.759573, -79.0193, 47.551493, -101.002012, 41.492537, -99.901813, 43.193852, -71.572395, 40.058324, -74.405661, 34.97273, -105.032363, 38.80261, -116.419389, 43.299428, -74.217933, 40.417287, -82.907123, 35.007752, -97.092877, 43.804133, -120.554201, 41.203322, -77.194525, 18.220833, -66.590149, 41.580095, -71.477429, 33.836081, -81.163725, 43.969515, -99.901813, 35.517491, -86.580447, 31.968599, -99.901813, 39.32098, -111.093731, 37.431573, -78.656894, 44.558803, -72.577841, 47.751074, -120.740139, 43.78444, -88.787868, 38.597626, -80.454903, 43.075968, -107.290284}; }; // namespace spatial }; // namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/knn_utils.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <memory> #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/util/cudart_utils.hpp> namespace cuvs::spatial::knn { template <typename IdxT, typename DistT, typename compareDist> struct idx_dist_pair { IdxT idx; DistT dist; compareDist eq_compare; bool operator==(const idx_dist_pair<IdxT, DistT, compareDist>& a) const { if (idx == a.idx) return true; if (eq_compare(dist, a.dist)) return true; return false; } idx_dist_pair(IdxT x, DistT y, compareDist op) : idx(x), dist(y), eq_compare(op) {} }; template <typename T, typename DistT> testing::AssertionResult devArrMatchKnnPair(const T* expected_idx, const T* actual_idx, const DistT* expected_dist, const DistT* actual_dist, size_t rows, size_t cols, const DistT eps, cudaStream_t stream = 0, bool sort_inputs = false) { size_t size = rows * cols; std::unique_ptr<T[]> exp_idx_h(new T[size]); std::unique_ptr<T[]> act_idx_h(new T[size]); std::unique_ptr<DistT[]> exp_dist_h(new DistT[size]); std::unique_ptr<DistT[]> act_dist_h(new DistT[size]); raft::update_host<T>(exp_idx_h.get(), expected_idx, size, stream); raft::update_host<T>(act_idx_h.get(), actual_idx, size, stream); raft::update_host<DistT>(exp_dist_h.get(), expected_dist, size, stream); raft::update_host<DistT>(act_dist_h.get(), actual_dist, size, stream); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); for (size_t i(0); i < rows; ++i) { 
std::vector<std::pair<DistT, T>> actual; std::vector<std::pair<DistT, T>> expected; for (size_t j(0); j < cols; ++j) { auto idx = i * cols + j; // row major assumption! auto exp_idx = exp_idx_h.get()[idx]; auto act_idx = act_idx_h.get()[idx]; auto exp_dist = exp_dist_h.get()[idx]; auto act_dist = act_dist_h.get()[idx]; actual.push_back(std::make_pair(act_dist, act_idx)); expected.push_back(std::make_pair(exp_dist, exp_idx)); } if (sort_inputs) { // inputs could be unsorted here, sort for comparison std::sort(actual.begin(), actual.end()); std::sort(expected.begin(), expected.end()); } for (size_t j(0); j < cols; ++j) { auto act = actual[j]; auto exp = expected[j]; idx_dist_pair exp_kvp(exp.second, exp.first, raft::CompareApprox<DistT>(eps)); idx_dist_pair act_kvp(act.second, act.first, raft::CompareApprox<DistT>(eps)); if (!(exp_kvp == act_kvp)) { return testing::AssertionFailure() << "actual=" << act_kvp.idx << "," << act_kvp.dist << "!=" << "expected" << exp_kvp.idx << "," << exp_kvp.dist << " @" << i << "," << j; } } } return testing::AssertionSuccess(); } } // namespace cuvs::spatial::knn
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/epsilon_neighborhood.cu
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <cuvs/spatial/knn/epsilon_neighborhood.cuh> #include <gtest/gtest.h> #include <memory> #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/random/make_blobs.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> namespace raft { namespace spatial { namespace knn { template <typename T, typename IdxT> struct EpsInputs { IdxT n_row, n_col, n_centers, n_batches; T eps; }; template <typename T, typename IdxT> ::std::ostream& operator<<(::std::ostream& os, const EpsInputs<T, IdxT>& p) { return os; } template <typename T, typename IdxT> class EpsNeighTest : public ::testing::TestWithParam<EpsInputs<T, IdxT>> { protected: EpsNeighTest() : data(0, resource::get_cuda_stream(handle)), adj(0, resource::get_cuda_stream(handle)), labels(0, resource::get_cuda_stream(handle)), vd(0, resource::get_cuda_stream(handle)) { } void SetUp() override { auto stream = resource::get_cuda_stream(handle); param = ::testing::TestWithParam<EpsInputs<T, IdxT>>::GetParam(); data.resize(param.n_row * param.n_col, stream); labels.resize(param.n_row, stream); batchSize = param.n_row / param.n_batches; adj.resize(param.n_row * batchSize, stream); vd.resize(batchSize + 1, stream); RAFT_CUDA_TRY(cudaMemsetAsync(vd.data(), 0, vd.size() * sizeof(IdxT), stream)); random::make_blobs<T, IdxT>(data.data(), 
labels.data(), param.n_row, param.n_col, param.n_centers, stream, true, nullptr, nullptr, T(0.01), false); } const raft::resources handle; EpsInputs<T, IdxT> param; cudaStream_t stream = 0; rmm::device_uvector<T> data; rmm::device_uvector<bool> adj; rmm::device_uvector<IdxT> labels, vd; IdxT batchSize; }; // class EpsNeighTest const std::vector<EpsInputs<float, int>> inputsfi = { {15000, 16, 5, 1, 2.f}, {14000, 16, 5, 1, 2.f}, {15000, 17, 5, 1, 2.f}, {14000, 17, 5, 1, 2.f}, {15000, 18, 5, 1, 2.f}, {14000, 18, 5, 1, 2.f}, {15000, 32, 5, 1, 2.f}, {14000, 32, 5, 1, 2.f}, {20000, 10000, 10, 1, 2.f}, {20000, 10000, 10, 2, 2.f}, }; typedef EpsNeighTest<float, int> EpsNeighTestFI; TEST_P(EpsNeighTestFI, Result) { for (int i = 0; i < param.n_batches; ++i) { RAFT_CUDA_TRY(cudaMemsetAsync(adj.data(), 0, sizeof(bool) * param.n_row * batchSize, stream)); RAFT_CUDA_TRY(cudaMemsetAsync(vd.data(), 0, sizeof(int) * (batchSize + 1), stream)); auto adj_view = make_device_matrix_view<bool, int>(adj.data(), param.n_row, batchSize); auto vd_view = make_device_vector_view<int, int>(vd.data(), batchSize + 1); auto x_view = make_device_matrix_view<float, int>(data.data(), param.n_row, param.n_col); auto y_view = make_device_matrix_view<float, int>( data.data() + (i * batchSize * param.n_col), batchSize, param.n_col); eps_neighbors_l2sq<float, int, int>( handle, x_view, y_view, adj_view, vd_view, param.eps * param.eps); ASSERT_TRUE(raft::devArrMatch( param.n_row / param.n_centers, vd.data(), batchSize, raft::Compare<int>(), stream)); } } INSTANTIATE_TEST_CASE_P(EpsNeighTests, EpsNeighTestFI, ::testing::ValuesIn(inputsfi)); }; // namespace knn }; // namespace spatial }; // namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/selection.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <cuvs/neighbors/detail/selection_faiss.cuh> #include <cuvs/neighbors/detail/selection_faiss_helpers.cuh> // kFaissMax #include <gtest/gtest.h> #include <numeric> #include <raft/core/resource/cuda_stream.hpp> #include <raft/random/rng.cuh> #include <raft/util/cudart_utils.hpp> #include "../test_utils.cuh" #include <cuvs/spatial/knn/knn.cuh> #include <raft/sparse/detail/utils.h> namespace cuvs::spatial::selection { using namespace raft; using namespace raft::sparse; struct SelectTestSpec { int n_inputs; int input_len; int k; int select_min; bool use_index_input = true; }; std::ostream& operator<<(std::ostream& os, const SelectTestSpec& ss) { os << "spec{size: " << ss.input_len << "*" << ss.n_inputs << ", k: " << ss.k; os << (ss.select_min ? 
"; min}" : "; max}"); return os; } template <typename IdxT> auto gen_simple_ids(int n_inputs, int input_len, const raft::resources& handle) -> std::vector<IdxT> { std::vector<IdxT> out(n_inputs * input_len); auto s = resource::get_cuda_stream(handle); rmm::device_uvector<IdxT> out_d(out.size(), s); iota_fill(out_d.data(), IdxT(n_inputs), IdxT(input_len), s); update_host(out.data(), out_d.data(), out.size(), s); s.synchronize(); return out; } template <typename KeyT, typename IdxT> struct SelectInOutSimple { public: bool not_supported = false; SelectInOutSimple(std::shared_ptr<raft::resources> handle, const SelectTestSpec& spec, const std::vector<KeyT>& in_dists, const std::vector<KeyT>& out_dists, const std::vector<IdxT>& out_ids) : in_dists_(in_dists), in_ids_(gen_simple_ids<IdxT>(spec.n_inputs, spec.input_len, *handle.get())), out_dists_(out_dists), out_ids_(out_ids), handle_(handle) { } auto get_in_dists() -> std::vector<KeyT>& { return in_dists_; } auto get_in_ids() -> std::vector<IdxT>& { return in_ids_; } auto get_out_dists() -> std::vector<KeyT>& { return out_dists_; } auto get_out_ids() -> std::vector<IdxT>& { return out_ids_; } private: std::shared_ptr<raft::resources> handle_; std::vector<KeyT> in_dists_; std::vector<IdxT> in_ids_; std::vector<KeyT> out_dists_; std::vector<IdxT> out_ids_; }; template <typename KeyT, typename IdxT> struct SelectInOutComputed { public: bool not_supported = false; SelectInOutComputed(std::shared_ptr<raft::resources> handle, const SelectTestSpec& spec, knn::SelectKAlgo algo, const std::vector<KeyT>& in_dists, const std::optional<std::vector<IdxT>>& in_ids = std::nullopt) : handle_(handle), in_dists_(in_dists), in_ids_(in_ids.value_or(gen_simple_ids<IdxT>(spec.n_inputs, spec.input_len, *handle.get()))), out_dists_(spec.n_inputs * spec.k), out_ids_(spec.n_inputs * spec.k) { // check if the size is supported by the algorithm switch (algo) { case knn::SelectKAlgo::WARP_SORT: if (spec.k > 
raft::matrix::detail::select::warpsort::kMaxCapacity) { not_supported = true; return; } break; case knn::SelectKAlgo::FAISS: if (spec.k > cuvs::neighbors::detail::kFaissMaxK<IdxT, KeyT>()) { not_supported = true; return; } break; default: break; } auto stream = resource::get_cuda_stream(*handle_); rmm::device_uvector<KeyT> in_dists_d(in_dists_.size(), stream); rmm::device_uvector<IdxT> in_ids_d(in_ids_.size(), stream); rmm::device_uvector<KeyT> out_dists_d(out_dists_.size(), stream); rmm::device_uvector<IdxT> out_ids_d(out_ids_.size(), stream); update_device(in_dists_d.data(), in_dists_.data(), in_dists_.size(), stream); update_device(in_ids_d.data(), in_ids_.data(), in_ids_.size(), stream); cuvs::spatial::knn::select_k<IdxT, KeyT>(in_dists_d.data(), spec.use_index_input ? in_ids_d.data() : nullptr, spec.n_inputs, spec.input_len, out_dists_d.data(), out_ids_d.data(), spec.select_min, spec.k, stream, algo); update_host(out_dists_.data(), out_dists_d.data(), out_dists_.size(), stream); update_host(out_ids_.data(), out_ids_d.data(), out_ids_.size(), stream); interruptible::synchronize(stream); auto p = topk_sort_permutation(out_dists_, out_ids_, spec.k, spec.select_min); apply_permutation(out_dists_, p); apply_permutation(out_ids_, p); } auto get_in_dists() -> std::vector<KeyT>& { return in_dists_; } auto get_in_ids() -> std::vector<IdxT>& { return in_ids_; } auto get_out_dists() -> std::vector<KeyT>& { return out_dists_; } auto get_out_ids() -> std::vector<IdxT>& { return out_ids_; } private: std::shared_ptr<raft::resources> handle_; std::vector<KeyT> in_dists_; std::vector<IdxT> in_ids_; std::vector<KeyT> out_dists_; std::vector<IdxT> out_ids_; auto topk_sort_permutation(const std::vector<KeyT>& vec, const std::vector<IdxT>& inds, int k, bool select_min) -> std::vector<IdxT> { std::vector<IdxT> p(vec.size()); std::iota(p.begin(), p.end(), 0); if (select_min) { std::sort(p.begin(), p.end(), [&vec, &inds, k](IdxT i, IdxT j) { const IdxT ik = i / k; const IdxT jk = j / 
k; if (ik == jk) { if (vec[i] == vec[j]) { return inds[i] < inds[j]; } return vec[i] < vec[j]; } return ik < jk; }); } else { std::sort(p.begin(), p.end(), [&vec, &inds, k](IdxT i, IdxT j) { const IdxT ik = i / k; const IdxT jk = j / k; if (ik == jk) { if (vec[i] == vec[j]) { return inds[i] < inds[j]; } return vec[i] > vec[j]; } return ik < jk; }); } return p; } template <typename T> void apply_permutation(std::vector<T>& vec, const std::vector<IdxT>& p) { for (auto i = IdxT(vec.size()) - 1; i > 0; i--) { auto j = p[i]; while (j > i) j = p[j]; std::swap(vec[j], vec[i]); } } }; template <typename InOut> using Params = std::tuple<SelectTestSpec, knn::SelectKAlgo, InOut, std::shared_ptr<raft::resources>>; template <typename KeyT, typename IdxT, template <typename, typename> typename ParamsReader> class SelectionTest : public testing::TestWithParam<typename ParamsReader<KeyT, IdxT>::ParamsIn> { protected: std::shared_ptr<raft::resources> handle_; const SelectTestSpec spec; const knn::SelectKAlgo algo; typename ParamsReader<KeyT, IdxT>::InOut ref; SelectInOutComputed<KeyT, IdxT> res; public: explicit SelectionTest(Params<typename ParamsReader<KeyT, IdxT>::InOut> ps) : handle_(std::get<3>(ps)), spec(std::get<0>(ps)), algo(std::get<1>(ps)), ref(std::get<2>(ps)), res(handle_, spec, algo, ref.get_in_dists(), ref.get_in_ids()) { } explicit SelectionTest(typename ParamsReader<KeyT, IdxT>::ParamsIn ps) : SelectionTest(ParamsReader<KeyT, IdxT>::read(ps)) { } SelectionTest() : SelectionTest(testing::TestWithParam<typename ParamsReader<KeyT, IdxT>::ParamsIn>::GetParam()) { } void run() { if (ref.not_supported || res.not_supported) { GTEST_SKIP(); } ASSERT_TRUE(hostVecMatch(ref.get_out_dists(), res.get_out_dists(), Compare<KeyT>())); // If the dists (keys) are the same, different corresponding ids may end up in the selection due // to non-deterministic nature of some implementations. 
// Tail of the result check: compare computed ids against the reference.
// Ids may legitimately differ between select-k algorithms as long as they
// refer to equal input distances (ties can be broken in any order).
auto& in_ids = ref.get_in_ids();
auto& in_dists = ref.get_in_dists();
auto compare_ids = [&in_ids, &in_dists](const IdxT& i, const IdxT& j) {
  if (i == j) return true;
  // Locate both ids in the reference input; std::find returns end() when absent.
  auto ix_i = size_t(std::find(in_ids.begin(), in_ids.end(), i) - in_ids.begin());
  auto ix_j = size_t(std::find(in_ids.begin(), in_ids.end(), j) - in_ids.begin());
  if (ix_i >= in_ids.size() || ix_j >= in_ids.size()) return false;
  auto dist_i = in_dists[ix_i];
  auto dist_j = in_dists[ix_j];
  // Different ids are acceptable only when they map to identical distances (a tie).
  if (dist_i == dist_j) return true;
  std::cout << "ERROR: ref[" << ix_i << "] = " << dist_i << " != "
            << "res[" << ix_j << "] = " << dist_j << std::endl;
  return false;
};
ASSERT_TRUE(hostVecMatch(ref.get_out_ids(), res.get_out_ids(), compare_ids));
}
};

// Adapter producing test parameters from an explicit
// (spec, input dists, expected dists, expected ids) tuple.
template <typename KeyT, typename IdxT>
struct params_simple {
  using InOut = SelectInOutSimple<KeyT, IdxT>;
  using Inputs =
    std::tuple<SelectTestSpec, std::vector<KeyT>, std::vector<KeyT>, std::vector<IdxT>>;
  using Handle = std::shared_ptr<raft::resources>;
  using ParamsIn = std::tuple<Inputs, knn::SelectKAlgo, Handle>;

  // Unpacks the tuple into the Params<InOut> consumed by SelectionTest.
  static auto read(ParamsIn ps) -> Params<InOut>
  {
    auto ins = std::get<0>(ps);
    auto algo = std::get<1>(ps);
    auto handle = std::get<2>(ps);
    return std::make_tuple(
      std::get<0>(ins),
      algo,
      SelectInOutSimple<KeyT, IdxT>(
        handle, std::get<0>(ins), std::get<1>(ins), std::get<2>(ins), std::get<3>(ins)),
      handle);
  }
};

// Hand-crafted inputs with known expected outputs. Each Inputs entry is
// (spec{n_inputs, input_len, k, select_min, use_index_input?}, in_dists,
//  expected out_dists, expected out_ids).
// NOTE(review): exact meaning of the two bools comes from SelectTestSpec
// (defined earlier in this file) — verify field names against its declaration.
auto inputs_simple_f = testing::Values(
  params_simple<float, int>::Inputs(
    {5, 5, 5, true, true},
    {5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0, 1.0, 4.0,
     5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0},
    {1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0,
     1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0},
    {4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 3, 0, 1, 4, 2, 4, 2, 1, 3, 0, 0, 2, 1, 4, 3}),
  params_simple<float, int>::Inputs(
    {5, 5, 3, true, true},
    {5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0, 1.0, 4.0,
     5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0},
    {1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0},
    {4, 3, 2, 0, 1, 2, 3, 0, 1, 4, 2, 1, 0, 2, 1}),
  params_simple<float, int>::Inputs(
    {5, 5, 5, true, false},
    {5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0, 1.0, 4.0,
     5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0},
    {1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0,
     1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0},
    {4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 3, 0, 1, 4, 2, 4, 2, 1, 3, 0, 0, 2, 1, 4, 3}),
  params_simple<float, int>::Inputs(
    {5, 5, 3, true, false},
    {5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0, 1.0, 4.0,
     5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0},
    {1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0},
    {4, 3, 2, 0, 1, 2, 3, 0, 1, 4, 2, 1, 0, 2, 1}),
  params_simple<float, int>::Inputs(
    {5, 7, 3, true, true},
    {5.0, 4.0, 3.0, 2.0, 1.3, 7.5, 19.0, 9.0, 2.0, 3.0, 3.0, 5.0, 6.0, 4.0,
     2.0, 3.0, 5.0, 1.0, 4.0, 1.0, 1.0, 5.0, 7.0, 2.5, 4.0, 7.0, 8.0, 8.0,
     1.0, 3.0, 2.0, 5.0, 4.0, 1.1, 1.2},
    {1.3, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 1.0, 1.0, 2.5, 4.0, 5.0, 1.0, 1.1, 1.2},
    {4, 3, 2, 1, 2, 3, 3, 5, 6, 2, 3, 0, 0, 5, 6}),
  params_simple<float, int>::Inputs(
    {1, 7, 3, true, true}, {2.0, 3.0, 5.0, 1.0, 4.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {3, 5, 6}),
  params_simple<float, int>::Inputs(
    {1, 7, 3, false, false}, {2.0, 3.0, 5.0, 1.0, 4.0, 1.0, 1.0}, {5.0, 4.0, 3.0}, {2, 4, 1}),
  params_simple<float, int>::Inputs(
    {1, 7, 3, false, true}, {2.0, 3.0, 5.0, 9.0, 4.0, 9.0, 9.0}, {9.0, 9.0, 9.0}, {3, 5, 6}),
  params_simple<float, int>::Inputs(
    {1, 130, 5, false, true},
    // 130 values: 19, then 29x(1,0), 1, 21x(1,2), the run 3..18, and a tail
    // ending in 20; the 5 largest are 20@129, 19@0, 18@117, 17@116, 16@115.
    {19, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,
     1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
     0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1,
     2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2,
     1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 4, 4, 2, 3,
     2, 3, 2, 3, 2, 3, 2, 20},
    {20, 19, 18, 17, 16},
    {129, 0, 117, 116, 115}),
  params_simple<float, int>::Inputs(
    {1, 130, 15, false, true},
    // Same 130 values as above, but selecting the 15 largest.
    {19, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,
     1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
     0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1,
     2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2,
     1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 4, 4, 2, 3,
     2, 3, 2, 3, 2, 3, 2, 20},
    {20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6},
    {129, 0, 117, 116, 115, 114, 113, 112, 111, 110, 109, 108, 107, 106, 105}));

// Run every hand-crafted case through all four select-k implementations.
typedef SelectionTest<float, int, params_simple> SimpleFloatInt;
TEST_P(SimpleFloatInt, Run) { run(); }
INSTANTIATE_TEST_CASE_P(SelectionTest,
                        SimpleFloatInt,
                        testing::Combine(inputs_simple_f,
                                         testing::Values(knn::SelectKAlgo::FAISS,
                                                         knn::SelectKAlgo::RADIX_8_BITS,
                                                         knn::SelectKAlgo::RADIX_11_BITS,
                                                         knn::SelectKAlgo::WARP_SORT),
                                         testing::Values(std::make_shared<raft::resources>())));

// Adapter producing test parameters from randomly generated distances; the
// reference output is computed with the select-k algorithm RefAlgo.
template <knn::SelectKAlgo RefAlgo>
struct with_ref {
  template <typename KeyT, typename IdxT>
  struct params_random {
    using InOut = SelectInOutComputed<KeyT, IdxT>;
    using Handle = std::shared_ptr<raft::resources>;
    using ParamsIn = std::tuple<SelectTestSpec, knn::SelectKAlgo, Handle>;

    static auto read(ParamsIn ps) -> Params<InOut>
    {
      auto spec = std::get<0>(ps);
      auto algo = std::get<1>(ps);
      auto handle = std::get<2>(ps);
      std::vector<KeyT> dists(spec.input_len * spec.n_inputs);
      {
        // Generate normally distributed distances on the device with a fixed
        // seed (42) for reproducibility, then copy them to the host.
        auto s = resource::get_cuda_stream(*handle);
        rmm::device_uvector<KeyT> dists_d(spec.input_len * spec.n_inputs, s);
        raft::random::RngState r(42);
        normal(*(handle.get()), r, dists_d.data(), dists_d.size(), KeyT(10.0), KeyT(100.0));
        update_host(dists.data(), dists_d.data(), dists_d.size(), s);
        s.synchronize();
      }
      return std::make_tuple(
        spec, algo, SelectInOutComputed<KeyT, IdxT>(handle, spec, RefAlgo, dists), handle);
    }
  };
};

// Many k values around warp/block-size boundaries (1..1700).
auto inputs_random_longlist = testing::Values(SelectTestSpec{1, 130, 15, false},
                                              SelectTestSpec{1, 128, 15, false},
                                              SelectTestSpec{20, 700, 1, true},
                                              SelectTestSpec{20, 700, 2, true},
                                              SelectTestSpec{20, 700, 3, true},
                                              SelectTestSpec{20, 700, 4, true},
                                              SelectTestSpec{20, 700, 5, true},
                                              SelectTestSpec{20, 700, 6, true},
                                              SelectTestSpec{20, 700, 7, true},
                                              SelectTestSpec{20, 700, 8, true},
                                              SelectTestSpec{20, 700, 9, true},
                                              SelectTestSpec{20, 700, 10, true, false},
                                              SelectTestSpec{20, 700, 11, true},
                                              SelectTestSpec{20, 700, 12, true},
                                              SelectTestSpec{20, 700, 16, true},
                                              SelectTestSpec{100, 1700, 17, true},
                                              SelectTestSpec{100, 1700, 31, true, false},
                                              SelectTestSpec{100, 1700, 32, false},
                                              SelectTestSpec{100, 1700, 33, false},
                                              SelectTestSpec{100, 1700, 63, false},
                                              SelectTestSpec{100, 1700, 64, false, false},
                                              SelectTestSpec{100, 1700, 65, false},
                                              SelectTestSpec{100, 1700, 255, true},
                                              SelectTestSpec{100, 1700, 256, true},
                                              SelectTestSpec{100, 1700, 511, false},
                                              SelectTestSpec{100, 1700, 512, true},
                                              SelectTestSpec{100, 1700, 1023, false, false},
                                              SelectTestSpec{100, 1700, 1024, true},
                                              SelectTestSpec{100, 1700, 1700, true});

// Large inputs (up to 10^8 elements per row) with small-to-moderate k.
auto inputs_random_largesize = testing::Values(SelectTestSpec{100, 100000, 1, true},
                                               SelectTestSpec{100, 100000, 2, true},
                                               SelectTestSpec{100, 100000, 3, true, false},
                                               SelectTestSpec{100, 100000, 7, true},
                                               SelectTestSpec{100, 100000, 16, true},
                                               SelectTestSpec{100, 100000, 31, true},
                                               SelectTestSpec{100, 100000, 32, true, false},
                                               SelectTestSpec{100, 100000, 60, true},
                                               SelectTestSpec{100, 100000, 100, true, false},
                                               SelectTestSpec{100, 100000, 200, true},
                                               SelectTestSpec{100000, 100, 100, false},
                                               SelectTestSpec{1, 100000000, 1, true},
                                               SelectTestSpec{1, 100000000, 16, false, false},
                                               SelectTestSpec{1, 100000000, 64, false},
                                               SelectTestSpec{1, 100000000, 128, true, false},
                                               SelectTestSpec{1, 100000000, 256, false, false});

// Very large k, including k == input_len (full sort).
auto inputs_random_largek = testing::Values(SelectTestSpec{100, 100000, 1000, true},
                                            SelectTestSpec{100, 100000, 2000, false},
                                            SelectTestSpec{100, 100000, 100000, true, false},
                                            SelectTestSpec{100, 100000, 2048, false},
                                            SelectTestSpec{100, 100000, 1237, true});

// Random-input tests with FAISS as the reference implementation.
typedef SelectionTest<float, int, with_ref<knn::SelectKAlgo::FAISS>::params_random>
  ReferencedRandomFloatInt;
TEST_P(ReferencedRandomFloatInt, Run) { run(); }
INSTANTIATE_TEST_CASE_P(SelectionTest,
                        ReferencedRandomFloatInt,
                        testing::Combine(inputs_random_longlist,
                                         testing::Values(knn::SelectKAlgo::RADIX_8_BITS,
                                                         knn::SelectKAlgo::RADIX_11_BITS,
                                                         knn::SelectKAlgo::WARP_SORT),
                                         testing::Values(std::make_shared<raft::resources>())));

typedef SelectionTest<double, size_t, with_ref<knn::SelectKAlgo::FAISS>::params_random>
  ReferencedRandomDoubleSizeT;
TEST_P(ReferencedRandomDoubleSizeT, Run) { run(); }
INSTANTIATE_TEST_CASE_P(SelectionTest,
                        ReferencedRandomDoubleSizeT,
                        testing::Combine(inputs_random_longlist,
                                         testing::Values(knn::SelectKAlgo::RADIX_8_BITS,
                                                         knn::SelectKAlgo::RADIX_11_BITS,
                                                         knn::SelectKAlgo::WARP_SORT),
                                         testing::Values(knn::SelectKAlgo::WARP_SORT) ==
                                             testing::Values(knn::SelectKAlgo::WARP_SORT)
                                           ? testing::Values(std::make_shared<raft::resources>())
                                           : testing::Values(std::make_shared<raft::resources>())));

typedef SelectionTest<double, int, with_ref<knn::SelectKAlgo::FAISS>::params_random>
  ReferencedRandomDoubleInt;
TEST_P(ReferencedRandomDoubleInt, LargeSize) { run(); }
INSTANTIATE_TEST_CASE_P(SelectionTest,
                        ReferencedRandomDoubleInt,
                        testing::Combine(inputs_random_largesize,
                                         testing::Values(knn::SelectKAlgo::WARP_SORT),
                                         testing::Values(std::make_shared<raft::resources>())));

/** TODO: Fix test failure in RAFT CI
 *
 *  SelectionTest/ReferencedRandomFloatSizeT.LargeK/0
 *  Indices do not match! ref[91628] = 131.359 != res[36504] = 158.438
 *  Actual: false (actual=36504 != expected=91628 @38999;
 *
 *  SelectionTest/ReferencedRandomFloatSizeT.LargeK/1
 *  ERROR: ref[57977] = 58.9079 != res[21973] = 54.9354
 *  Actual: false (actual=21973 != expected=57977 @107999;
 *
 */
// Large-k tests use RADIX_11_BITS as the reference and check FAISS against it.
typedef SelectionTest<float, size_t, with_ref<knn::SelectKAlgo::RADIX_11_BITS>::params_random>
  ReferencedRandomFloatSizeT;
TEST_P(ReferencedRandomFloatSizeT, LargeK) { run(); }
INSTANTIATE_TEST_CASE_P(SelectionTest,
                        ReferencedRandomFloatSizeT,
                        testing::Combine(inputs_random_largek,
                                         testing::Values(knn::SelectKAlgo::FAISS),
                                         testing::Values(std::make_shared<raft::resources>())));

}  // namespace cuvs::spatial::selection
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/neighbors/fused_l2_knn.cu
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "./knn_utils.cuh"

#include <raft/core/resource/cuda_stream.hpp>

#include <cuvs/distance/distance_types.hpp>
#include <cuvs/neighbors/brute_force.cuh>
#include <cuvs/spatial/knn/knn.cuh>
#include <raft/core/device_mdspan.hpp>
#include <raft/random/rng.cuh>

#include <cuvs/distance/distance.cuh>

#include <rmm/device_buffer.hpp>

#include <gtest/gtest.h>

#include <cstddef>
#include <iostream>
#include <vector>

namespace raft {
namespace spatial {
namespace knn {

// Problem sizes and metric for one fused-L2-kNN test case.
struct FusedL2KNNInputs {
  int num_queries;   // number of query vectors
  int num_db_vecs;   // number of database (index) vectors
  int dim;           // vector dimensionality
  int k;             // neighbors requested per query
  cuvs::distance::DistanceType metric_;
};

// Checks the fused L2 brute-force kNN against a naive reference:
// full pairwise distance matrix followed by a k-selection.
template <typename T>
class FusedL2KNNTest : public ::testing::TestWithParam<FusedL2KNNInputs> {
 public:
  FusedL2KNNTest()
    : stream_(resource::get_cuda_stream(handle_)),
      params_(::testing::TestWithParam<FusedL2KNNInputs>::GetParam()),
      database(params_.num_db_vecs * params_.dim, stream_),
      search_queries(params_.num_queries * params_.dim, stream_),
      raft_indices_(params_.num_queries * params_.k, stream_),
      raft_distances_(params_.num_queries * params_.k, stream_),
      ref_indices_(params_.num_queries * params_.k, stream_),
      ref_distances_(params_.num_queries * params_.k, stream_)
  {
    // Zero-initialize all buffers so that comparisons are deterministic even
    // for elements a kernel might not touch.
    RAFT_CUDA_TRY(cudaMemsetAsync(database.data(), 0, database.size() * sizeof(T), stream_));
    RAFT_CUDA_TRY(
      cudaMemsetAsync(search_queries.data(), 0, search_queries.size() * sizeof(T), stream_));
    RAFT_CUDA_TRY(
      cudaMemsetAsync(raft_indices_.data(), 0, raft_indices_.size() * sizeof(int64_t), stream_));
    RAFT_CUDA_TRY(
      cudaMemsetAsync(raft_distances_.data(), 0, raft_distances_.size() * sizeof(T), stream_));
    RAFT_CUDA_TRY(
      cudaMemsetAsync(ref_indices_.data(), 0, ref_indices_.size() * sizeof(int64_t), stream_));
    RAFT_CUDA_TRY(
      cudaMemsetAsync(ref_distances_.data(), 0, ref_distances_.size() * sizeof(T), stream_));
  }

 protected:
  void testBruteForce()
  {
    // calculate the naive knn, by calculating the full pairwise distances and doing a k-select
    rmm::device_uvector<T> temp_distances(num_db_vecs * num_queries, stream_);
    distance::pairwise_distance(
      handle_,
      raft::make_device_matrix_view<T, int32_t>(search_queries.data(), num_queries, dim),
      raft::make_device_matrix_view<T, int32_t>(database.data(), num_db_vecs, dim),
      raft::make_device_matrix_view<T, int32_t>(temp_distances.data(), num_queries, num_db_vecs),
      metric);

    // Reference: select the k smallest distances per query row
    // (the `true` argument selects minimum values).
    spatial::knn::select_k<int64_t, T>(temp_distances.data(),
                                       nullptr,
                                       num_queries,
                                       num_db_vecs,
                                       ref_distances_.data(),
                                       ref_indices_.data(),
                                       true,
                                       k_,
                                       stream_);

    // The implementation under test: fused distance computation + k-selection.
    auto index_view =
      raft::make_device_matrix_view<const T, int64_t>(database.data(), num_db_vecs, dim);
    auto query_view =
      raft::make_device_matrix_view<const T, int64_t>(search_queries.data(), num_queries, dim);
    auto out_indices_view =
      raft::make_device_matrix_view<int64_t, int64_t>(raft_indices_.data(), num_queries, k_);
    auto out_dists_view =
      raft::make_device_matrix_view<T, int64_t>(raft_distances_.data(), num_queries, k_);
    cuvs::neighbors::brute_force::fused_l2_knn(
      handle_, index_view, query_view, out_indices_view, out_dists_view, metric);

    // verify. Pairwise comparison with a distance tolerance of 0.001.
    ASSERT_TRUE(devArrMatchKnnPair(ref_indices_.data(),
                                   raft_indices_.data(),
                                   ref_distances_.data(),
                                   raft_distances_.data(),
                                   num_queries,
                                   k_,
                                   float(0.001),
                                   stream_));
  }

  void SetUp() override
  {
    num_queries = params_.num_queries;
    num_db_vecs = params_.num_db_vecs;
    dim         = params_.dim;
    k_          = params_.k;
    metric      = params_.metric_;

    // Fill database and queries with uniform random values in [-1, 1].
    // NOTE(review): `uniform` is presumably raft::random::uniform found via
    // ADL / the rng.cuh include — confirm.
    unsigned long long int seed = 1234ULL;
    raft::random::RngState r(seed);
    uniform(handle_, r, database.data(), num_db_vecs * dim, T(-1.0), T(1.0));
    uniform(handle_, r, search_queries.data(), num_queries * dim, T(-1.0), T(1.0));
  }

 private:
  raft::resources handle_;
  cudaStream_t stream_ = 0;
  FusedL2KNNInputs params_;
  int num_queries;
  int num_db_vecs;
  int dim;
  rmm::device_uvector<T> database;        // row-major [num_db_vecs, dim]
  rmm::device_uvector<T> search_queries;  // row-major [num_queries, dim]
  rmm::device_uvector<int64_t> raft_indices_;
  rmm::device_uvector<T> raft_distances_;
  rmm::device_uvector<int64_t> ref_indices_;
  rmm::device_uvector<T> ref_distances_;
  int k_;
  cuvs::distance::DistanceType metric;
};

const std::vector<FusedL2KNNInputs> inputs = {
  {100, 1000, 16, 10, cuvs::distance::DistanceType::L2Expanded},
  {256, 256, 30, 10, cuvs::distance::DistanceType::L2Expanded},
  {1000, 10000, 16, 10, cuvs::distance::DistanceType::L2Expanded},
  {100, 1000, 16, 50, cuvs::distance::DistanceType::L2Expanded},
  {20, 10000, 16, 10, cuvs::distance::DistanceType::L2Expanded},
  {1000, 10000, 16, 50, cuvs::distance::DistanceType::L2Expanded},
  {1000, 10000, 32, 50, cuvs::distance::DistanceType::L2Expanded},
  {10000, 40000, 32, 30, cuvs::distance::DistanceType::L2Expanded},
  // L2 unexpanded
  {100, 1000, 16, 10, cuvs::distance::DistanceType::L2Unexpanded},
  {1000, 10000, 16, 10, cuvs::distance::DistanceType::L2Unexpanded},
  {100, 1000, 16, 50, cuvs::distance::DistanceType::L2Unexpanded},
  {20, 10000, 16, 50, cuvs::distance::DistanceType::L2Unexpanded},
  {1000, 10000, 16, 50, cuvs::distance::DistanceType::L2Unexpanded},
  {1000, 10000, 32, 50, cuvs::distance::DistanceType::L2Unexpanded},
  {10000, 40000, 32, 30, cuvs::distance::DistanceType::L2Unexpanded},
};

typedef FusedL2KNNTest<float> FusedL2KNNTestF;
TEST_P(FusedL2KNNTestF, FusedBruteForce) { this->testBruteForce(); }

INSTANTIATE_TEST_CASE_P(FusedL2KNNTest, FusedL2KNNTestF, ::testing::ValuesIn(inputs));

}  // namespace knn
}  // namespace spatial
}  // namespace raft
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_cagra/test_uint8_t_uint32_t.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include "../ann_cagra.cuh"

namespace cuvs::neighbors::cagra {

// CAGRA ANN tests instantiated for uint8_t data with uint32_t indices.
// Fixtures and the `inputs` parameter list come from ../ann_cagra.cuh.
// `using` aliases replace the legacy `typedef`s for readability.
using AnnCagraTestU8_U32 = AnnCagraTest<float, std::uint8_t, std::uint32_t>;
TEST_P(AnnCagraTestU8_U32, AnnCagra) { this->testCagra(); }

using AnnCagraSortTestU8_U32 = AnnCagraSortTest<float, std::uint8_t, std::uint32_t>;
TEST_P(AnnCagraSortTestU8_U32, AnnCagraSort) { this->testCagraSort(); }

using AnnCagraFilterTestU8_U32 = AnnCagraFilterTest<float, std::uint8_t, std::uint32_t>;
// Bug fix: this test was registered under the name "AnnCagraSort" (copy-paste
// slip) even though it runs the filter checks; renamed to "AnnCagraFilter" to
// match the float and int8_t variants of this file.
TEST_P(AnnCagraFilterTestU8_U32, AnnCagraFilter)
{
  this->testCagraFilter();
  this->testCagraRemoved();
}

INSTANTIATE_TEST_CASE_P(AnnCagraTest, AnnCagraTestU8_U32, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AnnCagraSortTest, AnnCagraSortTestU8_U32, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AnnCagraFilterTest, AnnCagraFilterTestU8_U32, ::testing::ValuesIn(inputs));

}  // namespace cuvs::neighbors::cagra
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_cagra/test_float_uint32_t.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include "../ann_cagra.cuh"

namespace cuvs::neighbors::cagra {

// CAGRA ANN tests instantiated for float data with uint32_t indices.
// Fixtures and the `inputs` parameter list come from ../ann_cagra.cuh.
// `using` aliases replace the legacy `typedef`s for readability.
using AnnCagraTestF_U32 = AnnCagraTest<float, float, std::uint32_t>;
TEST_P(AnnCagraTestF_U32, AnnCagra) { this->testCagra(); }

using AnnCagraSortTestF_U32 = AnnCagraSortTest<float, float, std::uint32_t>;
TEST_P(AnnCagraSortTestF_U32, AnnCagraSort) { this->testCagraSort(); }

using AnnCagraFilterTestF_U32 = AnnCagraFilterTest<float, float, std::uint32_t>;
TEST_P(AnnCagraFilterTestF_U32, AnnCagraFilter)
{
  this->testCagraFilter();
  this->testCagraRemoved();
}

INSTANTIATE_TEST_CASE_P(AnnCagraTest, AnnCagraTestF_U32, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AnnCagraSortTest, AnnCagraSortTestF_U32, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AnnCagraFilterTest, AnnCagraFilterTestF_U32, ::testing::ValuesIn(inputs));

}  // namespace cuvs::neighbors::cagra
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_cagra/test_float_int64_t.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include "../ann_cagra.cuh"
// Declares the extern-template search-kernel instantiations for uint64_t/int64_t
// index types so this TU links against the pre-built kernels.
#include "search_kernel_uint64_t.cuh"

namespace cuvs::neighbors::cagra {

// CAGRA ANN test instantiated for float data with int64_t indices.
// `using` alias replaces the legacy `typedef` for readability.
using AnnCagraTestF_I64 = AnnCagraTest<float, float, std::int64_t>;
TEST_P(AnnCagraTestF_I64, AnnCagra) { this->testCagra(); }

INSTANTIATE_TEST_CASE_P(AnnCagraTest, AnnCagraTestF_I64, ::testing::ValuesIn(inputs));

}  // namespace cuvs::neighbors::cagra
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_cagra/test_int8_t_uint32_t.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include "../ann_cagra.cuh"

namespace cuvs::neighbors::cagra {

// CAGRA ANN tests instantiated for int8_t data with uint32_t indices.
// Fixtures and the `inputs` parameter list come from ../ann_cagra.cuh.
// `using` aliases replace the legacy `typedef`s for readability.
using AnnCagraTestI8_U32 = AnnCagraTest<float, std::int8_t, std::uint32_t>;
TEST_P(AnnCagraTestI8_U32, AnnCagra) { this->testCagra(); }

using AnnCagraSortTestI8_U32 = AnnCagraSortTest<float, std::int8_t, std::uint32_t>;
TEST_P(AnnCagraSortTestI8_U32, AnnCagraSort) { this->testCagraSort(); }

using AnnCagraFilterTestI8_U32 = AnnCagraFilterTest<float, std::int8_t, std::uint32_t>;
TEST_P(AnnCagraFilterTestI8_U32, AnnCagraFilter)
{
  this->testCagraFilter();
  this->testCagraRemoved();
}

INSTANTIATE_TEST_CASE_P(AnnCagraTest, AnnCagraTestI8_U32, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AnnCagraSortTest, AnnCagraSortTestI8_U32, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AnnCagraFilterTest, AnnCagraFilterTestI8_U32, ::testing::ValuesIn(inputs));

}  // namespace cuvs::neighbors::cagra
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_cagra/search_kernel_uint64_t.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuvs/neighbors/sample_filter_types.hpp>  // none_cagra_sample_filter
#include <raft/util/raft_explicit.hpp>             // RAFT_EXPLICIT

namespace cuvs::neighbors::cagra::detail {

namespace multi_cta_search {
// Declares (as `extern template`) the multi-CTA `select_and_run` entry point
// for one (team size, max dataset dim, data/index/distance type, filter)
// combination, so the including TU links against instantiations compiled
// elsewhere instead of instantiating the kernels itself.
#define instantiate_kernel_selection(                                                       \
  TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T)                 \
  extern template void                                                                      \
  select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>( \
    raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset,                 \
    raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph,                      \
    INDEX_T* const topk_indices_ptr,                                                        \
    DISTANCE_T* const topk_distances_ptr,                                                   \
    const DATA_T* const queries_ptr,                                                        \
    const uint32_t num_queries,                                                             \
    const INDEX_T* dev_seed_ptr,                                                            \
    uint32_t* const num_executed_iterations,                                                \
    uint32_t topk,                                                                          \
    uint32_t block_size,                                                                    \
    uint32_t result_buffer_size,                                                            \
    uint32_t smem_size,                                                                     \
    int64_t hash_bitlen,                                                                    \
    INDEX_T* hashmap_ptr,                                                                   \
    uint32_t num_cta_per_query,                                                             \
    uint32_t num_random_samplings,                                                          \
    uint64_t rand_xor_mask,                                                                 \
    uint32_t num_seeds,                                                                     \
    size_t itopk_size,                                                                      \
    size_t search_width,                                                                    \
    size_t min_iterations,                                                                  \
    size_t max_iterations,                                                                  \
    SAMPLE_FILTER_T sample_filter,                                                          \
    cudaStream_t stream);

// One declaration per (team size, max dataset dim) pair shipped for
// float data / uint64_t indices with the pass-through (none) filter.
instantiate_kernel_selection(
  32, 1024, float, uint64_t, float, cuvs::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  8, 128, float, uint64_t, float, cuvs::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  16, 256, float, uint64_t, float, cuvs::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  32, 512, float, uint64_t, float, cuvs::neighbors::filtering::none_cagra_sample_filter);

#undef instantiate_kernel_selection
}  // namespace multi_cta_search

namespace single_cta_search {
// Same idea as above, for the single-CTA search path (note the different
// parameter list: itopk-candidate count and small-hash controls instead of
// result-buffer size / CTA-per-query count).
#define instantiate_single_cta_select_and_run(                                              \
  TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T)                 \
  extern template void                                                                      \
  select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>( \
    raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset,                 \
    raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph,                      \
    INDEX_T* const topk_indices_ptr,                                                        \
    DISTANCE_T* const topk_distances_ptr,                                                   \
    const DATA_T* const queries_ptr,                                                        \
    const uint32_t num_queries,                                                             \
    const INDEX_T* dev_seed_ptr,                                                            \
    uint32_t* const num_executed_iterations,                                                \
    uint32_t topk,                                                                          \
    uint32_t num_itopk_candidates,                                                          \
    uint32_t block_size,                                                                    \
    uint32_t smem_size,                                                                     \
    int64_t hash_bitlen,                                                                    \
    INDEX_T* hashmap_ptr,                                                                   \
    size_t small_hash_bitlen,                                                               \
    size_t small_hash_reset_interval,                                                       \
    uint32_t num_random_samplings,                                                          \
    uint64_t rand_xor_mask,                                                                 \
    uint32_t num_seeds,                                                                     \
    size_t itopk_size,                                                                      \
    size_t search_width,                                                                    \
    size_t min_iterations,                                                                  \
    size_t max_iterations,                                                                  \
    SAMPLE_FILTER_T sample_filter,                                                          \
    cudaStream_t stream);

instantiate_single_cta_select_and_run(
  32, 1024, float, uint64_t, float, cuvs::neighbors::filtering::none_cagra_sample_filter);
instantiate_single_cta_select_and_run(
  8, 128, float, uint64_t, float, cuvs::neighbors::filtering::none_cagra_sample_filter);
instantiate_single_cta_select_and_run(
  16, 256, float, uint64_t, float, cuvs::neighbors::filtering::none_cagra_sample_filter);
instantiate_single_cta_select_and_run(
  32, 512, float, uint64_t, float, cuvs::neighbors::filtering::none_cagra_sample_filter);

// NOTE(review): unlike instantiate_kernel_selection above, this macro is not
// #undef'd after use — confirm whether that is intentional (it leaks into
// every TU that includes this header).
}  // namespace single_cta_search
}  // namespace cuvs::neighbors::cagra::detail
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_ivf_pq/test_uint8_t_int64_t.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../ann_ivf_pq.cuh"

namespace cuvs::neighbors::ivf_pq {

// IVF-PQ tests for uint8_t dataset vectors, float distances and int64_t
// output indices. The TEST_BUILD_* and INSTANTIATE macros are provided by
// ../ann_ivf_pq.cuh and register build/search and build/extend/search cases.
using f32_u08_i64 = ivf_pq_test<float, uint8_t, int64_t>;

TEST_BUILD_SEARCH(f32_u08_i64)
TEST_BUILD_EXTEND_SEARCH(f32_u08_i64)
// `+` presumably concatenates the parameter sets produced by the input
// generators in the shared header — verify against its operator definition.
INSTANTIATE(f32_u08_i64, small_dims_per_cluster() + enum_variety());

}  // namespace cuvs::neighbors::ivf_pq
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_ivf_pq/test_filter_int8_t_int64_t.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #undef RAFT_EXPLICIT_INSTANTIATE_ONLY // Enable instantiation of search with filter #include "../ann_ivf_pq.cuh" namespace cuvs::neighbors::ivf_pq { using f32_i08_i64_filter = ivf_pq_filter_test<float, int8_t, int64_t>; TEST_BUILD_SEARCH(f32_i08_i64_filter) INSTANTIATE(f32_i08_i64_filter, big_dims()); } // namespace cuvs::neighbors::ivf_pq
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_ivf_pq/test_float_uint32_t.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // XXX: the uint32_t instance is not compiled in libraft.so. So we allow // instantiating the template here. // // TODO: consider removing this test or consider adding an instantiation to the // library. #undef RAFT_EXPLICIT_INSTANTIATE_ONLY #include "../ann_ivf_pq.cuh" namespace cuvs::neighbors::ivf_pq { using f32_f32_u32 = ivf_pq_test<float, float, uint32_t>; using f32_f32_u32_filter = ivf_pq_filter_test<float, float, uint32_t>; TEST_BUILD_SEARCH(f32_f32_u32) TEST_BUILD_SERIALIZE_SEARCH(f32_f32_u32) INSTANTIATE(f32_f32_u32, defaults() + var_n_probes() + var_k() + special_cases()); TEST_BUILD_SEARCH(f32_f32_u32_filter) INSTANTIATE(f32_f32_u32_filter, defaults()); } // namespace cuvs::neighbors::ivf_pq
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_ivf_pq/test_int8_t_int64_t.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../ann_ivf_pq.cuh"

namespace cuvs::neighbors::ivf_pq {

// IVF-PQ tests for int8_t dataset vectors with int64_t output indices,
// covering build/search and serialize/search over defaults, big dimensions
// and varying k.
using f32_i08_i64 = ivf_pq_test<float, int8_t, int64_t>;

TEST_BUILD_SEARCH(f32_i08_i64)
TEST_BUILD_SERIALIZE_SEARCH(f32_i08_i64)
INSTANTIATE(f32_i08_i64, defaults() + big_dims() + var_k());

}  // namespace cuvs::neighbors::ivf_pq
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_ivf_pq/test_float_int64_t.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../ann_ivf_pq.cuh"

namespace cuvs::neighbors::ivf_pq {

// IVF-PQ tests for float dataset vectors with int64_t output indices,
// covering build/extend/search and serialize/search.
using f32_f32_i64 = ivf_pq_test<float, float, int64_t>;

TEST_BUILD_EXTEND_SEARCH(f32_f32_i64)
TEST_BUILD_SERIALIZE_SEARCH(f32_f32_i64)
INSTANTIATE(f32_f32_i64, defaults() + small_dims() + big_dims_moderate_lut());

}  // namespace cuvs::neighbors::ivf_pq
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_ivf_pq/test_filter_float_int64_t.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #undef RAFT_EXPLICIT_INSTANTIATE_ONLY // Enable instantiation of search with filter #include "../ann_ivf_pq.cuh" namespace cuvs::neighbors::ivf_pq { using f32_f32_i64_filter = ivf_pq_filter_test<float, float, int64_t>; TEST_BUILD_SEARCH(f32_f32_i64_filter) INSTANTIATE(f32_f32_i64_filter, defaults() + big_dims_moderate_lut()); } // namespace cuvs::neighbors::ivf_pq
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_nn_descent/test_uint8_t_uint32_t.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include "../ann_nn_descent.cuh"

namespace cuvs::neighbors::experimental::nn_descent {

// NN-Descent test instantiated for uint8_t data with uint32_t indices.
// `using` alias replaces the legacy `typedef` for readability; the fixture
// and `inputs` list come from ../ann_nn_descent.cuh.
using AnnNNDescentTestUI8_U32 = AnnNNDescentTest<float, uint8_t, std::uint32_t>;
TEST_P(AnnNNDescentTestUI8_U32, AnnNNDescent) { this->testNNDescent(); }

INSTANTIATE_TEST_CASE_P(AnnNNDescentTest, AnnNNDescentTestUI8_U32, ::testing::ValuesIn(inputs));

}  // namespace cuvs::neighbors::experimental::nn_descent
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_nn_descent/test_float_uint32_t.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "../ann_nn_descent.cuh" namespace cuvs::neighbors::experimental::nn_descent { typedef AnnNNDescentTest<float, float, std::uint32_t> AnnNNDescentTestF_U32; TEST_P(AnnNNDescentTestF_U32, AnnNNDescent) { this->testNNDescent(); } INSTANTIATE_TEST_CASE_P(AnnNNDescentTest, AnnNNDescentTestF_U32, ::testing::ValuesIn(inputs)); } // namespace cuvs::neighbors::experimental::nn_descent
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_nn_descent/test_int8_t_uint32_t.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "../ann_nn_descent.cuh" namespace cuvs::neighbors::experimental::nn_descent { typedef AnnNNDescentTest<float, int8_t, std::uint32_t> AnnNNDescentTestI8_U32; TEST_P(AnnNNDescentTestI8_U32, AnnNNDescent) { this->testNNDescent(); } INSTANTIATE_TEST_CASE_P(AnnNNDescentTest, AnnNNDescentTestI8_U32, ::testing::ValuesIn(inputs)); } // namespace cuvs::neighbors::experimental::nn_descent
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_ivf_flat/test_uint8_t_int64_t.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "../ann_ivf_flat.cuh" namespace cuvs::neighbors::ivf_flat { typedef AnnIVFFlatTest<float, uint8_t, std::int64_t> AnnIVFFlatTestF_uint8; TEST_P(AnnIVFFlatTestF_uint8, AnnIVFFlat) { this->testIVFFlat(); } INSTANTIATE_TEST_CASE_P(AnnIVFFlatTest, AnnIVFFlatTestF_uint8, ::testing::ValuesIn(inputs)); } // namespace cuvs::neighbors::ivf_flat
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_ivf_flat/test_int8_t_int64_t.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "../ann_ivf_flat.cuh" namespace cuvs::neighbors::ivf_flat { typedef AnnIVFFlatTest<float, int8_t, std::int64_t> AnnIVFFlatTestF_int8; TEST_P(AnnIVFFlatTestF_int8, AnnIVFFlat) { this->testIVFFlat(); } INSTANTIATE_TEST_CASE_P(AnnIVFFlatTest, AnnIVFFlatTestF_int8, ::testing::ValuesIn(inputs)); } // namespace cuvs::neighbors::ivf_flat
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_ivf_flat/test_float_int64_t.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "../ann_ivf_flat.cuh" namespace cuvs::neighbors::ivf_flat { typedef AnnIVFFlatTest<float, float, std::int64_t> AnnIVFFlatTestF; TEST_P(AnnIVFFlatTestF, AnnIVFFlat) { this->testIVFFlat(); this->testPacker(); } INSTANTIATE_TEST_CASE_P(AnnIVFFlatTest, AnnIVFFlatTestF, ::testing::ValuesIn(inputs)); } // namespace cuvs::neighbors::ivf_flat
0
rapidsai_public_repos/cuvs/cpp/test/neighbors
rapidsai_public_repos/cuvs/cpp/test/neighbors/ann_ivf_flat/test_filter_float_int64_t.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #undef RAFT_EXPLICIT_INSTANTIATE_ONLY // Enable instantiation of search with filter #include "../ann_ivf_flat.cuh" namespace cuvs::neighbors::ivf_flat { typedef AnnIVFFlatTest<float, float, std::int64_t> AnnIVFFlatFilterTestF; TEST_P(AnnIVFFlatFilterTestF, AnnIVFFlatFilter) { this->testFilter(); } INSTANTIATE_TEST_CASE_P(AnnIVFFlatTest, AnnIVFFlatFilterTestF, ::testing::ValuesIn(inputs)); } // namespace cuvs::neighbors::ivf_flat
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/entropy.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <algorithm> #include <gtest/gtest.h> #include <iostream> #include <raft/core/interruptible.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/entropy.cuh> #include <raft/util/cudart_utils.hpp> #include <random> #include <rmm/device_uvector.hpp> namespace raft { namespace stats { struct entropyParam { int nElements; int lowerLabelRange; int upperLabelRange; double tolerance; }; // test fixture class template <typename T> class entropyTest : public ::testing::TestWithParam<entropyParam> { protected: // the constructor entropyTest() : stream(resource::get_cuda_stream(handle)) {} void SetUp() override { // getting the parameters params = ::testing::TestWithParam<entropyParam>::GetParam(); nElements = params.nElements; lowerLabelRange = params.lowerLabelRange; upperLabelRange = params.upperLabelRange; // generating random value test input std::vector<int> arr1(nElements, 0); std::random_device rd; std::default_random_engine dre(rd()); std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange); std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); }); // generating the golden output int numUniqueClasses = upperLabelRange - lowerLabelRange + 1; int* p = (int*)malloc(numUniqueClasses * sizeof(int)); memset(p, 0, numUniqueClasses * sizeof(int)); // calculating the bincount array for (int i 
= 0; i < nElements; ++i) { ++p[arr1[i] - lowerLabelRange]; } // calculating the aggregate entropy for (int i = 0; i < numUniqueClasses; ++i) { if (p[i]) truthEntropy += -1 * (double(p[i]) / double(nElements)) * (log(double(p[i])) - log(double(nElements))); } // allocating and initializing memory to the GPU rmm::device_uvector<T> clusterArray(nElements, stream); raft::update_device(clusterArray.data(), &arr1[0], (int)nElements, stream); raft::interruptible::synchronize(stream); // calling the entropy CUDA implementation computedEntropy = raft::stats::entropy(handle, raft::make_device_vector_view<const T>(clusterArray.data(), nElements), lowerLabelRange, upperLabelRange); } raft::resources handle; // declaring the data values entropyParam params; T lowerLabelRange, upperLabelRange; int nElements = 0; double truthEntropy = 0; double computedEntropy = 0; cudaStream_t stream = 0; }; // setting test parameter values const std::vector<entropyParam> inputs = {{199, 1, 10, 0.000001}, {200, 15, 100, 0.000001}, {100, 1, 20, 0.000001}, {10, 1, 10, 0.000001}, {198, 1, 100, 0.000001}, {300, 3, 99, 0.000001}, {199, 1, 10, 0.000001}, {200, 15, 100, 0.000001}, {100, 1, 20, 0.000001}, {10, 1, 10, 0.000001}, {198, 1, 100, 0.000001}, {300, 3, 99, 0.000001}}; // writing the test suite typedef entropyTest<int> entropyTestClass; TEST_P(entropyTestClass, Result) { ASSERT_NEAR(computedEntropy, truthEntropy, params.tolerance); } INSTANTIATE_TEST_CASE_P(entropy, entropyTestClass, ::testing::ValuesIn(inputs)); } // end namespace stats } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/trustworthiness.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <cuvs/distance/distance.cuh> #include <gtest/gtest.h> #include <iostream> #include <raft/core/resource/cuda_stream.hpp> #include <raft/util/cudart_utils.hpp> #include <raft/stats/trustworthiness_score.cuh> #include <vector> namespace raft { namespace stats { class TrustworthinessScoreTest : public ::testing::Test { public: TrustworthinessScoreTest() : d_X(0, resource::get_cuda_stream(handle)), d_X_embedded(0, resource::get_cuda_stream(handle)) { } protected: void basicTest() { std::vector<float> X = { 5.6142087, 8.59787, -4.382763, -3.6452143, -5.8816037, -0.6330313, 4.6920023, -0.79210913, 0.6106314, 2.1210914, 5.919943, -8.43784, -6.4819884, 0.41001374, -6.1052523, -4.0825715, -5.314755, -2.834671, 5.751696, -6.5012555, -0.4719201, -7.53353, 7.6789393, -1.4959852, -5.5977287, -9.564147, 1.2902534, 3.559834, -6.7659483, 8.265964, 4.595404, 9.133477, -6.1553917, -6.319754, -2.9039452, 4.4150834, -3.094395, -4.426273, 9.584571, -5.64133, 6.6209483, 7.4044604, 3.9620576, 5.639907, 10.33007, -0.8792053, 5.143776, -7.464049, 1.2448754, -5.6300974, 5.4518576, 4.119535, 6.749645, 7.627064, -7.2298336, 1.9681473, -6.9083176, 6.404673, 0.07186685, 9.0994835, 8.51037, -8.986389, 0.40534487, 2.115397, 4.086756, 1.2284287, -2.6272132, 0.06527536, -9.587425, -7.206078, 7.864875, 7.4397306, -6.9233336, -2.6643622, 3.3466153, 7.0408177, -3.6069896, 
-9.971769, 4.4075623, 7.9063697, 2.559074, 4.323717, 1.6867131, -1.1576937, -9.893141, -3.251416, -7.4889135, -4.0588717, -2.73338, -7.4852257, 3.4460473, 9.759119, -5.4680476, -4.722435, -8.032619, -1.4598992, 4.227361, 3.135568, 1.1950601, 1.1982028, 6.998856, -6.131138, -6.6921015, 0.5361224, -7.1213965, -5.6104236, -7.2212887, -2.2710054, 8.544764, -6.0254574, 1.4582269, -5.5587835, 8.031556, -0.26328218, -5.2591386, -9.262641, 2.8691363, 5.299787, -9.209455, 8.523085, 5.180329, 10.655528, -5.7171874, -6.7739563, -3.6306462, 4.067106, -1.5912259, -3.2345476, 8.042973, -3.6364832, 4.1242137, 9.886953, 5.4743724, 6.3058076, 9.369645, -0.5175337, 4.9859877, -7.879498, 1.358422, -4.147944, 3.8984218, 5.894656, 6.4903927, 8.702036, -8.023722, 2.802145, -7.748032, 5.8461113, -0.34215945, 11.298865, 1.4107164, -9.949621, -1.6257563, -10.655836, 2.4528909, 1.1570255, 5.170669, 2.8398793, 7.1838694, 9.088459, 2.631155, 3.964414, 2.8769252, 0.04198391, -0.16993195, 3.6747139, -2.8377378, 6.1782537, 10.759618, -4.5642614, -8.522967, 0.8614642, 6.623416, -1.029324, 5.5488334, -7.804511, 2.128833, 7.9042315, 7.789576, -2.7944536, 0.72271067, -10.511495, -0.78634536, -10.661714, 2.9376361, 1.9148129, 6.22859, 0.26264945, 8.028384, 6.8743043, 0.9351067, 7.0690722, 4.2846055, 1.4134506, -0.18144785, 5.2778087, -1.7140163, 9.217541, 8.602799, -2.6537218, -7.8377395, 1.1244944, 5.4540544, -0.38506773, 3.9885726, -10.76455, 1.4440702, 9.136163, 6.664117, -5.7046547, 8.038592, -9.229767, -0.2799413, 3.6064725, 4.187257, 1.0516582, -2.0707326, -0.7615968, -8.561018, -3.7831352, 10.300297, 5.332594, -6.5880876, -4.2508664, 1.7985519, 5.7226253, -4.1223383, -9.6697855, 1.4885283, 7.524974, 1.7206005, 4.890457, 3.7264557, 0.4428284, -9.922455, -4.250455, -6.4410596, -2.107994, -1.4109765, -6.1325397, 0.32883006, 6.0489736, 7.7257385, -8.281174, 1.0129383, -10.792166, 8.378851, 10.802716, 9.848448, -9.188757, 1.3151443, 1.9971865, -2.521849, 4.3268294, -7.775683, -2.2902298, 3.0824065, 
-7.17559, 9.6100855, 7.3965735, -10.476525, 5.895973, -3.6974669, -7.6688933, 1.7354839, -7.4045196, -1.7992063, -4.0394845, 5.2471714, -2.250571, 2.528036, -8.343515, -2.2374575, -10.019771, 0.73371273, 3.1853926, 2.7994921, 2.6637669, 7.620401, 7.515571, 0.68636256, 5.834537, 4.650282, -1.0362619, 0.4461701, 3.7870514, -4.1340904, 7.202998, 9.736904, -3.005512, -8.920467, 1.1228397, 6.2598724, 1.2812365, 4.5442104, -8.791537, 0.92113096, 8.464749, 8.359035, -4.3923397, 1.2252625, -10.1986475, -1.4409319, -10.013967, 3.9071581, 1.683064, 4.877419, 1.6570637, 9.559105, 7.3546534, 0.36635467, 5.220211, 4.6303267, 0.6601065, 0.16149978, 3.8818731, -3.4438233, 8.42085, 8.659159, -3.0935583, -8.039611, 2.3060374, 5.134666, 1.0458113, 6.0190983, -9.143728, 0.99048865, 9.210842, 6.670241, -5.9614363, 0.8747396, 7.078824, 8.067469, -10.314754, 0.45977542, -9.28306, 9.1838665, 9.318644, 7.189082, -11.092555, 1.0320464, 3.882163, 0.10953151, 7.9029684, -6.9068265, -1.3526366, 5.3996363, -8.430931, 11.452577, 6.39663, -11.090514, 4.6662245, -3.1268113, -8.357452, 2.2276728, -10.357126, -0.9291848, -3.4193344, 3.1289792, -2.5030103, 6.772719, 11.457757, -4.2125936, -6.684548, -4.7611327, 3.6960156, -2.3030636, -3.0591488, 10.452471, -4.1267314, 5.66614, 7.501461, 5.072407, 6.636537, 8.990381, -0.2559256, 4.737867, -6.2149944, 2.535682, -5.5484023, 5.7113924, 3.4742818, 7.9915137, 7.0052586, -7.156467, 1.4354781, -8.286235, 5.7523417, -2.4175215, 9.678009, 0.05066403, -9.645226, -2.2658763, -9.518178, 4.493372, 2.3232365, 2.1659086, 0.42507997, 8.360246, 8.23535, 2.6878164, 5.236947, 3.4924245, -0.6089895, 0.8884741, 4.359464, -4.6073823, 7.83441, 8.958755, -3.4690795, -9.182282, 1.2478025, 5.6311107, -1.2408862, 3.6316886, -8.684654, 2.1078515, 7.2813864, 7.9265943, -3.6135032, 0.4571511, 8.493568, 10.496853, -7.432897, 0.8625995, -9.607528, 7.2899456, 8.83158, 8.908199, -10.300263, 1.1451302, 3.7871468, -0.97040755, 5.7664757, -8.9688, -2.146672, 5.9641485, -6.2908535, 
10.126465, 6.1553903, -12.066902, 6.301596, -5.0419583, -8.228695, 2.4879954, -8.918582, -3.7434099, -4.1593685, 3.7431836, -1.1704745, 0.5524103, 9.109399, 9.571567, -11.209955, 1.2462777, -9.554555, 9.091726, 11.477966, 7.630937, -10.450911, 1.9205878, 5.358983, -0.44546837, 6.7611346, -9.74753, -0.5939732, 3.8892255, -6.437991, 10.294727, 5.6723895, -10.7883, 6.192348, -5.293862, -10.811491, 1.0194173, -7.074576, -3.192368, -2.5231771, 4.2791643, -0.53309685, 0.501366, 9.636625, 7.710316, -6.4219728, 1.0975566, -8.218886, 6.9011984, 9.873679, 8.903804, -9.316832, 1.2404599, 4.9039655, 1.2272617, 4.541515, -5.2753224, -3.2196746, 3.1303136, -7.285681, 9.041425, 5.6417427, -9.93667, 5.7548947, -5.113397, -8.544622, 4.182665, -7.7709813, -3.2810235, -3.312072, 3.8900535, -2.0604856, 6.709082, -8.461194, 1.2666026, 4.8770437, 2.6955879, 3.0340345, -1.1614609, -3.536341, -7.090382, -5.36146, 9.072544, 6.4554095, -4.4728956, -1.88395, 3.1095037, 8.782348, -3.316743, -8.65248, 1.6802986, 8.186188, 2.1783829, 4.931278, 4.158475, 1.4033595, -11.320101, -3.7084908, -6.740436, -2.5555193, -1.0451177, -6.5569925, 0.82810307, 8.505919, 8.332857, -9.488569, -0.21588463, -8.056692, 8.493993, 7.6401625, 8.812983, -9.377281, 2.4369764, 3.1766508, 0.6300803, 5.6666765, -7.913654, -0.42301777, 4.506412, -7.8954244, 10.904591, 5.042256, -9.626183, 8.347351, -3.605006, -7.923387, 1.1024277, -8.705793, -2.5151258, -2.5066147, 4.0515003, -2.060757, 6.2635093, 8.286584, -6.0509276, -6.76452, -3.1158175, 1.6578803, -1.4608748, -1.24211, 8.151246, -4.2970877, 6.093071, 7.4911637, 4.51018, 4.8425875, 9.211085, -2.4386222, 4.5830803, -5.6079445, 2.3713675, -4.0707507, 3.1787417, 5.462342, 6.915912, 6.3928423, -7.2970796, 5.0112796, -9.140893, 4.9990606, 0.38391754, 7.7088532, 1.9340848, 8.18833, 8.16617, -9.42086, -0.3388326, -9.659727, 8.243045, 8.099073, 8.439428, -7.038694, 2.1077902, 3.3866816, -1.9975324, 7.4972878, -7.2525196, -1.553731, 4.08758, -6.6922374, 9.50525, 4.026735, 
-9.243538, 7.2740564, -3.9319072, -6.3228955, 1.6693478, -7.923119, -3.7423058, -2.2813146, 5.3469067, -1.8285407, 3.3118162, 8.826356, -4.4641976, -6.4751124, -9.200089, -2.519147, 4.225298, 2.4105988, -0.4344186, 0.53441775, 5.2836394, -8.2816105, -4.996147, -1.6870759, -7.8543897, -3.9788852, -7.0346904, -3.1289773, 7.4567637, -5.6227813, 1.0709786, -8.866012, 8.427324, -1.1755563, -5.789216, -8.197835, 5.3342214, 6.0646234, -6.8975716, 7.717031, 3.480355, 8.312151, -3.6645212, -3.0976524, -8.090359, -1.9176173, 2.4257212, 1.9700835, 0.4098958, 2.1341088, 7.652741, -9.9595585, -5.989757, 0.10119354, -7.935407, -5.792786, -5.22783, -4.318978, 5.414037, -6.4621663, 1.670883, -6.9224787, 8.696932, -2.0214002, -6.6681314, -8.326418, 4.9049683, 5.4442496, -6.403739, 7.5822453, 7.0972915, -9.072851, -0.23897195, 1.7662339, 5.3096304, 1.983179, -2.222645, -0.34700772, -9.094717, -6.107907, 9.525174, 8.1550665, -5.6940084, -4.1636486, 1.7360662, 8.528821, -3.7299833, -9.341266, 2.608542, 9.108706, 0.7978509, 4.2488184, 2.454484, 0.9446999, -10.106636, -3.8973773, -6.6566644, -4.5647273, -0.99837756, -6.568582, 9.324853, -7.9020953, 2.0910501, 2.2896829, 1.6790711, 1.3159255, -3.5258796, 1.8898442, -8.105812, -4.924962, 8.771129, 7.1202874, -5.991957, -3.4106019, 2.4450088, 7.796387, -3.055946, -7.8971434, 1.9856719, 9.001636, 1.8511922, 3.019749, 3.1227696, 0.4822102, -10.021213, -3.530504, -6.225959, -3.0029628, -1.7881511, -7.3879776, 1.3925704, 9.499782, -3.7318087, -3.7074296, -7.7466836, -1.5284524, 4.0535855, 3.112011, 0.10340207, -0.5429599, 6.67026, -9.155924, -4.924038, 0.64248866, -10.0103655, -3.2742946, -4.850029, -3.6707063, 8.586258, -5.855605, 4.906918, -6.7813993, 7.9938135, -2.5473144, -5.688948, -7.822478, 2.1421318, 4.66659, -9.701272, 9.549149, 0.8998125, -8.651497, -0.56899565, -8.639817, 2.3088377, 2.1264515, 3.2764478, 2.341989, 8.594338, 8.630639, 2.8440373, 6.2043204, 4.433932, 0.6320018, -1.8179281, 5.09452, -1.5741565, 8.153934, 8.744339, 
-3.6945698, -8.883078, 1.5329908, 5.2745943, 0.44716078, 4.8809066, -7.9594903, 1.134374, 9.233994, 6.5528665, -4.520542, 9.477355, -8.622195, -0.23191702, 2.0485356, 3.9379985, 1.5916302, -1.4516805, -0.0843819, -7.8554378, -5.88308, 7.999766, 6.2572145, -5.585321, -4.0097756, 0.42382592, 6.160884, -3.631315, -8.333449, 2.770595, 7.8495173, 3.3331623, 4.940415, 3.6207345, -0.037517, -11.034698, -3.185103, -6.614664, -3.2177854, -2.0792234, -6.8879867, 7.821685, -8.455084, 1.0784642, 4.0033927, 2.7343264, 2.6052725, -4.1224284, -0.89305353, -6.8267674, -4.9715133, 8.880253, 5.6994023, -5.9695024, -4.9181266, 1.3017995, 7.972617, -3.9452884, -10.424556, 2.4504194, 6.21529, 0.93840516, 4.2070026, 6.159839, 0.91979957, -8.706724, -4.317946, -6.6823545, -3.0388, -2.464262, -7.3716645, 1.3926703, 6.544412, -5.6251183, -5.122411, -8.622049, -2.3905911, 3.9138813, 1.9779967, -0.05011125, 0.13310997, 7.229751, -9.742043, -8.08724, 1.2426697, -7.9230795, -3.3162494, -7.129571, -3.5488048, 7.4701195, -5.2357526, 0.5917681, -6.272206, 6.342328, -2.909731, -4.991607, -8.845513, 3.3228495, 7.033246, -7.8180246, 8.214469, 6.3910093, 9.185153, -6.20472, -7.713809, -3.8481297, 3.5579286, 0.7078448, -3.2893546, 7.384514, -4.448121, 3.0104196, 9.492943, 8.024847, 4.9114385, 9.965594, -3.014036, 5.182494, -5.8806014, 2.5312455, -5.9926524, 4.474469, 6.3717875, 6.993105, 6.493093, -8.935534, 3.004074, -8.055647, 8.315765, -1.3026813, 8.250377, 0.02606229, 6.8508425, 9.655665, -7.0116496, -0.41060972, -10.049198, 7.897801, 6.7791023, 8.3362, -9.821014, 2.491157, 3.5160472, -1.6228812, 7.398063, -8.769123, -3.1743705, 3.2827861, -6.497855, 10.831924, 5.2761307, -9.704417, 4.3817043, -3.9841619, -8.111647, 1.1883026, -8.115312, -2.9240117, -5.8879666, 4.20928, -0.3587938, 6.935672, -10.177582, 0.48819053, 3.1250648, 2.9306343, 3.082544, -3.477687, -1.3768549, -7.4922366, -3.756631, 10.039836, 3.6670392, -5.9761434, -4.4728765, 3.244255, 7.027899, -2.3806512, -10.4100685, 1.605716, 
7.7953773, 0.5408159, 1.7156523, 3.824097, -1.0604783, -10.142124, -5.246805, -6.5283823, -4.579547, -2.42714, -6.709197, 2.7782338, 7.33353, -6.454507, -2.9929368, -7.8362985, -2.695445, 2.4900775, 1.6682367, 0.4641757, -1.0495365, 6.9631333, -9.291356, -8.23837, -0.34263706, -8.275113, -2.8454232, -5.0864096, -2.681942, 7.5450225, -6.2517986, 0.06810654, -6.470652, 4.9042645, -1.8369255, -6.6937943, -7.9625087, 2.8510258, 6.180508, -8.282598, 7.919079, 1.4897474, 6.7217417, -4.2459426, -4.114431, -8.375707, -2.143264, 5.6972933, 1.5574739, 0.39375135, 1.7930849, 5.1737595, -7.826241, -5.160268, -0.80433255, -7.839536, -5.2620406, -5.4643164, -3.185536, 6.620315, -7.065227, 1.0524757, -6.125088, 5.7126627, -1.6161644, -3.852159, -9.164279, 2.7005782, 5.946544, -8.468236, 8.2145405, 1.1035942, 6.590157, -4.0461283, -4.8090615, -7.6702685, -2.1121511, 5.1147075, 1.6128504, 2.0064135, 1.0544407, 6.0038295, -7.8282537, -4.801278, 0.32349443, -8.0649805, -4.372714, -5.61336, -5.21394, 8.176595, -5.4753284, 1.7800134, -8.267283, 7.2133374, -0.16594432, -6.317046, -9.490406, 4.1261597, 5.473317, -7.7551675, 7.007468, 7.478628, -8.801905, 0.10975724, 3.5478222, 4.797803, 1.3825226, -3.357369, 0.99262005, -6.94877, -5.4781394, 9.632604, 5.7492557, -5.9014316, -3.1632116, 2.340859, 8.708098, -3.1255999, -8.848661, 4.5612836, 8.455157, 0.73460823, 4.112301, 4.392744, -0.30759293, -6.8036823, -3.0331545, -8.269506, -2.82415, -0.9411246, -5.993506, 2.1618164, -8.716055, -0.7432543, -10.255819, 3.095418, 2.5131428, 4.752442, 0.9907621, 7.8279433, 7.85814, 0.50430876, 5.2840405, 4.457291, 0.03330028, -0.40692952, 3.9244103, -2.117118, 7.6977615, 8.759009, -4.2157164, -9.136053, 3.247858, 4.668686, 0.76162136, 5.3833632, -9.231471, 0.44309422, 8.380872, 6.7211227, -3.091507, 2.173508, -9.038242, -1.3666698, -9.819077, 0.37825826, 2.3898845, 4.2440815, 1.9161536, 7.24787, 6.9124637, 1.6238527, 5.1140285, 3.1935842, 1.02845, -1.1273454, 5.638998, -2.497932, 8.342559, 8.586319, 
-2.9069402, -7.6387944, 3.5975037, 4.4115705, 0.41506064, 4.9078383, -9.68327, 1.8159529, 9.744613, 8.40622, -4.495336, 9.244892, -8.789869, 1.3158468, 4.018167, 3.3922846, 2.652022, -2.7495477, 0.2528986, -8.268324, -6.004913, 10.428784, 6.6580734, -5.537176, -1.7177434, 2.7504628, 6.7735, -2.4454272, -9.998361, 2.9483433, 6.8266654, 2.3787718, 4.472637, 2.5871701, 0.7355365, -7.7027745, -4.1879907, -7.172832, -4.1843605, -0.03646783, -5.419406, 6.958486, 11.011111, -7.1821184, -7.956423, -3.408451, 4.6850276, -2.348787, -4.398289, 6.9787564, -3.8324208, 5.967827, 8.433518, 4.660108, 5.5657144, 9.964243, -1.3515275, 6.404833, -6.4805903, 2.4379845, -6.0816774, 1.752272, 5.3771873, 6.9613523, 6.9788294, -6.3894596, 3.7521114, -6.8034263, 6.4458385, -0.7233525, 10.512529, 4.362273, 9.231461, -6.3382263, -7.659, -3.461823, 4.71463, 0.17817476, -3.685746, 7.2962036, -4.6489477, 5.218017, 11.546999, 4.7218375, 6.8498397, 9.281103, -3.900459, 6.844054, -7.0886965, -0.05019227, -8.233724, 5.5808983, 6.374517, 8.321048, 7.969449, -7.3478637, 1.4917561, -8.003144, 4.780668, -1.1981848, 7.753739, 2.0260844, -8.880096, -3.4258451, -7.141975, 1.9637157, 1.814725, 5.311151, 1.4831505, 7.8483663, 7.257948, 1.395786, 6.417756, 5.376912, 0.59505713, 0.00062552, 3.6634305, -4.159713, 7.3571978, 10.966816, -2.5419605, -8.466229, 1.904205, 5.6338267, -0.52567476, 5.59736, -8.361799, 0.5009981, 8.460681, 7.3891273, -3.5272243, 5.0552278, 9.921456, -7.69693, -7.286378, -1.9198836, 3.1666567, -2.5832257, -2.2445817, 9.888111, -5.076563, 5.677401, 7.497946, 5.662994, 5.414262, 8.566503, -2.5530663, 7.1032815, -6.0612082, 1.3419591, -4.9595256, 4.3377542, 4.3790717, 6.793512, 8.383502, -7.1278043, 3.3240774, -9.379446, 6.838661, -0.81241214, 8.694813, 0.79141915, 7.632467, 8.575382, -8.533798, 0.28954387, -7.5675836, 5.8653326, 8.97235, 7.1649346, -10.575289, 0.9359381, 5.02381, -0.5609511, 5.543464, -7.69131, -2.1792977, 2.4729247, -6.1917787, 10.373678, 7.6549597, -8.809486, 5.5657206, 
-3.3169382, -8.042887, 2.0874746, -7.079005, -3.33398, -3.6843317, 4.0172358, -2.0754814, 1.1726758, 7.4618697, 6.9483604, -8.469206, 0.7401797, -10.318176, 8.384557, 10.5476265, 9.146971, -9.250223, 0.6290606, 4.4941425, -0.7514017, 7.2271705, -8.309598, -1.4761636, 4.0140634, -6.021102, 9.132852, 5.6610966, -11.249811, 8.359293, -1.9445792, -7.7393436, -0.3931331, -8.824441, -2.5995944, -2.5714035, 4.140213, -3.6863053, 5.517265, 9.020411, -4.9286127, -7.871219, -3.7446704, 2.5179656, -1.4543481, -2.2703636, 7.010597, -3.6436229, 6.753862, 7.4129915, 7.1406755, 5.653706, 9.5445175, 0.15698843, 4.761813, -7.698002, 1.6870106, -4.5410123, 4.171763, 5.3747005, 6.341021, 7.456738, -8.231657, 2.763487, -9.208167, 6.676799, -1.1957736, 10.062605, 4.0975976, 7.312957, -2.4981596, -2.9658387, -8.150425, -2.1075552, 2.64375, 1.6636052, 1.1483809, 0.09276015, 5.8556347, -7.8481026, -5.9913163, -0.02840613, -9.937289, -1.0486673, -5.2340155, -3.83912, 7.7165728, -8.409944, 0.80863273, -6.9119215, 7.5712357, 0.36031485, -6.056131, -8.470033, 1.8678337, 3.0121377, -7.3096333, 8.205484, 5.262654, 8.774514, -4.7603083, -7.2096143, -4.437014, 3.6080024, -1.624254, -4.2787876, 8.880863, -4.8984556, 5.1782074, 9.944454, 3.911282, 3.5396595, 8.867042, -1.2006199, 5.393288, -5.6455317, 0.7829499, -4.0338907, 2.479272, 6.5080743, 8.582535, 7.0097537, -6.9823785, 3.984318, -7.225381, 5.3135114, -1.0391048, 8.951443, -0.70119005, -8.510742, -0.42949116, -10.9224825, 2.8176029, 1.6800792, 5.778404, 1.7269998, 7.1975236, 7.7258267, 2.7632928, 5.3399253, 3.4650044, 0.01971426, -1.6468811, 4.114996, -1.5110453, 6.8689218, 8.269899, -3.1568048, -7.0344677, 1.2911975, 5.950357, 0.19028673, 4.657226, -8.199647, 2.246055, 8.989509, 5.3101015, -4.2400866}; std::vector<float> X_embedded = { -0.41849962, -0.53906363, 0.46958843, -0.35832694, -0.23779503, -0.29751351, -0.01072748, -0.21353109, -0.54769957, -0.55086273, 0.37093949, -0.12714292, -0.06639574, -0.36098689, -0.13060696, -0.07362658, 
-1.01205945, -0.39285606, 0.2864089, -0.32031146, -0.19595343, 0.08900568, -0.04813879, -0.06563424, -0.42655188, -0.69014251, 0.51459783, -0.1942696, -0.07767916, -0.6119386, 0.04813685, -0.22557008, -0.56890118, -0.60293794, 0.43429622, -0.09240723, -0.00624062, -0.25800395, -0.1886092, 0.01655941, -0.01961523, -0.14147359, 0.41414487, -0.8512944, -0.61199242, -0.18586016, 0.14024924, -0.41635606, -0.02890144, 0.1065347, 0.39700791, -1.14060664, -0.95313865, 0.14416681, 0.17306046, -0.53189689, -0.98987544, -0.67918193, 0.41787854, -0.20878236, -0.06612862, 0.03502904, -0.03765266, -0.0980606, -0.00971657, 0.29432917, 0.36575687, -1.1645509, -0.89094597, 0.03718805, 0.2310573, -0.38345811, -0.10401925, -0.10653082, 0.38469055, -0.88302094, -0.80197543, 0.03548668, 0.02775662, -0.54374295, 0.03379983, 0.00923623, 0.29320273, -1.05263519, -0.93360096, 0.03778313, 0.12360487, -0.56437284, 0.0644429, 0.33432651, 0.36450726, -1.22978747, -0.83822101, -0.18796451, 0.34888434, -0.3801491, -0.45327303, -0.59747899, 0.39697698, -0.15616602, -0.06159166, -0.40301991, -0.11725303, -0.11913263, -0.12406619, -0.11227967, 0.43083835, -0.90535849, -0.81646025, 0.10012121, -0.0141237, -0.63747931, 0.04805023, 0.34190539, 0.50725192, -1.17861414, -0.74641538, -0.09333111, 0.27992678, -0.56214809, 0.04970971, 0.36249384, 0.57705611, -1.16913795, -0.69849908, 0.10957897, 0.27983218, -0.62088525, 0.0410459, 0.23973398, 0.40960434, -1.14183664, -0.83321381, 0.02149482, 0.21720445, -0.49869928, -0.95655465, -0.51680422, 0.45761383, -0.08351214, -0.12151554, 0.00819737, -0.20813803, -0.01055793, 0.25319234, 0.36154974, 0.1822421, -1.15837133, -0.92209691, -0.0501582, 0.08535917, -0.54003763, -1.08675635, -1.04009593, 0.09408128, 0.07009826, -0.01762833, -0.19180447, -0.18029785, -0.20342001, 0.04034991, 0.1814747, 0.36906669, -1.13532007, -0.8852452, 0.0782818, 0.16825101, -0.50301319, -0.29128098, -0.65341312, 0.51484352, -0.38758236, -0.22531103, -0.55021971, 0.10804344, -0.3521522, 
-0.38849035, -0.74110794, 0.53761131, -0.25142813, -0.1118066, -0.47453368, 0.06347904, -0.23796193, -1.02682328, -0.47594091, 0.39515916, -0.2782529, -0.16566519, 0.08063579, 0.00810116, -0.06213913, -1.059654, -0.62496334, 0.53698546, -0.11806234, 0.00356161, 0.11513405, -0.14213292, 0.04102662, -0.36622161, -0.73686272, 0.48323864, -0.27338892, -0.14203401, -0.41736352, 0.03332564, -0.21907479, -0.06396769, 0.01831361, 0.46263444, -1.01878166, -0.86486858, 0.17622118, -0.01249686, -0.74530888, -0.9354887, -0.5027945, 0.38170099, -0.15547098, 0.00677824, -0.04677663, -0.13541745, 0.07253501, -0.97933143, -0.58001202, 0.48235369, -0.18836913, -0.02430783, 0.07572441, -0.08101331, 0.00630076, -0.16881248, -0.67989182, 0.46083611, -0.43910736, -0.29321918, -0.38735861, 0.07669903, -0.29749861, -0.40047669, -0.56722462, 0.33168188, -0.13118173, -0.06672747, -0.56856316, -0.26269144, -0.14236671, 0.10651901, 0.4962585, 0.38848072, -1.06653547, -0.64079332, -0.47378591, 0.43195483, -0.04856951, -0.9840439, -0.70610428, 0.34028092, -0.2089237, -0.05382041, 0.01625874, -0.02080803, -0.12535211, -0.04146428, -1.24533033, 0.48944879, 0.0578458, 0.26708388, -0.90321028, 0.35377088, -0.36791429, -0.35382384, -0.52748734, 0.42854419, -0.31744713, -0.19174226, -0.39073724, -0.03258846, -0.19978228, -0.36185205, -0.57412046, 0.43681973, -0.25414538, -0.12904905, -0.46334973, -0.03123853, -0.11303604, -0.87073672, -0.45441297, 0.41825858, -0.25303507, -0.21845073, 0.10248682, -0.11045569, -0.10002795, -0.00572806, 0.16519061, 0.42651513, -1.11417019, -0.83789682, 0.02995787, 0.16843079, -0.53874511, 0.03056994, 0.17877036, 0.49632853, -1.03276777, -0.74778616, -0.03971953, 0.10907949, -0.67385727, -0.9523471, -0.56550741, 0.40409449, -0.2703723, -0.10175014, 0.13605487, -0.06306008, -0.01768126, -0.4749442, -0.56964815, 0.39389887, -0.19248079, -0.04161081, -0.38728487, -0.20341556, -0.12656988, -0.35949609, -0.46137866, 0.28798422, -0.06603147, -0.04363992, -0.60343552, 
-0.23565227, -0.10242701, -0.06792886, 0.09689897, 0.33259571, -0.98854214, -0.84444433, 0.00673901, 0.13457057, -0.43145794, -0.51500046, -0.50821936, 0.38000089, 0.0132636, 0.0580942, -0.40157595, -0.11967677, 0.02549113, -0.10350953, 0.22918226, 0.40411913, -1.05619383, -0.71218503, -0.02197581, 0.26422262, -0.34765676, 0.06601537, 0.21712676, 0.34723559, -1.20982027, -0.95646334, 0.00793948, 0.27620381, -0.43475035, -0.67326003, -0.6137197, 0.43724492, -0.17666136, -0.06591748, -0.18937394, -0.07400128, -0.06881691, -0.5201112, -0.61088628, 0.4225319, -0.18969463, -0.06921366, -0.33993208, -0.06990873, -0.10288513, -0.70659858, -0.56003648, 0.46628812, -0.16090363, -0.0185108, -0.1431348, -0.1128775, -0.0078648, -0.02323332, 0.04292452, 0.39291084, -0.94897962, -0.63863206, -0.16546988, 0.23698957, -0.30633628}; auto stream = resource::get_cuda_stream(handle); d_X.resize(X.size(), stream); d_X_embedded.resize(X_embedded.size(), stream); raft::update_device(d_X.data(), X.data(), X.size(), stream); raft::update_device(d_X_embedded.data(), X_embedded.data(), X_embedded.size(), stream); auto n_sample = 50; auto n_features_origin = 30; auto n_features_embedded = 8; // euclidean test score = trustworthiness_score<cuvs::distance::DistanceType::L2SqrtUnexpanded, float>( handle, raft::make_device_matrix_view<const float>(d_X.data(), n_sample, n_features_origin), raft::make_device_matrix_view<const float>( d_X_embedded.data(), n_sample, n_features_embedded), 5); } void SetUp() override { basicTest(); } void TearDown() override {} protected: raft::resources handle; rmm::device_uvector<float> d_X; rmm::device_uvector<float> d_X_embedded; double score; }; typedef TrustworthinessScoreTest TrustworthinessScoreTestF; TEST_F(TrustworthinessScoreTestF, Result) { ASSERT_TRUE(0.9375 < score && score < 0.9379); } }; // namespace stats }; // namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/sum.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/linalg/eltwise.cuh> #include <raft/stats/sum.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <gtest/gtest.h> namespace raft { namespace stats { template <typename T> struct SumInputs { T tolerance; int rows, cols; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const SumInputs<T>& dims) { return os; } template <typename T> class SumTest : public ::testing::TestWithParam<SumInputs<T>> { public: SumTest() : params(::testing::TestWithParam<SumInputs<T>>::GetParam()), stream(resource::get_cuda_stream(handle)), rows(params.rows), cols(params.cols), data(rows * cols, stream), sum_act(cols, stream) { } protected: void SetUp() override { int len = rows * cols; T data_h[len]; for (int i = 0; i < len; i++) { data_h[i] = T(1); } raft::update_device(data.data(), data_h, len, stream); sum(handle, raft::make_device_matrix_view<const T>(data.data(), rows, cols), raft::make_device_vector_view(sum_act.data(), cols)); resource::sync_stream(handle, stream); } protected: raft::resources handle; cudaStream_t stream; SumInputs<T> params; int rows, cols; rmm::device_uvector<T> data, sum_act; }; const std::vector<SumInputs<float>> inputsf = {{0.05f, 1024, 32, 1234ULL}, {0.05f, 1024, 256, 
1234ULL}}; const std::vector<SumInputs<double>> inputsd = {{0.05, 1024, 32, 1234ULL}, {0.05, 1024, 256, 1234ULL}}; typedef SumTest<float> SumTestF; TEST_P(SumTestF, Result) { ASSERT_TRUE(raft::devArrMatch( float(params.rows), sum_act.data(), params.cols, raft::CompareApprox<float>(params.tolerance))); } typedef SumTest<double> SumTestD; TEST_P(SumTestD, Result) { ASSERT_TRUE(raft::devArrMatch(double(params.rows), sum_act.data(), params.cols, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(SumTests, SumTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(SumTests, SumTestD, ::testing::ValuesIn(inputsd)); } // end namespace stats } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/minmax.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <limits> #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/random/rng.cuh> #include <raft/stats/minmax.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <stdio.h> #include <stdlib.h> namespace raft { namespace stats { ///@todo: need to add tests for verifying the column subsampling feature template <typename T> struct MinMaxInputs { T tolerance; int rows, cols; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MinMaxInputs<T>& dims) { return os; } template <typename T> RAFT_KERNEL naiveMinMaxInitKernel(int ncols, T* globalmin, T* globalmax, T init_val) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= ncols) return; globalmin[tid] = init_val; globalmax[tid] = -init_val; } template <typename T> RAFT_KERNEL naiveMinMaxKernel(const T* data, int nrows, int ncols, T* globalmin, T* globalmax) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int col = tid / nrows; if (col < ncols) { T val = data[tid]; if (!isnan(val)) { raft::myAtomicMin(&globalmin[col], val); raft::myAtomicMax(&globalmax[col], val); } } } template <typename T> void naiveMinMax( const T* data, int nrows, int ncols, T* globalmin, T* globalmax, cudaStream_t stream) { 
const int TPB = 128; int nblks = raft::ceildiv(ncols, TPB); T init_val = std::numeric_limits<T>::max(); naiveMinMaxInitKernel<<<nblks, TPB, 0, stream>>>(ncols, globalmin, globalmax, init_val); RAFT_CUDA_TRY(cudaGetLastError()); nblks = raft::ceildiv(nrows * ncols, TPB); naiveMinMaxKernel<<<nblks, TPB, 0, stream>>>(data, nrows, ncols, globalmin, globalmax); RAFT_CUDA_TRY(cudaGetLastError()); } template <typename T> RAFT_KERNEL nanKernel(T* data, const bool* mask, int len, T nan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (!mask[tid]) data[tid] = nan; } template <typename T> class MinMaxTest : public ::testing::TestWithParam<MinMaxInputs<T>> { protected: MinMaxTest() : minmax_act(0, resource::get_cuda_stream(handle)), minmax_ref(0, resource::get_cuda_stream(handle)) { } void SetUp() override { auto stream = resource::get_cuda_stream(handle); params = ::testing::TestWithParam<MinMaxInputs<T>>::GetParam(); raft::random::RngState r(params.seed); int len = params.rows * params.cols; rmm::device_uvector<T> data(len, stream); rmm::device_uvector<bool> mask(len, stream); minmax_act.resize(2 * params.cols, stream); minmax_ref.resize(2 * params.cols, stream); normal(handle, r, data.data(), len, (T)0.0, (T)1.0); T nan_prob = 0.01; bernoulli(handle, r, mask.data(), len, nan_prob); const int TPB = 256; nanKernel<<<raft::ceildiv(len, TPB), TPB, 0, stream>>>( data.data(), mask.data(), len, std::numeric_limits<T>::quiet_NaN()); RAFT_CUDA_TRY(cudaPeekAtLastError()); naiveMinMax(data.data(), params.rows, params.cols, minmax_ref.data(), minmax_ref.data() + params.cols, stream); raft::stats::minmax<T, int>( handle, raft::make_device_matrix_view<const T, int, raft::layout_f_contiguous>( data.data(), params.rows, params.cols), std::nullopt, std::nullopt, raft::make_device_vector_view<T, int>(minmax_act.data(), params.cols), raft::make_device_vector_view<T, int>(minmax_act.data() + params.cols, params.cols), std::nullopt); } protected: raft::resources 
handle; MinMaxInputs<T> params; rmm::device_uvector<T> minmax_act; rmm::device_uvector<T> minmax_ref; }; const std::vector<MinMaxInputs<float>> inputsf = {{0.00001f, 1024, 32, 1234ULL}, {0.00001f, 1024, 64, 1234ULL}, {0.00001f, 1024, 128, 1234ULL}, {0.00001f, 1024, 256, 1234ULL}, {0.00001f, 1024, 512, 1234ULL}, {0.00001f, 1024, 1024, 1234ULL}, {0.00001f, 4096, 32, 1234ULL}, {0.00001f, 4096, 64, 1234ULL}, {0.00001f, 4096, 128, 1234ULL}, {0.00001f, 4096, 256, 1234ULL}, {0.00001f, 4096, 512, 1234ULL}, {0.00001f, 4096, 1024, 1234ULL}, {0.00001f, 8192, 32, 1234ULL}, {0.00001f, 8192, 64, 1234ULL}, {0.00001f, 8192, 128, 1234ULL}, {0.00001f, 8192, 256, 1234ULL}, {0.00001f, 8192, 512, 1234ULL}, {0.00001f, 8192, 1024, 1234ULL}, {0.00001f, 1024, 8192, 1234ULL}}; const std::vector<MinMaxInputs<double>> inputsd = {{0.0000001, 1024, 32, 1234ULL}, {0.0000001, 1024, 64, 1234ULL}, {0.0000001, 1024, 128, 1234ULL}, {0.0000001, 1024, 256, 1234ULL}, {0.0000001, 1024, 512, 1234ULL}, {0.0000001, 1024, 1024, 1234ULL}, {0.0000001, 4096, 32, 1234ULL}, {0.0000001, 4096, 64, 1234ULL}, {0.0000001, 4096, 128, 1234ULL}, {0.0000001, 4096, 256, 1234ULL}, {0.0000001, 4096, 512, 1234ULL}, {0.0000001, 4096, 1024, 1234ULL}, {0.0000001, 8192, 32, 1234ULL}, {0.0000001, 8192, 64, 1234ULL}, {0.0000001, 8192, 128, 1234ULL}, {0.0000001, 8192, 256, 1234ULL}, {0.0000001, 8192, 512, 1234ULL}, {0.0000001, 8192, 1024, 1234ULL}, {0.0000001, 1024, 8192, 1234ULL}}; typedef MinMaxTest<float> MinMaxTestF; TEST_P(MinMaxTestF, Result) { ASSERT_TRUE(raft::devArrMatch(minmax_ref.data(), minmax_act.data(), 2 * params.cols, raft::CompareApprox<float>(params.tolerance))); } typedef MinMaxTest<double> MinMaxTestD; TEST_P(MinMaxTestD, Result) { ASSERT_TRUE(raft::devArrMatch(minmax_ref.data(), minmax_act.data(), 2 * params.cols, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestD, 
::testing::ValuesIn(inputsd)); } // end namespace stats } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/adjusted_rand_index.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <algorithm> #include <gtest/gtest.h> #include <iostream> #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/adjusted_rand_index.cuh> #include <raft/util/cudart_utils.hpp> #include <random> namespace raft { namespace stats { struct adjustedRandIndexParam { int nElements; int lowerLabelRange; int upperLabelRange; bool sameArrays; double tolerance; // if this is true, then it is assumed that `sameArrays` is also true // further it also assumes `lowerLabelRange` and `upperLabelRange` are 0 bool testZeroArray; }; template <typename T, typename MathT = int> class adjustedRandIndexTest : public ::testing::TestWithParam<adjustedRandIndexParam> { protected: adjustedRandIndexTest() : stream(resource::get_cuda_stream(handle)), firstClusterArray(0, stream), secondClusterArray(0, stream) { } void SetUp() override { params = ::testing::TestWithParam<adjustedRandIndexParam>::GetParam(); nElements = params.nElements; firstClusterArray.resize(nElements, stream); secondClusterArray.resize(nElements, stream); RAFT_CUDA_TRY( cudaMemsetAsync(firstClusterArray.data(), 0, firstClusterArray.size() * sizeof(T), stream)); RAFT_CUDA_TRY( cudaMemsetAsync(secondClusterArray.data(), 0, secondClusterArray.size() * sizeof(T), stream)); if (!params.testZeroArray) { SetUpDifferentArrays(); } else { SetupZeroArray(); } // 
allocating and initializing memory to the GPU computed_adjusted_rand_index = adjusted_rand_index<T, MathT>( handle, raft::make_device_vector_view<const T>(firstClusterArray.data(), nElements), raft::make_device_vector_view<const T>(secondClusterArray.data(), nElements)); } void SetUpDifferentArrays() { lowerLabelRange = params.lowerLabelRange; upperLabelRange = params.upperLabelRange; std::vector<int> arr1(nElements, 0); std::vector<int> arr2(nElements, 0); std::random_device rd; std::default_random_engine dre(rd()); std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange); std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); }); if (params.sameArrays) { arr2 = arr1; } else { std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); }); } // calculating golden output int numUniqueClasses = upperLabelRange - lowerLabelRange + 1; size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int); int* hGoldenOutput = (int*)malloc(sizeOfMat); memset(hGoldenOutput, 0, sizeOfMat); for (int i = 0; i < nElements; i++) { int row = arr1[i] - lowerLabelRange; int column = arr2[i] - lowerLabelRange; hGoldenOutput[row * numUniqueClasses + column] += 1; } int sumOfNijCTwo = 0; int* a = (int*)malloc(numUniqueClasses * sizeof(int)); int* b = (int*)malloc(numUniqueClasses * sizeof(int)); memset(a, 0, numUniqueClasses * sizeof(int)); memset(b, 0, numUniqueClasses * sizeof(int)); int sumOfAiCTwo = 0; int sumOfBiCTwo = 0; // calculating the sum of number of pairwise points in each index // and also the reducing contingency matrix along row and column for (int i = 0; i < numUniqueClasses; ++i) { for (int j = 0; j < numUniqueClasses; ++j) { int Nij = hGoldenOutput[i * numUniqueClasses + j]; sumOfNijCTwo += ((Nij) * (Nij - 1)) / 2; a[i] += hGoldenOutput[i * numUniqueClasses + j]; b[i] += hGoldenOutput[j * numUniqueClasses + i]; } } // claculating the sum of number pairwise points in ever column sum // claculating the sum of 
number pairwise points in ever row sum for (int i = 0; i < numUniqueClasses; ++i) { sumOfAiCTwo += ((a[i]) * (a[i] - 1)) / 2; sumOfBiCTwo += ((b[i]) * (b[i] - 1)) / 2; } // calculating the ARI double nCTwo = double(nElements) * double(nElements - 1) / 2.0; double expectedIndex = (double(sumOfBiCTwo) * double(sumOfAiCTwo)) / double(nCTwo); double maxIndex = (double(sumOfAiCTwo) + double(sumOfBiCTwo)) / 2.0; double index = (double)sumOfNijCTwo; if (maxIndex - expectedIndex) truth_adjusted_rand_index = (index - expectedIndex) / (maxIndex - expectedIndex); else truth_adjusted_rand_index = 0; raft::update_device(firstClusterArray.data(), &arr1[0], nElements, stream); raft::update_device(secondClusterArray.data(), &arr2[0], nElements, stream); } void SetupZeroArray() { lowerLabelRange = 0; upperLabelRange = 0; truth_adjusted_rand_index = 1.0; } raft::resources handle; cudaStream_t stream = 0; adjustedRandIndexParam params; T lowerLabelRange, upperLabelRange; rmm::device_uvector<T> firstClusterArray; rmm::device_uvector<T> secondClusterArray; int nElements = 0; double truth_adjusted_rand_index = 0; double computed_adjusted_rand_index = 0; }; const std::vector<adjustedRandIndexParam> inputs = { {199, 1, 10, false, 0.000001, false}, {200, 15, 100, false, 0.000001, false}, {100, 1, 20, false, 0.000001, false}, {10, 1, 10, false, 0.000001, false}, {198, 1, 100, false, 0.000001, false}, {300, 3, 99, false, 0.000001, false}, {199, 1, 10, true, 0.000001, false}, {200, 15, 100, true, 0.000001, false}, {100, 1, 20, true, 0.000001, false}, // FIXME: disabled temporarily due to flaky test // {10, 1, 10, true, 0.000001, false}, {198, 1, 100, true, 0.000001, false}, {300, 3, 99, true, 0.000001, false}, {199, 0, 0, false, 0.000001, true}, {200, 0, 0, false, 0.000001, true}, {100, 0, 0, false, 0.000001, true}, {10, 0, 0, false, 0.000001, true}, {198, 0, 0, false, 0.000001, true}, {300, 0, 0, false, 0.000001, true}, {199, 0, 0, true, 0.000001, true}, {200, 0, 0, true, 0.000001, true}, 
{100, 0, 0, true, 0.000001, true}, {10, 0, 0, true, 0.000001, true}, {198, 0, 0, true, 0.000001, true}, {300, 0, 0, true, 0.000001, true}, }; const std::vector<adjustedRandIndexParam> large_inputs = { {2000000, 1, 1000, false, 0.000001, false}, {2000000, 1, 1000, true, 0.000001, false}, {2000000, 0, 0, false, 0.000001, true}, {2000000, 0, 0, true, 0.000001, true}, }; typedef adjustedRandIndexTest<int, int> ARI_ii; TEST_P(ARI_ii, Result) { ASSERT_NEAR(computed_adjusted_rand_index, truth_adjusted_rand_index, params.tolerance); } INSTANTIATE_TEST_CASE_P(adjusted_rand_index, ARI_ii, ::testing::ValuesIn(inputs)); typedef adjustedRandIndexTest<int, unsigned long long> ARI_il; TEST_P(ARI_il, Result) { ASSERT_NEAR(computed_adjusted_rand_index, truth_adjusted_rand_index, params.tolerance); } INSTANTIATE_TEST_CASE_P(adjusted_rand_index, ARI_il, ::testing::ValuesIn(inputs)); INSTANTIATE_TEST_CASE_P(adjusted_rand_index_large, ARI_il, ::testing::ValuesIn(large_inputs)); } // end namespace stats } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/weighted_mean.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <cstdint> #include <gtest/gtest.h> #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/random/rng.cuh> #include <raft/stats/weighted_mean.cuh> #include <raft/util/cuda_utils.cuh> #include <thrust/device_vector.h> #include <thrust/host_vector.h> namespace raft { namespace stats { template <typename T> struct WeightedMeanInputs { T tolerance; int M, N; unsigned long long int seed; bool along_rows; // Used only for the weightedMean test function bool row_major; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const WeightedMeanInputs<T>& I) { return os << "{ " << I.tolerance << ", " << I.M << ", " << I.N << ", " << I.seed << ", " << I.along_rows << "}" << std::endl; } ///// weighted row-wise mean test and support functions template <typename T> void naiveRowWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor) { int istr = rowMajor ? 1 : M; int jstr = rowMajor ? 
N : 1; // sum the weights T WS = 0; for (int i = 0; i < N; i++) WS += W[i]; for (int j = 0; j < M; j++) { R[j] = (T)0; for (int i = 0; i < N; i++) { // R[j] += (W[i]*D[i*istr + j*jstr] - R[j])/(T)(i+1); R[j] += (W[i] * D[i * istr + j * jstr]) / WS; } } } template <typename T> class RowWeightedMeanTest : public ::testing::TestWithParam<WeightedMeanInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam(); raft::random::RngState r(params.seed); int rows = params.M, cols = params.N, len = rows * cols; auto stream = resource::get_cuda_stream(handle); // device-side data din.resize(len); dweights.resize(cols); dexp.resize(rows); dact.resize(rows); // create random matrix and weights uniform(handle, r, din.data().get(), len, T(-1.0), T(1.0)); uniform(handle, r, dweights.data().get(), cols, T(-1.0), T(1.0)); // host-side data thrust::host_vector<T> hin = din; thrust::host_vector<T> hweights = dweights; thrust::host_vector<T> hexp(rows); // compute naive result & copy to GPU naiveRowWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols, params.row_major); dexp = hexp; auto output = raft::make_device_vector_view<T, std::uint32_t>(dact.data().get(), rows); auto weights = raft::make_device_vector_view<const T, std::uint32_t>(dweights.data().get(), cols); if (params.row_major) { auto input = raft::make_device_matrix_view<const T, std::uint32_t, raft::row_major>( din.data().get(), rows, cols); // compute result row_weighted_mean(handle, input, weights, output); } else { auto input = raft::make_device_matrix_view<const T, std::uint32_t, raft::col_major>( din.data().get(), rows, cols); // compute result row_weighted_mean(handle, input, weights, output); } // adjust tolerance to account for round-off accumulation params.tolerance *= params.N; } protected: raft::resources handle; WeightedMeanInputs<T> params; thrust::host_vector<T> hin, hweights; thrust::device_vector<T> din, dweights, dexp, dact; }; ///// 
weighted column-wise mean test and support functions template <typename T> void naiveColWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor) { int istr = rowMajor ? 1 : M; int jstr = rowMajor ? N : 1; // sum the weights T WS = 0; for (int j = 0; j < M; j++) WS += W[j]; for (int i = 0; i < N; i++) { R[i] = (T)0; for (int j = 0; j < M; j++) { // R[i] += (W[j]*D[i*istr + j*jstr] - R[i])/(T)(j+1); R[i] += (W[j] * D[i * istr + j * jstr]) / WS; } } } template <typename T> class ColWeightedMeanTest : public ::testing::TestWithParam<WeightedMeanInputs<T>> { void SetUp() override { params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam(); raft::random::RngState r(params.seed); int rows = params.M, cols = params.N, len = rows * cols; auto stream = resource::get_cuda_stream(handle); // device-side data din.resize(len); dweights.resize(rows); dexp.resize(cols); dact.resize(cols); // create random matrix and weights uniform(handle, r, din.data().get(), len, T(-1.0), T(1.0)); uniform(handle, r, dweights.data().get(), rows, T(-1.0), T(1.0)); // host-side data thrust::host_vector<T> hin = din; thrust::host_vector<T> hweights = dweights; thrust::host_vector<T> hexp(cols); // compute naive result & copy to GPU naiveColWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols, params.row_major); dexp = hexp; auto output = raft::make_device_vector_view<T, std::uint32_t>(dact.data().get(), cols); auto weights = raft::make_device_vector_view<const T, std::uint32_t>(dweights.data().get(), rows); if (params.row_major) { auto input = raft::make_device_matrix_view<const T, std::uint32_t, raft::row_major>( din.data().get(), rows, cols); // compute result col_weighted_mean(handle, input, weights, output); } else { auto input = raft::make_device_matrix_view<const T, std::uint32_t, raft::col_major>( din.data().get(), rows, cols); // compute result col_weighted_mean(handle, input, weights, output); } // adjust tolerance to account for round-off accumulation 
params.tolerance *= params.M; } protected: raft::resources handle; WeightedMeanInputs<T> params; thrust::host_vector<T> hin, hweights; thrust::device_vector<T> din, dweights, dexp, dact; }; template <typename T> class WeightedMeanTest : public ::testing::TestWithParam<WeightedMeanInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam(); raft::random::RngState r(params.seed); auto stream = resource::get_cuda_stream(handle); int rows = params.M, cols = params.N, len = rows * cols; auto weight_size = params.along_rows ? cols : rows; auto mean_size = params.along_rows ? rows : cols; // device-side data din.resize(len); dweights.resize(weight_size); dexp.resize(mean_size); dact.resize(mean_size); // create random matrix and weights uniform(handle, r, din.data().get(), len, T(-1.0), T(1.0)); uniform(handle, r, dweights.data().get(), weight_size, T(-1.0), T(1.0)); // host-side data thrust::host_vector<T> hin = din; thrust::host_vector<T> hweights = dweights; thrust::host_vector<T> hexp(mean_size); // compute naive result & copy to GPU if (params.along_rows) naiveRowWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols, params.row_major); else naiveColWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols, params.row_major); dexp = hexp; auto output = raft::make_device_vector_view<T, std::uint32_t>(dact.data().get(), mean_size); auto weights = raft::make_device_vector_view<const T, std::uint32_t>(dweights.data().get(), weight_size); if (params.row_major) { auto input = raft::make_device_matrix_view<const T, std::uint32_t, raft::row_major>( din.data().get(), rows, cols); // compute result weighted_mean(handle, input, weights, output, params.along_rows); } else { auto input = raft::make_device_matrix_view<const T, std::uint32_t, raft::col_major>( din.data().get(), rows, cols); // compute result weighted_mean(handle, input, weights, output, params.along_rows); } // adjust tolerance to account 
for round-off accumulation params.tolerance *= params.N; } protected: raft::resources handle; WeightedMeanInputs<T> params; thrust::host_vector<T> hin, hweights; thrust::device_vector<T> din, dweights, dexp, dact; }; ////// Parameter sets and test instantiation static const float tolF = 128 * std::numeric_limits<float>::epsilon(); static const double tolD = 256 * std::numeric_limits<double>::epsilon(); const std::vector<WeightedMeanInputs<float>> inputsf = {{tolF, 4, 4, 1234, true, true}, {tolF, 32, 32, 1234, true, false}, {tolF, 32, 64, 1234, false, false}, {tolF, 32, 256, 1234, true, true}, {tolF, 32, 256, 1234, false, false}, {tolF, 1024, 32, 1234, true, false}, {tolF, 1024, 64, 1234, true, true}, {tolF, 1024, 128, 1234, true, false}, {tolF, 1024, 256, 1234, true, true}, {tolF, 1024, 32, 1234, false, false}, {tolF, 1024, 64, 1234, false, true}, {tolF, 1024, 128, 1234, false, false}, {tolF, 1024, 256, 1234, false, true}}; const std::vector<WeightedMeanInputs<double>> inputsd = {{tolD, 4, 4, 1234, true, true}, {tolD, 32, 32, 1234, true, false}, {tolD, 32, 64, 1234, false, false}, {tolD, 32, 256, 1234, true, true}, {tolD, 32, 256, 1234, false, false}, {tolD, 1024, 32, 1234, true, false}, {tolD, 1024, 64, 1234, true, true}, {tolD, 1024, 128, 1234, true, false}, {tolD, 1024, 256, 1234, true, true}, {tolD, 1024, 32, 1234, false, false}, {tolD, 1024, 64, 1234, false, true}, {tolD, 1024, 128, 1234, false, false}, {tolD, 1024, 256, 1234, false, true}}; using RowWeightedMeanTestF = RowWeightedMeanTest<float>; TEST_P(RowWeightedMeanTestF, Result) { ASSERT_TRUE(devArrMatch( dexp.data().get(), dact.data().get(), params.M, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestF, ::testing::ValuesIn(inputsf)); using RowWeightedMeanTestD = RowWeightedMeanTest<double>; TEST_P(RowWeightedMeanTestD, Result) { ASSERT_TRUE(devArrMatch( dexp.data().get(), dact.data().get(), params.M, 
raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestD, ::testing::ValuesIn(inputsd)); using ColWeightedMeanTestF = ColWeightedMeanTest<float>; TEST_P(ColWeightedMeanTestF, Result) { ASSERT_TRUE(devArrMatch( dexp.data().get(), dact.data().get(), params.N, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestF, ::testing::ValuesIn(inputsf)); using ColWeightedMeanTestD = ColWeightedMeanTest<double>; TEST_P(ColWeightedMeanTestD, Result) { ASSERT_TRUE(devArrMatch( dexp.data().get(), dact.data().get(), params.N, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestD, ::testing::ValuesIn(inputsd)); using WeightedMeanTestF = WeightedMeanTest<float>; TEST_P(WeightedMeanTestF, Result) { auto mean_size = params.along_rows ? params.M : params.N; ASSERT_TRUE(devArrMatch( dexp.data().get(), dact.data().get(), mean_size, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(WeightedMeanTest, WeightedMeanTestF, ::testing::ValuesIn(inputsf)); using WeightedMeanTestD = WeightedMeanTest<double>; TEST_P(WeightedMeanTestD, Result) { auto mean_size = params.along_rows ? params.M : params.N; ASSERT_TRUE(devArrMatch(dexp.data().get(), dact.data().get(), mean_size, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(WeightedMeanTest, WeightedMeanTestD, ::testing::ValuesIn(inputsd)); }; // end namespace stats }; // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/v_measure.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/homogeneity_score.cuh>
#include <raft/stats/v_measure.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>

namespace raft {
namespace stats {

// Parameters for one v-measure test case.
struct vMeasureParam {
  int nElements;        // number of labels in each cluster array
  int lowerLabelRange;  // smallest generated label value
  int upperLabelRange;  // largest generated label value
  double beta;          // homogeneity/completeness weighting factor
  bool sameArrays;      // when true, predictions are a copy of the truth labels
  double tolerance;     // allowed absolute error against the reference value
};

// Fixture: generates random truth/prediction labels, derives a reference
// v-measure on the host from two homogeneity_score calls (completeness is
// homogeneity with the arguments swapped), then runs the device implementation.
template <typename T>
class vMeasureTest : public ::testing::TestWithParam<vMeasureParam> {
 protected:
  void SetUp() override
  {
    params          = ::testing::TestWithParam<vMeasureParam>::GetParam();
    nElements       = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;

    // Random host-side label arrays drawn uniformly from [lower, upper].
    std::vector<int> h_truth(nElements, 0);
    std::vector<int> h_pred(nElements, 0);
    std::random_device seed_source;
    std::default_random_engine rng(seed_source());
    std::uniform_int_distribution<int> label_dist(lowerLabelRange, upperLabelRange);
    std::generate(h_truth.begin(), h_truth.end(), [&]() { return label_dist(rng); });
    if (params.sameArrays) {
      h_pred = h_truth;
    } else {
      std::generate(h_pred.begin(), h_pred.end(), [&]() { return label_dist(rng); });
    }

    // Copy both label arrays to the device.
    stream = resource::get_cuda_stream(handle);
    rmm::device_uvector<T> truthClusterArray(nElements, stream);
    rmm::device_uvector<T> predClusterArray(nElements, stream);
    raft::update_device(truthClusterArray.data(), h_truth.data(), (int)nElements, stream);
    raft::update_device(predClusterArray.data(), h_pred.data(), (int)nElements, stream);

    // Reference v-measure: the beta-weighted harmonic mean of homogeneity and
    // completeness, where completeness(t, p) == homogeneity(p, t).
    double refHomogeneity  = raft::stats::homogeneity_score(truthClusterArray.data(),
                                                           predClusterArray.data(),
                                                           nElements,
                                                           lowerLabelRange,
                                                           upperLabelRange,
                                                           stream);
    double refCompleteness = raft::stats::homogeneity_score(predClusterArray.data(),
                                                            truthClusterArray.data(),
                                                            nElements,
                                                            lowerLabelRange,
                                                            upperLabelRange,
                                                            stream);
    if (refCompleteness + refHomogeneity == 0.0) {
      truthVMeasure = 0.0;
    } else {
      truthVMeasure = (1 + params.beta) * refHomogeneity * refCompleteness /
                      (params.beta * refHomogeneity + refCompleteness);
    }

    // Device implementation under test.
    computedVMeasure = raft::stats::v_measure(
      handle,
      raft::make_device_vector_view<const T>(truthClusterArray.data(), nElements),
      raft::make_device_vector_view<const T>(predClusterArray.data(), nElements),
      lowerLabelRange,
      upperLabelRange,
      params.beta);
  }

  raft::resources handle;
  vMeasureParam params;
  T lowerLabelRange, upperLabelRange;
  int nElements           = 0;
  double truthVMeasure    = 0;
  double computedVMeasure = 0;
  cudaStream_t stream     = 0;
};

// Test parameter values.
const std::vector<vMeasureParam> inputs = {{199, 1, 10, 1.0, false, 0.000001},
                                           {200, 15, 100, 1.0, false, 0.000001},
                                           {100, 1, 20, 1.0, false, 0.000001},
                                           {10, 1, 10, 1.0, false, 0.000001},
                                           {198, 1, 100, 1.0, false, 0.000001},
                                           {300, 3, 99, 1.0, false, 0.000001},
                                           {199, 1, 10, 1.0, true, 0.000001},
                                           {200, 15, 100, 1.0, true, 0.000001},
                                           {100, 1, 20, 1.0, true, 0.000001},
                                           {10, 1, 10, 1.0, true, 0.000001},
                                           {198, 1, 100, 1.0, true, 0.000001},
                                           {300, 3, 99, 1.0, true, 0.000001}};

typedef vMeasureTest<int> vMeasureTestClass;
TEST_P(vMeasureTestClass, Result)
{
  ASSERT_NEAR(computedVMeasure, truthVMeasure, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(vMeasure, vMeasureTestClass, ::testing::ValuesIn(inputs));

}  // end namespace stats
}  // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/contingencyMatrix.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/interruptible.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/contingency_matrix.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
#include <rmm/device_uvector.hpp>

namespace raft {
namespace stats {

// Parameters for one contingency-matrix test case.
struct ContingencyMatrixParam {
  int nElements;        // number of labels in each input array
  int minClass;         // smallest generated class label
  int maxClass;         // largest generated class label
  bool calcCardinality; // derive min/max labels on device instead of using the range
  bool skipLabels;      // remove two label values to exercise sparse label sets
  float tolerance;      // unused by the integer comparison, kept for symmetry
};

// Fixture: generates random label pairs (y, y_hat), builds the golden
// contingency matrix on the host, then compares against the device kernel.
template <typename T>
class ContingencyMatrixTest : public ::testing::TestWithParam<ContingencyMatrixParam> {
 protected:
  ContingencyMatrixTest()
    : stream(resource::get_cuda_stream(handle)),
      dY(0, stream),
      dYHat(0, stream),
      dComputedOutput(0, stream),
      dGoldenOutput(0, stream)
  {
  }

  void SetUp() override
  {
    params              = ::testing::TestWithParam<ContingencyMatrixParam>::GetParam();
    int numElements     = params.nElements;
    int lowerLabelRange = params.minClass;
    int upperLabelRange = params.maxClass;

    // Random host-side label arrays in [lower, upper].
    std::vector<int> y(numElements, 0);
    std::vector<int> y_hat(numElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
    std::generate(y.begin(), y.end(), [&]() { return intGenerator(dre); });
    std::generate(y_hat.begin(), y_hat.end(), [&]() { return intGenerator(dre); });

    if (params.skipLabels) {
      // Remove two label values from the input arrays so the label set has gaps.
      int y1 = (upperLabelRange - lowerLabelRange) / 2;
      int y2 = y1 + (upperLabelRange - lowerLabelRange) / 4;
      // Replacement values: shift each removed label up by one.
      int y1_R = y1 + 1;
      int y2_R = y2 + 1;
      std::replace(y.begin(), y.end(), y1, y1_R);
      std::replace(y.begin(), y.end(), y2, y2_R);
      std::replace(y_hat.begin(), y_hat.end(), y1, y1_R);
      std::replace(y_hat.begin(), y_hat.end(), y2, y2_R);
    }

    dY.resize(numElements, stream);
    dYHat.resize(numElements, stream);
    raft::update_device(dYHat.data(), &y_hat[0], numElements, stream);
    raft::update_device(dY.data(), &y[0], numElements, stream);

    if (params.calcCardinality) {
      raft::stats::get_input_class_cardinality(
        handle,
        raft::make_device_vector_view<const T>(dY.data(), numElements),
        raft::make_host_scalar_view(&minLabel),
        raft::make_host_scalar_view(&maxLabel));
    } else {
      minLabel = lowerLabelRange;
      maxLabel = upperLabelRange;
    }
    numUniqueClasses = maxLabel - minLabel + 1;

    dComputedOutput.resize(numUniqueClasses * numUniqueClasses, stream);
    dGoldenOutput.resize(numUniqueClasses * numUniqueClasses, stream);

    // Generate golden output on the CPU.
    // Fix: size the buffer in ELEMENTS, not bytes — the previous code sized it
    // as numUniqueClasses^2 * sizeof(int) elements, over-allocating 4x.
    std::vector<int> hGoldenOutput(numUniqueClasses * numUniqueClasses, 0);
    for (int i = 0; i < numElements; i++) {
      auto row    = y[i] - minLabel;
      auto column = y_hat[i] - minLabel;
      hGoldenOutput[row * numUniqueClasses + column] += 1;
    }
    raft::update_device(
      dGoldenOutput.data(), hGoldenOutput.data(), numUniqueClasses * numUniqueClasses, stream);
    raft::interruptible::synchronize(stream);
  }

  // Runs the device kernel and compares against the golden matrix.
  void RunTest()
  {
    int numElements = params.nElements;
    raft::stats::contingency_matrix(
      handle,
      raft::make_device_vector_view<const T>(dY.data(), numElements),
      raft::make_device_vector_view<const T>(dYHat.data(), numElements),
      raft::make_device_matrix_view(dComputedOutput.data(), numUniqueClasses, numUniqueClasses),
      std::make_optional(minLabel),
      std::make_optional(maxLabel));
    raft::interruptible::synchronize(stream);
    ASSERT_TRUE(raft::devArrMatch(dComputedOutput.data(),
                                  dGoldenOutput.data(),
                                  numUniqueClasses * numUniqueClasses,
                                  raft::Compare<T>()));
  }

  raft::resources handle;
  ContingencyMatrixParam params;
  int numUniqueClasses = -1;
  T minLabel, maxLabel;
  cudaStream_t stream = 0;
  rmm::device_uvector<T> dY, dYHat;
  rmm::device_uvector<int> dComputedOutput, dGoldenOutput;
};

const std::vector<ContingencyMatrixParam> inputs = {
  {10000, 1, 10, true, false, 0.000001},
  {10000, 1, 5000, true, false, 0.000001},
  {10000, 1, 10000, true, false, 0.000001},
  {10000, 1, 20000, true, false, 0.000001},
  {10000, 1, 10, false, false, 0.000001},
  {10000, 1, 5000, false, false, 0.000001},
  {10000, 1, 10000, false, false, 0.000001},
  {10000, 1, 20000, false, false, 0.000001},
  {100000, 1, 100, false, false, 0.000001},
  {1000000, 1, 1200, true, false, 0.000001},
  {1000000, 1, 10000, false, false, 0.000001},
  {100000, 1, 100, false, true, 0.000001},
};

typedef ContingencyMatrixTest<int> ContingencyMatrixTestS;
TEST_P(ContingencyMatrixTestS, Result) { RunTest(); }
INSTANTIATE_TEST_CASE_P(ContingencyMatrix, ContingencyMatrixTestS, ::testing::ValuesIn(inputs));

}  // namespace stats
}  // namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/mean.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/mean.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <stdio.h>
#include <stdlib.h>

namespace raft {
namespace stats {

// Parameters for one column-wise mean test case.
template <typename T>
struct MeanInputs {
  T tolerance, mean;         // comparison tolerance and true distribution mean
  int rows, cols;            // matrix shape
  bool sample, rowMajor;     // sample-mean flag and storage layout
  unsigned long long int seed;
};

template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MeanInputs<T>& dims)
{
  return os;
}

// Fixture: fills a matrix with N(mean, 1) samples and computes per-column
// means with the layout selected by the test parameters.
template <typename T>
class MeanTest : public ::testing::TestWithParam<MeanInputs<T>> {
 public:
  MeanTest()
    : params(::testing::TestWithParam<MeanInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      rows(params.rows),
      cols(params.cols),
      data(rows * cols, stream),
      mean_act(cols, stream)
  {
  }

 protected:
  void SetUp() override
  {
    raft::random::RngState rng(params.seed);
    const int total = rows * cols;
    normal(handle, rng, data.data(), total, params.mean, (T)1.0);
    if (params.rowMajor) {
      compute_mean<raft::row_major>(data.data());
    } else {
      compute_mean<raft::col_major>(data.data());
    }
  }

  // Runs the mean primitive for the given storage layout.
  template <typename LayoutT>
  void compute_mean(const T* input)
  {
    mean(handle,
         raft::make_device_matrix_view<const T, int, LayoutT>(input, rows, cols),
         raft::make_device_vector_view<T, int>(mean_act.data(), cols),
         params.sample);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  MeanInputs<T> params;
  int rows, cols;
  rmm::device_uvector<T> data, mean_act;
};

// Note: For 1024 samples, 256 experiments, a mean of 1.0 with stddev=1.0, the
// measured mean (of a normal distribution) will fall outside of an epsilon of
// 0.15 only 4/10000 times. (epsilon of 0.1 will fail 30/100 times)
const std::vector<MeanInputs<float>> inputsf = {{0.15f, 1.f, 1024, 32, true, false, 1234ULL},
                                                {0.15f, 1.f, 1024, 64, true, false, 1234ULL},
                                                {0.15f, 1.f, 1024, 128, true, false, 1234ULL},
                                                {0.15f, 1.f, 1024, 256, true, false, 1234ULL},
                                                {0.15f, -1.f, 1024, 32, false, false, 1234ULL},
                                                {0.15f, -1.f, 1024, 64, false, false, 1234ULL},
                                                {0.15f, -1.f, 1024, 128, false, false, 1234ULL},
                                                {0.15f, -1.f, 1024, 256, false, false, 1234ULL},
                                                {0.15f, 1.f, 1024, 32, true, true, 1234ULL},
                                                {0.15f, 1.f, 1024, 64, true, true, 1234ULL},
                                                {0.15f, 1.f, 1024, 128, true, true, 1234ULL},
                                                {0.15f, 1.f, 1024, 256, true, true, 1234ULL},
                                                {0.15f, -1.f, 1024, 32, false, true, 1234ULL},
                                                {0.15f, -1.f, 1024, 64, false, true, 1234ULL},
                                                {0.15f, -1.f, 1024, 128, false, true, 1234ULL},
                                                {0.15f, -1.f, 1024, 256, false, true, 1234ULL}};

const std::vector<MeanInputs<double>> inputsd = {{0.15, 1.0, 1024, 32, true, false, 1234ULL},
                                                 {0.15, 1.0, 1024, 64, true, false, 1234ULL},
                                                 {0.15, 1.0, 1024, 128, true, false, 1234ULL},
                                                 {0.15, 1.0, 1024, 256, true, false, 1234ULL},
                                                 {0.15, -1.0, 1024, 32, false, false, 1234ULL},
                                                 {0.15, -1.0, 1024, 64, false, false, 1234ULL},
                                                 {0.15, -1.0, 1024, 128, false, false, 1234ULL},
                                                 {0.15, -1.0, 1024, 256, false, false, 1234ULL},
                                                 {0.15, 1.0, 1024, 32, true, true, 1234ULL},
                                                 {0.15, 1.0, 1024, 64, true, true, 1234ULL},
                                                 {0.15, 1.0, 1024, 128, true, true, 1234ULL},
                                                 {0.15, 1.0, 1024, 256, true, true, 1234ULL},
                                                 {0.15, -1.0, 1024, 32, false, true, 1234ULL},
                                                 {0.15, -1.0, 1024, 64, false, true, 1234ULL},
                                                 {0.15, -1.0, 1024, 128, false, true, 1234ULL},
                                                 {0.15, -1.0, 1024, 256, false, true, 1234ULL}};

typedef MeanTest<float> MeanTestF;
TEST_P(MeanTestF, Result)
{
  ASSERT_TRUE(
    devArrMatch(params.mean, mean_act.data(), params.cols, CompareApprox<float>(params.tolerance)));
}

typedef MeanTest<double> MeanTestD;
TEST_P(MeanTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    params.mean, mean_act.data(), params.cols, CompareApprox<double>(params.tolerance)));
}

INSTANTIATE_TEST_SUITE_P(MeanTests, MeanTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(MeanTests, MeanTestD, ::testing::ValuesIn(inputsd));

}  // end namespace stats
}  // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/meanvar.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/math.cuh>
#include <raft/random/rng.cuh>
#include <raft/stats/meanvar.cuh>
#include <raft/util/cudart_utils.hpp>

#include <algorithm>

namespace raft {
namespace stats {

// Parameters for one fused mean/variance test case; tolerances are derived
// from the distribution so tests hold with high probability.
template <typename T>
struct MeanVarInputs {
  T mean, stddev;          // true distribution parameters
  int rows, cols;          // matrix shape
  bool sample, rowMajor;   // sample-statistics flag and storage layout
  unsigned long long int seed;

  static const int N_SIGMAS = 6;
  // Tolerance for the sample mean: N_SIGMAS standard errors.
  T mean_tol() const { return T(N_SIGMAS) * stddev / sqrt(T(rows)); }
  // Tolerance for the sample variance: N_SIGMAS standard errors of s^2.
  T var_tol() const
  {
    return T(N_SIGMAS) * stddev * stddev * sqrt(T(2.0) / T(std::max(1, rows - 1)));
  }
};

template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MeanVarInputs<T>& ps)
{
  return os << "rows: " << ps.rows << "; cols: " << ps.cols << "; "
            << (ps.rowMajor ? "row-major" : "col-major")
            << " (tolerance: mean = " << ps.mean_tol() << ", var = " << ps.var_tol() << ")";
}

// Fixture: fills a matrix with N(mean, stddev) samples and computes
// per-column mean and variance in one pass.
template <typename T>
class MeanVarTest : public ::testing::TestWithParam<MeanVarInputs<T>> {
 public:
  MeanVarTest()
    : params(::testing::TestWithParam<MeanVarInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream),
      mean_act(params.cols, stream),
      vars_act(params.cols, stream)
  {
  }

 protected:
  void SetUp() override
  {
    random::RngState rng(params.seed);
    normal(handle, rng, data.data(), params.cols * params.rows, params.mean, params.stddev);
    if (params.rowMajor) {
      run_meanvar<raft::row_major>();
    } else {
      run_meanvar<raft::col_major>();
    }
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

  // Invokes the fused meanvar primitive for the given storage layout.
  template <typename LayoutT>
  void run_meanvar()
  {
    meanvar(
      handle,
      raft::make_device_matrix_view<const T, int, LayoutT>(data.data(), params.rows, params.cols),
      raft::make_device_vector_view<T, int>(mean_act.data(), params.cols),
      raft::make_device_vector_view<T, int>(vars_act.data(), params.cols),
      params.sample);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  MeanVarInputs<T> params;
  rmm::device_uvector<T> data, mean_act, vars_act;
};

const std::vector<MeanVarInputs<float>> inputsf = {
  {1.f, 2.f, 1024, 32, true, false, 1234ULL},    {1.f, 2.f, 1024, 64, true, false, 1234ULL},
  {1.f, 2.f, 1024, 128, true, false, 1234ULL},   {1.f, 2.f, 1024, 256, true, false, 1234ULL},
  {-1.f, 2.f, 1024, 32, false, false, 1234ULL},  {-1.f, 2.f, 1024, 64, false, false, 1234ULL},
  {-1.f, 2.f, 1024, 128, false, false, 1234ULL}, {-1.f, 2.f, 1024, 256, false, false, 1234ULL},
  {-1.f, 2.f, 1024, 256, false, false, 1234ULL}, {-1.f, 2.f, 1024, 257, false, false, 1234ULL},
  {1.f, 2.f, 1024, 32, true, true, 1234ULL},     {1.f, 2.f, 1024, 64, true, true, 1234ULL},
  {1.f, 2.f, 1024, 128, true, true, 1234ULL},    {1.f, 2.f, 1024, 256, true, true, 1234ULL},
  {-1.f, 2.f, 1024, 32, false, true, 1234ULL},   {-1.f, 2.f, 1024, 64, false, true, 1234ULL},
  {-1.f, 2.f, 1024, 128, false, true, 1234ULL},  {-1.f, 2.f, 1024, 256, false, true, 1234ULL},
  {-1.f, 2.f, 1024, 257, false, true, 1234ULL},  {-1.f, 2.f, 700, 13, false, true, 1234ULL},
  {10.f, 2.f, 500000, 811, false, true, 1234ULL}};

const std::vector<MeanVarInputs<double>> inputsd = {{1.0, 2.0, 1024, 32, true, false, 1234ULL},
                                                    {1.0, 2.0, 1024, 64, true, false, 1234ULL},
                                                    {1.0, 2.0, 1024, 128, true, false, 1234ULL},
                                                    {1.0, 2.0, 1024, 256, true, false, 1234ULL},
                                                    {-1.0, 2.0, 1024, 32, false, false, 1234ULL},
                                                    {-1.0, 2.0, 1024, 64, false, false, 1234ULL},
                                                    {-1.0, 2.0, 1024, 128, false, false, 1234ULL},
                                                    {-1.0, 2.0, 1024, 256, false, false, 1234ULL},
                                                    {1.0, 2.0, 1024, 32, true, true, 1234ULL},
                                                    {1.0, 2.0, 1024, 64, true, true, 1234ULL},
                                                    {1.0, 2.0, 1024, 128, true, true, 1234ULL},
                                                    {1.0, 2.0, 1024, 256, true, true, 1234ULL},
                                                    {-1.0, 2.0, 1024, 32, false, true, 1234ULL},
                                                    {-1.0, 2.0, 1024, 64, false, true, 1234ULL},
                                                    {-1.0, 2.0, 1024, 128, false, true, 1234ULL},
                                                    {-1.0, 2.0, 1024, 256, false, true, 1234ULL}};

typedef MeanVarTest<float> MeanVarTestF;
TEST_P(MeanVarTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    params.mean, mean_act.data(), params.cols, CompareApprox<float>(params.mean_tol()), stream));
  ASSERT_TRUE(devArrMatch(params.stddev * params.stddev,
                          vars_act.data(),
                          params.cols,
                          CompareApproxNoScaling<float>(params.var_tol()),
                          stream));
}

typedef MeanVarTest<double> MeanVarTestD;
TEST_P(MeanVarTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    params.mean, mean_act.data(), params.cols, CompareApprox<double>(params.mean_tol()), stream));
  ASSERT_TRUE(devArrMatch(params.stddev * params.stddev,
                          vars_act.data(),
                          params.cols,
                          CompareApproxNoScaling<double>(params.var_tol()),
                          stream));
}

INSTANTIATE_TEST_SUITE_P(MeanVarTests, MeanVarTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(MeanVarTests, MeanVarTestD, ::testing::ValuesIn(inputsd));

}  // end namespace stats
}  // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/kl_divergence.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/kl_divergence.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>

namespace raft {
namespace stats {

// Parameters for one KL-divergence test case.
struct klDivergenceParam {
  int nElements;     // number of probability entries in each distribution
  double tolerance;  // allowed absolute error against the host reference
};

// Fixture: builds two random distributions, accumulates the reference
// KL divergence on the host, then runs the device implementation.
template <typename DataT>
class klDivergenceTest : public ::testing::TestWithParam<klDivergenceParam> {
 protected:
  void SetUp() override
  {
    params    = ::testing::TestWithParam<klDivergenceParam>::GetParam();
    stream    = resource::get_cuda_stream(handle);
    nElements = params.nElements;

    // Random host-side PDFs with entries drawn uniformly from [0, 1).
    std::vector<DataT> model_h(nElements, 0);
    std::vector<DataT> candidate_h(nElements, 0);
    std::random_device seed_source;
    std::default_random_engine rng(seed_source());
    std::uniform_real_distribution<DataT> prob_dist(0.0, 1.0);
    std::generate(model_h.begin(), model_h.end(), [&]() { return prob_dist(rng); });
    std::generate(candidate_h.begin(), candidate_h.end(), [&]() { return prob_dist(rng); });

    // Allocate, zero, and fill the device-side copies.
    rmm::device_uvector<DataT> model_d(nElements, stream);
    rmm::device_uvector<DataT> candidate_d(nElements, stream);
    RAFT_CUDA_TRY(cudaMemset(model_d.data(), 0, model_d.size() * sizeof(DataT)));
    RAFT_CUDA_TRY(cudaMemset(candidate_d.data(), 0, candidate_d.size() * sizeof(DataT)));
    raft::update_device(model_d.data(), &model_h[0], (int)nElements, stream);
    raft::update_device(candidate_d.data(), &candidate_h[0], (int)nElements, stream);

    // Host reference: sum of p * log(p / q), with the 0 * log(0) term
    // treated as zero by convention.
    for (int i = 0; i < nElements; ++i) {
      if (model_h[i] != 0.0) {
        truthklDivergence += model_h[i] * log(model_h[i] / candidate_h[i]);
      }
    }

    // Device implementation under test.
    computedklDivergence = raft::stats::kl_divergence(
      handle,
      raft::make_device_vector_view<const DataT>(model_d.data(), nElements),
      raft::make_device_vector_view<const DataT>(candidate_d.data(), nElements));
  }

  raft::resources handle;
  klDivergenceParam params;
  int nElements              = 0;
  DataT truthklDivergence    = 0;
  DataT computedklDivergence = 0;
  cudaStream_t stream        = 0;
};

// Test parameter values.
const std::vector<klDivergenceParam> inputs = {
  {500, 0.000001}, {200, 0.001}, {5000, 0.000001}, {500000, 0.000001}};

typedef klDivergenceTest<double> klDivergenceTestClass;
TEST_P(klDivergenceTestClass, Result)
{
  ASSERT_NEAR(computedklDivergence, truthklDivergence, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(klDivergence, klDivergenceTestClass, ::testing::ValuesIn(inputs));

}  // end namespace stats
}  // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/stddev.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/math.cuh>
#include <raft/random/rng.cuh>
#include <raft/stats/mean.cuh>
#include <raft/stats/stddev.cuh>
#include <raft/util/cudart_utils.hpp>

namespace raft {
namespace stats {

// Parameters for one stddev/vars test case.
template <typename T>
struct StdDevInputs {
  T tolerance, mean, stddev;  // comparison tolerance and true distribution parameters
  int rows, cols;             // matrix shape
  bool sample, rowMajor;      // sample-statistics flag and storage layout
  unsigned long long int seed;
};

template <typename T>
::std::ostream& operator<<(::std::ostream& os, const StdDevInputs<T>& dims)
{
  return os;
}

// Fixture: fills a matrix with N(mean, stddev) samples, then computes
// per-column mean, stddev and variance; variance is square-rooted afterwards
// so both result buffers can be checked against the same expected stddev.
template <typename T>
class StdDevTest : public ::testing::TestWithParam<StdDevInputs<T>> {
 public:
  StdDevTest()
    : params(::testing::TestWithParam<StdDevInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      rows(params.rows),
      cols(params.cols),
      data(rows * cols, stream),
      mean_act(cols, stream),
      stddev_act(cols, stream),
      vars_act(cols, stream)
  {
  }

 protected:
  void SetUp() override
  {
    random::RngState r(params.seed);
    int len = rows * cols;
    // Note: the buffers are already sized by the constructor; the redundant
    // resize calls the previous version carried have been dropped.
    normal(handle, r, data.data(), len, params.mean, params.stddev);
    if (params.rowMajor) {
      compute_stats<raft::row_major>(data.data());
    } else {
      compute_stats<raft::col_major>(data.data());
    }
    // Convert the variance buffer to stddev in place so the test can compare
    // it directly against stddev_act.
    raft::matrix::seqRoot(vars_act.data(), T(1), cols, stream);
    resource::sync_stream(handle, stream);
  }

  // Runs mean, stddev and vars for the given storage layout.
  // Fix: all vector views now consistently use an explicit `int` index type;
  // the col-major branch previously created one view as
  // make_device_vector_view<T> (default index type), inconsistent with every
  // other call in this file.
  template <typename LayoutT>
  void compute_stats(const T* input)
  {
    auto data_view = raft::make_device_matrix_view<const T, int, LayoutT>(input, rows, cols);
    mean(handle,
         data_view,
         raft::make_device_vector_view<T, int>(mean_act.data(), cols),
         params.sample);
    stddev(handle,
           data_view,
           raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
           raft::make_device_vector_view<T, int>(stddev_act.data(), cols),
           params.sample);
    vars(handle,
         data_view,
         raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
         raft::make_device_vector_view<T, int>(vars_act.data(), cols),
         params.sample);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  StdDevInputs<T> params;
  int rows, cols;
  rmm::device_uvector<T> data, mean_act, stddev_act, vars_act;
};

const std::vector<StdDevInputs<float>> inputsf = {
  {0.1f, 1.f, 2.f, 1024, 32, true, false, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 64, true, false, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 128, true, false, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 256, true, false, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 32, false, false, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 64, false, false, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 128, false, false, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 256, false, false, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 32, true, true, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 64, true, true, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 128, true, true, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 256, true, true, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 32, false, true, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 64, false, true, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 128, false, true, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 256, false, true, 1234ULL}};

const std::vector<StdDevInputs<double>> inputsd = {
  {0.1, 1.0, 2.0, 1024, 32, true, false, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 64, true, false, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 128, true, false, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 256, true, false, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 32, false, false, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 64, false, false, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 128, false, false, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 256, false, false, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 32, true, true, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 64, true, true, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 128, true, true, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 256, true, true, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 32, false, true, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 64, false, true, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 128, false, true, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 256, false, true, 1234ULL}};

typedef StdDevTest<float> StdDevTestF;
TEST_P(StdDevTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    params.stddev, stddev_act.data(), params.cols, CompareApprox<float>(params.tolerance), stream));
  ASSERT_TRUE(devArrMatch(stddev_act.data(),
                          vars_act.data(),
                          params.cols,
                          CompareApprox<float>(params.tolerance),
                          stream));
}

typedef StdDevTest<double> StdDevTestD;
TEST_P(StdDevTestD, Result)
{
  ASSERT_TRUE(devArrMatch(params.stddev,
                          stddev_act.data(),
                          params.cols,
                          CompareApprox<double>(params.tolerance),
                          stream));
  ASSERT_TRUE(devArrMatch(stddev_act.data(),
                          vars_act.data(),
                          params.cols,
                          CompareApprox<double>(params.tolerance),
                          stream));
}

INSTANTIATE_TEST_SUITE_P(StdDevTests, StdDevTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(StdDevTests, StdDevTestD, ::testing::ValuesIn(inputsd));

}  // end namespace stats
}  // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/accuracy.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <gtest/gtest.h>
#include <optional>
#include <raft/core/interruptible.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/accuracy.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <vector>

namespace raft {
namespace stats {

// Parameters for one accuracy test case.
template <typename T>
struct AccuracyInputs {
  T tolerance;  // allowed error against the host-computed reference
  int nrows;    // number of predictions
  unsigned long long int seed;
};

template <typename T>
::std::ostream& operator<<(::std::ostream& os, const AccuracyInputs<T>& dims)
{
  return os;
}

// Fixture: generates random prediction/reference label pairs in [0, 10),
// runs the device accuracy primitive, and recomputes the fraction of
// matching labels on the host as the expected value.
template <typename T>
class AccuracyTest : public ::testing::TestWithParam<AccuracyInputs<T>> {
 protected:
  AccuracyTest() : stream(resource::get_cuda_stream(handle)) {}

  void SetUp() override
  {
    params = ::testing::TestWithParam<AccuracyInputs<T>>::GetParam();
    raft::random::RngState rng(params.seed);

    rmm::device_uvector<int> predictions(params.nrows, stream);
    rmm::device_uvector<int> ref_predictions(params.nrows, stream);
    uniformInt(handle, rng, predictions.data(), params.nrows, 0, 10);
    uniformInt(handle, rng, ref_predictions.data(), params.nrows, 0, 10);

    // Device implementation under test.
    actualVal =
      accuracy(handle,
               raft::make_device_vector_view<const int>(predictions.data(), params.nrows),
               raft::make_device_vector_view<const int>(ref_predictions.data(), params.nrows));

    // Host reference: fraction of positions where both label arrays agree.
    expectedVal = T(0);
    std::vector<int> pred_h(params.nrows, 0);
    std::vector<int> ref_h(params.nrows, 0);
    raft::update_host(pred_h.data(), predictions.data(), params.nrows, stream);
    raft::update_host(ref_h.data(), ref_predictions.data(), params.nrows, stream);
    unsigned long long num_correct = 0ULL;
    for (int i = 0; i < params.nrows; ++i) {
      if (pred_h[i] == ref_h[i]) { ++num_correct; }
    }
    expectedVal = num_correct * 1.0f / params.nrows;
    raft::interruptible::synchronize(stream);
  }

 protected:
  AccuracyInputs<T> params;
  raft::resources handle;
  cudaStream_t stream = 0;
  T expectedVal, actualVal;
};

const std::vector<AccuracyInputs<float>> inputsf = {
  {0.001f, 30, 1234ULL}, {0.001f, 100, 1234ULL}, {0.001f, 1000, 1234ULL}};
typedef AccuracyTest<float> AccuracyTestF;
TEST_P(AccuracyTestF, Result)
{
  auto eq = raft::CompareApprox<float>(params.tolerance);
  ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(AccuracyTests, AccuracyTestF, ::testing::ValuesIn(inputsf));

const std::vector<AccuracyInputs<double>> inputsd = {
  {0.001, 30, 1234ULL}, {0.001, 100, 1234ULL}, {0.001, 1000, 1234ULL}};
typedef AccuracyTest<double> AccuracyTestD;
TEST_P(AccuracyTestD, Result)
{
  auto eq = raft::CompareApprox<double>(params.tolerance);
  ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(AccuracyTests, AccuracyTestD, ::testing::ValuesIn(inputsd));

}  // end namespace stats
}  // end namespace raft
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/completeness_score.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/completeness_score.cuh>
#include <raft/stats/entropy.cuh>
#include <raft/stats/mutual_info_score.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>

namespace raft {
namespace stats {

// Parameters for one completeness-score test case.
struct completenessParam {
  int nElements;        // number of labels per cluster array
  int lowerLabelRange;  // smallest generated label value
  int upperLabelRange;  // largest generated label value
  bool sameArrays;      // when true, predictions are a copy of the truth labels
  double tolerance;     // allowed absolute error against the reference value
};

// Fixture: builds random truth/prediction labels, derives a reference
// completeness score as mutual_info / entropy(pred) on the host, then
// runs the device implementation.
template <typename T>
class completenessTest : public ::testing::TestWithParam<completenessParam> {
 protected:
  completenessTest() : stream(resource::get_cuda_stream(handle)) {}

  void SetUp() override
  {
    params          = ::testing::TestWithParam<completenessParam>::GetParam();
    nElements       = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;

    // Random host-side label arrays drawn uniformly from [lower, upper].
    std::vector<int> h_truth(nElements, 0);
    std::vector<int> h_pred(nElements, 0);
    std::random_device seed_source;
    std::default_random_engine rng(seed_source());
    std::uniform_int_distribution<int> label_dist(lowerLabelRange, upperLabelRange);
    std::generate(h_truth.begin(), h_truth.end(), [&]() { return label_dist(rng); });
    if (params.sameArrays) {
      h_pred = h_truth;
    } else {
      std::generate(h_pred.begin(), h_pred.end(), [&]() { return label_dist(rng); });
    }

    // Copy both label arrays to the device.
    rmm::device_uvector<T> truthClusterArray(nElements, stream);
    rmm::device_uvector<T> predClusterArray(nElements, stream);
    raft::update_device(truthClusterArray.data(), h_truth.data(), (int)nElements, stream);
    raft::update_device(predClusterArray.data(), h_pred.data(), (int)nElements, stream);

    // Reference value: completeness = MI(truth, pred) / H(pred), defined as
    // 1.0 when the prediction entropy is zero or the input is empty.
    double refMI = raft::stats::mutual_info_score(truthClusterArray.data(),
                                                  predClusterArray.data(),
                                                  nElements,
                                                  lowerLabelRange,
                                                  upperLabelRange,
                                                  stream);
    double refEntropy = raft::stats::entropy(
      predClusterArray.data(), nElements, lowerLabelRange, upperLabelRange, stream);
    truthCompleteness = refEntropy ? refMI / refEntropy : 1.0;
    if (nElements == 0) truthCompleteness = 1.0;

    // Device implementation under test.
    computedCompleteness = raft::stats::completeness_score(
      handle,
      raft::make_device_vector_view<const T>(truthClusterArray.data(), nElements),
      raft::make_device_vector_view<const T>(predClusterArray.data(), nElements),
      lowerLabelRange,
      upperLabelRange);
  }

  raft::resources handle;
  completenessParam params;
  T lowerLabelRange, upperLabelRange;
  int nElements               = 0;
  double truthCompleteness    = 0;
  double computedCompleteness = 0;
  cudaStream_t stream         = 0;
};

// Test parameter values.
const std::vector<completenessParam> inputs = {{199, 1, 10, false, 0.000001},
                                               {200, 15, 100, false, 0.000001},
                                               {100, 1, 20, false, 0.000001},
                                               {10, 1, 10, false, 0.000001},
                                               {198, 1, 100, false, 0.000001},
                                               {300, 3, 99, false, 0.000001},
                                               {199, 1, 10, true, 0.000001},
                                               {200, 15, 100, true, 0.000001},
                                               {100, 1, 20, true, 0.000001},
                                               {10, 1, 10, true, 0.000001},
                                               {198, 1, 100, true, 0.000001},
                                               {300, 3, 99, true, 0.000001}};

typedef completenessTest<int> completenessTestClass;
TEST_P(completenessTestClass, Result)
{
  ASSERT_NEAR(computedCompleteness, truthCompleteness, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(completeness, completenessTestClass, ::testing::ValuesIn(inputs));

}  // end namespace stats
}  // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/dispersion.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <optional> #include <raft/core/interruptible.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/random/rng.cuh> #include <raft/stats/dispersion.cuh> #include <raft/util/cuda_utils.cuh> #include <rmm/device_uvector.hpp> #include <stdio.h> #include <stdlib.h> #include <vector> namespace raft { namespace stats { template <typename T> struct DispersionInputs { T tolerance; int dim, clusters; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const DispersionInputs<T>& dims) { return os; } template <typename T> class DispersionTest : public ::testing::TestWithParam<DispersionInputs<T>> { protected: DispersionTest() : stream(resource::get_cuda_stream(handle)), exp_mean(0, stream), act_mean(0, stream) { } void SetUp() override { params = ::testing::TestWithParam<DispersionInputs<T>>::GetParam(); raft::random::RngState r(params.seed); int len = params.clusters * params.dim; rmm::device_uvector<T> data(len, stream); rmm::device_uvector<int> counts(params.clusters, stream); exp_mean.resize(params.dim, stream); act_mean.resize(params.dim, stream); uniform(handle, r, data.data(), len, (T)-1.0, (T)1.0); uniformInt(handle, r, counts.data(), params.clusters, 1, 100); std::vector<int> h_counts(params.clusters, 0); raft::update_host(&(h_counts[0]), counts.data(), 
params.clusters, stream); npoints = 0; for (const auto& val : h_counts) { npoints += val; } actualVal = cluster_dispersion( handle, raft::make_device_matrix_view<const T, int>(data.data(), params.clusters, params.dim), raft::make_device_vector_view<const int, int>(counts.data(), params.clusters), std::make_optional(raft::make_device_vector_view<T, int>(act_mean.data(), params.dim)), npoints); expectedVal = T(0); std::vector<T> h_data(len, T(0)); raft::update_host(&(h_data[0]), data.data(), len, stream); std::vector<T> mean(params.dim, T(0)); for (int i = 0; i < params.clusters; ++i) { for (int j = 0; j < params.dim; ++j) { mean[j] += h_data[i * params.dim + j] * T(h_counts[i]); } } for (int i = 0; i < params.dim; ++i) { mean[i] /= T(npoints); } raft::update_device(exp_mean.data(), &(mean[0]), params.dim, stream); for (int i = 0; i < params.clusters; ++i) { for (int j = 0; j < params.dim; ++j) { auto diff = h_data[i * params.dim + j] - mean[j]; expectedVal += diff * diff * T(h_counts[i]); } } expectedVal = sqrt(expectedVal); raft::interruptible::synchronize(stream); } protected: DispersionInputs<T> params; raft::resources handle; rmm::device_uvector<T> exp_mean, act_mean; cudaStream_t stream = 0; int npoints; T expectedVal, actualVal; }; const std::vector<DispersionInputs<float>> inputsf = { {0.001f, 10, 1000, 1234ULL}, {0.001f, 100, 100, 1234ULL}, {0.001f, 1000, 1000, 1234ULL}}; typedef DispersionTest<float> DispersionTestF; TEST_P(DispersionTestF, Result) { auto eq = raft::CompareApprox<float>(params.tolerance); ASSERT_TRUE(devArrMatch(exp_mean.data(), act_mean.data(), params.dim, eq)); ASSERT_TRUE(match(expectedVal, actualVal, eq)); } INSTANTIATE_TEST_CASE_P(DispersionTests, DispersionTestF, ::testing::ValuesIn(inputsf)); const std::vector<DispersionInputs<double>> inputsd = { {0.001, 10, 1000, 1234ULL}, {0.001, 100, 100, 1234ULL}, {0.001, 1000, 1000, 1234ULL}}; typedef DispersionTest<double> DispersionTestD; TEST_P(DispersionTestD, Result) { auto eq = 
raft::CompareApprox<double>(params.tolerance); ASSERT_TRUE(devArrMatch(exp_mean.data(), act_mean.data(), params.dim, eq)); ASSERT_TRUE(match(expectedVal, actualVal, eq)); } INSTANTIATE_TEST_CASE_P(DispersionTests, DispersionTestD, ::testing::ValuesIn(inputsd)); } // end namespace stats } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/regression_metrics.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <algorithm> #include <gtest/gtest.h> #include <optional> #include <raft/core/interruptible.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/random/rng.cuh> #include <raft/stats/regression_metrics.cuh> #include <raft/util/cuda_utils.cuh> #include <rmm/device_uvector.hpp> #include <stdio.h> #include <stdlib.h> #include <vector> namespace raft { namespace stats { template <typename T> struct RegressionInputs { T tolerance; int len; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const RegressionInputs<T>& dims) { return os; } template <typename T> void naive_reg_metrics(std::vector<T>& predictions, std::vector<T>& ref_predictions, double& mean_abs_error, double& mean_squared_error, double& median_abs_error) { auto len = predictions.size(); double abs_diff = 0; double sq_diff = 0; std::vector<double> abs_errors(len); for (std::size_t i = 0; i < len; ++i) { auto diff = predictions[i] - ref_predictions[i]; abs_diff += abs(diff); sq_diff += diff * diff; abs_errors[i] = abs(diff); } mean_abs_error = abs_diff / len; mean_squared_error = sq_diff / len; std::sort(abs_errors.begin(), abs_errors.end()); auto middle = len / 2; if (len % 2 == 1) { median_abs_error = abs_errors[middle]; } else { median_abs_error = (abs_errors[middle] + abs_errors[middle - 1]) / 2; } } template <typename T> 
class RegressionTest : public ::testing::TestWithParam<RegressionInputs<T>> { protected: RegressionTest() : stream(resource::get_cuda_stream(handle)) {} void SetUp() override { params = ::testing::TestWithParam<RegressionInputs<T>>::GetParam(); raft::random::RngState r(params.seed); rmm::device_uvector<T> predictions(params.len, stream); rmm::device_uvector<T> ref_predictions(params.len, stream); uniform(handle, r, predictions.data(), params.len, T(-10.0), T(10.0)); uniform(handle, r, ref_predictions.data(), params.len, T(-10.0), T(10.0)); regression_metrics(handle, raft::make_device_vector_view<const T>(predictions.data(), params.len), raft::make_device_vector_view<const T>(ref_predictions.data(), params.len), raft::make_host_scalar_view(&mean_abs_error), raft::make_host_scalar_view(&mean_squared_error), raft::make_host_scalar_view(&median_abs_error)); std::vector<T> h_predictions(params.len, 0); std::vector<T> h_ref_predictions(params.len, 0); raft::update_host(h_predictions.data(), predictions.data(), params.len, stream); raft::update_host(h_ref_predictions.data(), ref_predictions.data(), params.len, stream); naive_reg_metrics(h_predictions, h_ref_predictions, ref_mean_abs_error, ref_mean_squared_error, ref_median_abs_error); raft::interruptible::synchronize(stream); } protected: raft::resources handle; RegressionInputs<T> params; cudaStream_t stream = 0; double mean_abs_error = 0; double mean_squared_error = 0; double median_abs_error = 0; double ref_mean_abs_error = 0; double ref_mean_squared_error = 0; double ref_median_abs_error = 0; }; const std::vector<RegressionInputs<float>> inputsf = { {0.001f, 30, 1234ULL}, {0.001f, 100, 1234ULL}, {0.001f, 4000, 1234ULL}}; typedef RegressionTest<float> RegressionTestF; TEST_P(RegressionTestF, Result) { auto eq = raft::CompareApprox<float>(params.tolerance); ASSERT_TRUE(match(ref_mean_abs_error, mean_abs_error, eq)); ASSERT_TRUE(match(ref_mean_squared_error, mean_squared_error, eq)); 
ASSERT_TRUE(match(ref_median_abs_error, median_abs_error, eq)); } INSTANTIATE_TEST_CASE_P(RegressionTests, RegressionTestF, ::testing::ValuesIn(inputsf)); const std::vector<RegressionInputs<double>> inputsd = { {0.001, 30, 1234ULL}, {0.001, 100, 1234ULL}, {0.001, 4000, 1234ULL}}; typedef RegressionTest<double> RegressionTestD; TEST_P(RegressionTestD, Result) { auto eq = raft::CompareApprox<double>(params.tolerance); ASSERT_TRUE(match(ref_mean_abs_error, mean_abs_error, eq)); ASSERT_TRUE(match(ref_mean_squared_error, mean_squared_error, eq)); ASSERT_TRUE(match(ref_median_abs_error, median_abs_error, eq)); } INSTANTIATE_TEST_CASE_P(RegressionTests, RegressionTestD, ::testing::ValuesIn(inputsd)); } // end namespace stats } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/homogeneity_score.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <algorithm> #include <gtest/gtest.h> #include <iostream> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/homogeneity_score.cuh> #include <raft/stats/mutual_info_score.cuh> #include <raft/util/cudart_utils.hpp> #include <random> namespace raft { namespace stats { // parameter structure definition struct homogeneityParam { int nElements; int lowerLabelRange; int upperLabelRange; bool sameArrays; double tolerance; }; // test fixture class template <typename T> class homogeneityTest : public ::testing::TestWithParam<homogeneityParam> { protected: // the constructor void SetUp() override { // getting the parameters params = ::testing::TestWithParam<homogeneityParam>::GetParam(); nElements = params.nElements; lowerLabelRange = params.lowerLabelRange; upperLabelRange = params.upperLabelRange; stream = resource::get_cuda_stream(handle); // generating random value test input std::vector<int> arr1(nElements, 0); std::vector<int> arr2(nElements, 0); std::random_device rd; std::default_random_engine dre(rd()); std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange); std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); }); if (params.sameArrays) { arr2 = arr1; } else { std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); }); } // allocating and initializing memory to the 
GPU rmm::device_uvector<T> truthClusterArray(nElements, stream); rmm::device_uvector<T> predClusterArray(nElements, stream); raft::update_device(truthClusterArray.data(), &arr1[0], (int)nElements, stream); raft::update_device(predClusterArray.data(), &arr2[0], (int)nElements, stream); // calculating the golden output double truthMI, truthEntropy; truthMI = raft::stats::mutual_info_score(truthClusterArray.data(), predClusterArray.data(), nElements, lowerLabelRange, upperLabelRange, stream); truthEntropy = raft::stats::entropy( truthClusterArray.data(), nElements, lowerLabelRange, upperLabelRange, stream); if (truthEntropy) { truthHomogeneity = truthMI / truthEntropy; } else truthHomogeneity = 1.0; if (nElements == 0) truthHomogeneity = 1.0; // calling the homogeneity CUDA implementation computedHomogeneity = raft::stats::homogeneity_score( handle, raft::make_device_vector_view<const T>(truthClusterArray.data(), nElements), raft::make_device_vector_view<const T>(predClusterArray.data(), nElements), lowerLabelRange, upperLabelRange); } // declaring the data values raft::resources handle; homogeneityParam params; T lowerLabelRange, upperLabelRange; int nElements = 0; double truthHomogeneity = 0; double computedHomogeneity = 0; cudaStream_t stream = 0; }; // setting test parameter values const std::vector<homogeneityParam> inputs = {{199, 1, 10, false, 0.000001}, {200, 15, 100, false, 0.000001}, {100, 1, 20, false, 0.000001}, {10, 1, 10, false, 0.000001}, {198, 1, 100, false, 0.000001}, {300, 3, 99, false, 0.000001}, {199, 1, 10, true, 0.000001}, {200, 15, 100, true, 0.000001}, {100, 1, 20, true, 0.000001}, {10, 1, 10, true, 0.000001}, {198, 1, 100, true, 0.000001}, {300, 3, 99, true, 0.000001}}; // writing the test suite typedef homogeneityTest<int> homogeneityTestClass; TEST_P(homogeneityTestClass, Result) { ASSERT_NEAR(computedHomogeneity, truthHomogeneity, params.tolerance); } INSTANTIATE_TEST_CASE_P(homogeneity, homogeneityTestClass, ::testing::ValuesIn(inputs)); } 
// end namespace stats } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/neighborhood_recall.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../neighbors/ann_utils.cuh" #include "../test_utils.h" #include <raft/core/host_mdarray.hpp> #include <raft/core/mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <cuvs_internal/neighbors/naive_knn.cuh> #include <raft/stats/neighborhood_recall.cuh> #include <raft/util/itertools.hpp> #include <gtest/gtest.h> namespace raft::stats { struct NeighborhoodRecallInputs { int n_rows; int n_cols; int k; }; template <typename DistanceT, typename IdxT> class NeighborhoodRecallTest : public ::testing::TestWithParam<NeighborhoodRecallInputs> { public: NeighborhoodRecallTest() : ps{::testing::TestWithParam<NeighborhoodRecallInputs>::GetParam()}, data_1{raft::make_device_matrix<DistanceT, IdxT>(res, ps.n_rows, ps.n_cols)}, data_2{raft::make_device_matrix<DistanceT, IdxT>(res, ps.n_rows, ps.n_cols)} { } protected: void test_recall() { size_t queries_size = ps.n_rows * ps.k; // calculate nn for dataset 1 auto distances_1 = raft::make_device_matrix<DistanceT, IdxT>(res, ps.n_rows, ps.k); auto indices_1 = raft::make_device_matrix<IdxT, IdxT>(res, ps.n_rows, ps.k); cuvs::neighbors::naive_knn<DistanceT, DistanceT, IdxT>( res, distances_1.data_handle(), indices_1.data_handle(), data_1.data_handle(), data_1.data_handle(), ps.n_rows, ps.n_rows, ps.n_cols, ps.k, cuvs::distance::DistanceType::L2Expanded); std::vector<DistanceT> 
distances_1_h(queries_size); std::vector<IdxT> indices_1_h(queries_size); raft::copy(distances_1_h.data(), distances_1.data_handle(), ps.n_rows * ps.k, raft::resource::get_cuda_stream(res)); raft::copy(indices_1_h.data(), indices_1.data_handle(), ps.n_rows * ps.k, raft::resource::get_cuda_stream(res)); // calculate nn for dataset 2 auto distances_2 = raft::make_device_matrix<DistanceT, IdxT>(res, ps.n_rows, ps.k); auto indices_2 = raft::make_device_matrix<IdxT, IdxT>(res, ps.n_rows, ps.k); cuvs::neighbors::naive_knn<DistanceT, DistanceT, IdxT>( res, distances_2.data_handle(), indices_2.data_handle(), data_2.data_handle(), data_2.data_handle(), ps.n_rows, ps.n_rows, ps.n_cols, ps.k, cuvs::distance::DistanceType::L2Expanded); std::vector<DistanceT> distances_2_h(queries_size); std::vector<IdxT> indices_2_h(queries_size); raft::copy(distances_2_h.data(), distances_2.data_handle(), ps.n_rows * ps.k, raft::resource::get_cuda_stream(res)); raft::copy(indices_2_h.data(), indices_2.data_handle(), ps.n_rows * ps.k, raft::resource::get_cuda_stream(res)); raft::resource::sync_stream(res); // find CPU recall scores [[maybe_unused]] auto [indices_only_recall_h, mc1, tc1] = cuvs::neighbors::calc_recall(indices_1_h, indices_2_h, ps.n_rows, ps.k); [[maybe_unused]] auto [recall_h, mc2, tc2] = cuvs::neighbors::calc_recall( indices_1_h, indices_2_h, distances_1_h, distances_2_h, ps.n_rows, ps.k, 0.001); // find GPU recall scores auto s1 = 0; auto indices_only_recall_scalar = raft::make_host_scalar<double>(s1); neighborhood_recall(res, raft::make_const_mdspan(indices_1.view()), raft::make_const_mdspan(indices_2.view()), indices_only_recall_scalar.view()); auto s2 = 0; auto recall_scalar = raft::make_host_scalar<double>(s2); DistanceT s3 = 0.001; auto eps_mda = raft::make_host_scalar<DistanceT>(s3); neighborhood_recall<IdxT, IdxT, double, DistanceT>(res, raft::make_const_mdspan(indices_1.view()), raft::make_const_mdspan(indices_2.view()), recall_scalar.view(), 
raft::make_const_mdspan(distances_1.view()), raft::make_const_mdspan(distances_2.view())); // assert correctness ASSERT_TRUE(raft::match(indices_only_recall_h, *indices_only_recall_scalar.data_handle(), raft::CompareApprox<double>(0.01))); ASSERT_TRUE( raft::match(recall_h, *recall_scalar.data_handle(), raft::CompareApprox<double>(0.01))); } void SetUp() override { // form two random datasets raft::random::Rng r1(1234ULL); r1.normal(data_1.data_handle(), ps.n_rows * ps.n_cols, DistanceT(0.1), DistanceT(2.0), raft::resource::get_cuda_stream(res)); raft::random::Rng r2(21111ULL); r2.normal(data_2.data_handle(), ps.n_rows * ps.n_cols, DistanceT(0.1), DistanceT(2.0), raft::resource::get_cuda_stream(res)); resource::sync_stream(res); } private: raft::resources res; NeighborhoodRecallInputs ps; raft::device_matrix<DistanceT, IdxT> data_1; raft::device_matrix<DistanceT, IdxT> data_2; }; const std::vector<NeighborhoodRecallInputs> inputs = raft::util::itertools::product<NeighborhoodRecallInputs>({10, 50, 100}, // n_rows {80, 100}, // n_cols {32, 64}); // k using NeighborhoodRecallTestF_U32 = NeighborhoodRecallTest<float, std::uint32_t>; TEST_P(NeighborhoodRecallTestF_U32, AnnCagra) { this->test_recall(); } INSTANTIATE_TEST_CASE_P(NeighborhoodRecallTest, NeighborhoodRecallTestF_U32, ::testing::ValuesIn(inputs)); } // end namespace raft::stats
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/r2_score.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <optional> #include <raft/core/interruptible.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/random/rng.cuh> #include <raft/stats/r2_score.cuh> #include <raft/util/cuda_utils.cuh> #include <rmm/device_uvector.hpp> #include <stdio.h> #include <stdlib.h> #include <vector> namespace raft { namespace stats { template <typename T> struct R2_scoreInputs { T tolerance; int nrows; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const R2_scoreInputs<T>& dims) { return os; } template <typename T> class R2_scoreTest : public ::testing::TestWithParam<R2_scoreInputs<T>> { protected: R2_scoreTest() : stream(resource::get_cuda_stream(handle)) {} void SetUp() override { params = ::testing::TestWithParam<R2_scoreInputs<T>>::GetParam(); raft::random::RngState r(params.seed); rmm::device_uvector<T> y(params.nrows, stream); rmm::device_uvector<T> y_hat(params.nrows, stream); uniform(handle, r, y.data(), params.nrows, (T)-1.0, (T)1.0); uniform(handle, r, y_hat.data(), params.nrows, (T)-1.0, (T)1.0); actualVal = r2_score(handle, raft::make_device_vector_view<const T>(y.data(), params.nrows), raft::make_device_vector_view<const T>(y_hat.data(), params.nrows)); expectedVal = T(0); std::vector<T> h_y(params.nrows, 0); std::vector<T> h_y_hat(params.nrows, 0); 
raft::update_host(h_y.data(), y.data(), params.nrows, stream); raft::update_host(h_y_hat.data(), y_hat.data(), params.nrows, stream); T mean = T(0); for (int i = 0; i < params.nrows; ++i) { mean += h_y[i]; } mean /= params.nrows; std::vector<T> sse_arr(params.nrows, 0); std::vector<T> ssto_arr(params.nrows, 0); T sse = T(0); T ssto = T(0); for (int i = 0; i < params.nrows; ++i) { sse += (h_y[i] - h_y_hat[i]) * (h_y[i] - h_y_hat[i]); ssto += (h_y[i] - mean) * (h_y[i] - mean); } expectedVal = 1.0 - sse / ssto; raft::interruptible::synchronize(stream); } protected: R2_scoreInputs<T> params; raft::resources handle; cudaStream_t stream = 0; T expectedVal, actualVal; }; const std::vector<R2_scoreInputs<float>> inputsf = { {0.001f, 30, 1234ULL}, {0.001f, 100, 1234ULL}, {0.001f, 1000, 1234ULL}}; typedef R2_scoreTest<float> R2_scoreTestF; TEST_P(R2_scoreTestF, Result) { auto eq = raft::CompareApprox<float>(params.tolerance); ASSERT_TRUE(match(expectedVal, actualVal, eq)); } INSTANTIATE_TEST_CASE_P(R2_scoreTests, R2_scoreTestF, ::testing::ValuesIn(inputsf)); const std::vector<R2_scoreInputs<double>> inputsd = { {0.001, 30, 1234ULL}, {0.001, 100, 1234ULL}, {0.001, 1000, 1234ULL}}; typedef R2_scoreTest<double> R2_scoreTestD; TEST_P(R2_scoreTestD, Result) { auto eq = raft::CompareApprox<double>(params.tolerance); ASSERT_TRUE(match(expectedVal, actualVal, eq)); } INSTANTIATE_TEST_CASE_P(R2_scoreTests, R2_scoreTestD, ::testing::ValuesIn(inputsd)); } // end namespace stats } // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/information_criterion.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/information_criterion.cuh> #include <raft/core/resources.hpp> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <gtest/gtest.h> #include <cmath> #include <random> #include <vector> namespace raft { namespace stats { template <typename T> void naive_ic( T* h_ic, const T* h_loglike, IC_Type ic_type, int n_params, int batch_size, int n_samples) { T ic_base{}; T N = static_cast<T>(n_params); T M = static_cast<T>(n_samples); switch (ic_type) { case AIC: ic_base = (T)2 * N; break; case AICc: ic_base = (T)2 * (N + (N * (N + (T)1)) / (M - N - (T)1)); break; case BIC: ic_base = std::log(M) * N; break; } #pragma omp parallel for for (int bid = 0; bid < batch_size; bid++) { h_ic[bid] = ic_base - (T)2.0 * h_loglike[bid]; } } template <typename T> struct BatchedICInputs { int batch_size; int n_params; int n_samples; IC_Type ic_type; T tolerance; }; template <typename T> class BatchedICTest : public ::testing::TestWithParam<BatchedICInputs<T>> { public: BatchedICTest() : params(::testing::TestWithParam<BatchedICInputs<T>>::GetParam()), stream(resource::get_cuda_stream(handle)), res_d(sizeof(T) * params.batch_size, stream) { } protected: void SetUp() override { using std::vector; // Create arrays std::vector<T> loglike_h = std::vector<T>(params.batch_size); 
res_h.resize(params.batch_size); rmm::device_uvector<T> loglike_d(sizeof(T) * params.batch_size, stream); // Generate random data std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<T> udis(0.001, 1.0); // 0 has no log for (int i = 0; i < params.batch_size; i++) loglike_h[i] = std::log(udis(gen)); // Copy the data to the device raft::update_device(loglike_d.data(), loglike_h.data(), params.batch_size, stream); // Compute the tested results information_criterion_batched( handle, raft::make_device_vector_view<const T>(loglike_d.data(), params.batch_size), raft::make_device_vector_view(res_d.data(), params.batch_size), params.ic_type, params.n_params, params.n_samples); // Compute the expected results naive_ic(res_h.data(), loglike_h.data(), params.ic_type, params.n_params, params.batch_size, params.n_samples); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); } protected: raft::resources handle; cudaStream_t stream = 0; BatchedICInputs<T> params; rmm::device_uvector<T> res_d; std::vector<T> res_h; }; // Test parameters (op, n_batches, m, n, p, q, tolerance) const std::vector<BatchedICInputs<double>> inputsd = { {1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}}; // Test parameters (op, n_batches, m, n, p, q, tolerance) const std::vector<BatchedICInputs<float>> inputsf = { {1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}}; using BatchedICTestD = BatchedICTest<double>; using BatchedICTestF = BatchedICTest<float>; TEST_P(BatchedICTestD, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_d.data(), params.batch_size, raft::CompareApprox<double>(params.tolerance), stream)); } TEST_P(BatchedICTestF, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_d.data(), params.batch_size, raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestF, 
::testing::ValuesIn(inputsf)); } // namespace stats } // namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/cov.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/random/rng.cuh> #include <raft/stats/cov.cuh> #include <raft/stats/mean.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> namespace raft { namespace stats { template <typename T> struct CovInputs { T tolerance, mean, var; int rows, cols; bool sample, rowMajor, stable; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const CovInputs<T>& dims) { return os; } template <typename T> class CovTest : public ::testing::TestWithParam<CovInputs<T>> { protected: CovTest() : data(0, stream), mean_act(0, stream), cov_act(0, stream), cov_cm(0, stream), cov_cm_ref(0, stream) { } void SetUp() override { raft::resources handle; cudaStream_t stream = resource::get_cuda_stream(handle); params = ::testing::TestWithParam<CovInputs<T>>::GetParam(); params.tolerance *= 2; raft::random::RngState r(params.seed); int rows = params.rows, cols = params.cols; auto len = rows * cols; T var = params.var; data.resize(len, stream); mean_act.resize(cols, stream); cov_act.resize(cols * cols, stream); normal(handle, r, data.data(), len, params.mean, var); raft::stats::mean( mean_act.data(), data.data(), cols, rows, params.sample, params.rowMajor, stream); if (params.rowMajor) { using layout = raft::row_major; cov(handle, 
raft::make_device_matrix_view<T, std::uint32_t, layout>(data.data(), rows, cols), raft::make_device_vector_view<const T, std::uint32_t>(mean_act.data(), cols), raft::make_device_matrix_view<T, std::uint32_t, layout>(cov_act.data(), cols, cols), params.sample, params.stable); } else { using layout = raft::col_major; cov(handle, raft::make_device_matrix_view<T, std::uint32_t, layout>(data.data(), rows, cols), raft::make_device_vector_view<const T, std::uint32_t>(mean_act.data(), cols), raft::make_device_matrix_view<T, std::uint32_t, layout>(cov_act.data(), cols, cols), params.sample, params.stable); } T data_h[6] = {1.0, 2.0, 5.0, 4.0, 2.0, 1.0}; T cov_cm_ref_h[4] = {4.3333, -2.8333, -2.8333, 2.333}; cov_cm.resize(4, stream); cov_cm_ref.resize(4, stream); rmm::device_uvector<T> data_cm(6, stream); rmm::device_uvector<T> mean_cm(2, stream); raft::update_device(data_cm.data(), data_h, 6, stream); raft::update_device(cov_cm_ref.data(), cov_cm_ref_h, 4, stream); raft::stats::mean(mean_cm.data(), data_cm.data(), 2, 3, true, false, stream); cov(handle, cov_cm.data(), data_cm.data(), mean_cm.data(), 2, 3, true, false, true, stream); } protected: cublasHandle_t handle; cudaStream_t stream = 0; CovInputs<T> params; rmm::device_uvector<T> data, mean_act, cov_act, cov_cm, cov_cm_ref; }; ///@todo: add stable=false after it has been implemented const std::vector<CovInputs<float>> inputsf = { {0.03f, 1.f, 2.f, 32 * 1024, 32, true, false, true, 1234ULL}, {0.03f, 1.f, 2.f, 32 * 1024, 64, true, false, true, 1234ULL}, {0.03f, 1.f, 2.f, 32 * 1024, 128, true, false, true, 1234ULL}, {0.03f, 1.f, 2.f, 32 * 1024, 256, true, false, true, 1234ULL}, {0.03f, -1.f, 2.f, 32 * 1024, 32, false, false, true, 1234ULL}, {0.03f, -1.f, 2.f, 32 * 1024, 64, false, false, true, 1234ULL}, {0.03f, -1.f, 2.f, 32 * 1024, 128, false, false, true, 1234ULL}, {0.03f, -1.f, 2.f, 32 * 1024, 256, false, false, true, 1234ULL}, {0.03f, 1.f, 2.f, 32 * 1024, 32, true, true, true, 1234ULL}, {0.03f, 1.f, 2.f, 32 * 1024, 
64, true, true, true, 1234ULL}, {0.03f, 1.f, 2.f, 32 * 1024, 128, true, true, true, 1234ULL}, {0.03f, 1.f, 2.f, 32 * 1024, 256, true, true, true, 1234ULL}, {0.03f, -1.f, 2.f, 32 * 1024, 32, false, true, true, 1234ULL}, {0.03f, -1.f, 2.f, 32 * 1024, 64, false, true, true, 1234ULL}, {0.03f, -1.f, 2.f, 32 * 1024, 128, false, true, true, 1234ULL}, {0.03f, -1.f, 2.f, 32 * 1024, 256, false, true, true, 1234ULL}}; const std::vector<CovInputs<double>> inputsd = { {0.03, 1.0, 2.0, 32 * 1024, 32, true, false, true, 1234ULL}, {0.03, 1.0, 2.0, 32 * 1024, 64, true, false, true, 1234ULL}, {0.03, 1.0, 2.0, 32 * 1024, 128, true, false, true, 1234ULL}, {0.03, 1.0, 2.0, 32 * 1024, 256, true, false, true, 1234ULL}, {0.03, -1.0, 2.0, 32 * 1024, 32, false, false, true, 1234ULL}, {0.03, -1.0, 2.0, 32 * 1024, 64, false, false, true, 1234ULL}, {0.03, -1.0, 2.0, 32 * 1024, 128, false, false, true, 1234ULL}, {0.03, -1.0, 2.0, 32 * 1024, 256, false, false, true, 1234ULL}, {0.03, 1.0, 2.0, 32 * 1024, 32, true, true, true, 1234ULL}, {0.03, 1.0, 2.0, 32 * 1024, 64, true, true, true, 1234ULL}, {0.03, 1.0, 2.0, 32 * 1024, 128, true, true, true, 1234ULL}, {0.03, 1.0, 2.0, 32 * 1024, 256, true, true, true, 1234ULL}, {0.03, -1.0, 2.0, 32 * 1024, 32, false, true, true, 1234ULL}, {0.03, -1.0, 2.0, 32 * 1024, 64, false, true, true, 1234ULL}, {0.03, -1.0, 2.0, 32 * 1024, 128, false, true, true, 1234ULL}, {0.03, -1.0, 2.0, 32 * 1024, 256, false, true, true, 1234ULL}}; typedef CovTest<float> CovTestF; TEST_P(CovTestF, Result) { ASSERT_TRUE(raft::diagonalMatch(params.var * params.var, cov_act.data(), params.cols, params.cols, raft::CompareApprox<float>(params.tolerance))); } typedef CovTest<double> CovTestD; TEST_P(CovTestD, Result) { ASSERT_TRUE(raft::diagonalMatch(params.var * params.var, cov_act.data(), params.cols, params.cols, raft::CompareApprox<double>(params.tolerance))); } typedef CovTest<float> CovTestSmallF; TEST_P(CovTestSmallF, Result) { ASSERT_TRUE(raft::devArrMatch( cov_cm_ref.data(), 
cov_cm.data(), 2, 2, raft::CompareApprox<float>(params.tolerance))); } typedef CovTest<double> CovTestSmallD; TEST_P(CovTestSmallD, Result) { ASSERT_TRUE(raft::devArrMatch( cov_cm_ref.data(), cov_cm.data(), 2, 2, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(CovTests, CovTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CovTests, CovTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(CovTests, CovTestSmallF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(CovTests, CovTestSmallD, ::testing::ValuesIn(inputsd)); } // namespace stats } // namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/silhouette_score.cu
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <algorithm>
#include <climits>
#include <cuvs/distance/distance_types.hpp>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/silhouette_score.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
#include <rmm/device_uvector.hpp>
#include <vector>

namespace raft {
namespace stats {

// parameter structure definition
struct silhouetteScoreParam {
  int nRows;
  int nCols;
  int nLabels;
  cuvs::distance::DistanceType metric;
  int chunk;  // batch size used by silhouette_score_batched
  double tolerance;
};

// Test fixture: builds a random (nRows x nCols) dataset with random labels,
// computes a reference silhouette score on the host, and compares it against
// both the full and the batched device implementations.
template <typename LabelT, typename DataT>
class silhouetteScoreTest : public ::testing::TestWithParam<silhouetteScoreParam> {
 protected:
  silhouetteScoreTest()
    : d_X(0, resource::get_cuda_stream(handle)),
      sampleSilScore(0, resource::get_cuda_stream(handle)),
      d_labels(0, resource::get_cuda_stream(handle))
  {
  }

  // Generates the test input, uploads it to the device, and computes the
  // reference (host-side) silhouette score into truthSilhouetteScore.
  void host_silhouette_score()
  {
    // generating random value test input; the engine is seeded from the test
    // dimensions so every run of a given parameter set is deterministic
    std::vector<double> h_X(nElements, 0.0);
    std::vector<int> h_labels(nRows, 0);
    std::default_random_engine dre(nElements * nLabels);
    std::uniform_int_distribution<int> intGenerator(0, nLabels - 1);
    std::uniform_real_distribution<double> realGenerator(0, 100);
    std::generate(h_X.begin(), h_X.end(), [&]() { return realGenerator(dre); });
    std::generate(h_labels.begin(), h_labels.end(), [&]() { return intGenerator(dre); });

    // allocating and initializing memory on the GPU
    auto stream = resource::get_cuda_stream(handle);
    d_X.resize(nElements, stream);
    // BUG FIX: there is one label per row, not one per matrix element.
    // The previous code sized d_labels to nElements and copied nElements ints
    // out of h_labels (which holds only nRows), reading past the host buffer.
    d_labels.resize(nRows, stream);
    sampleSilScore.resize(nRows, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(d_X.data(), 0, d_X.size() * sizeof(DataT), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(d_labels.data(), 0, d_labels.size() * sizeof(LabelT), stream));
    raft::update_device(d_X.data(), h_X.data(), nElements, stream);
    raft::update_device(d_labels.data(), h_labels.data(), nRows, stream);

    // finding the distance matrix (device), then copying it to the host for
    // the reference computation. std::vector replaces the leaked mallocs the
    // previous version used (which also allocated with sizeof(double*)).
    rmm::device_uvector<double> d_distanceMatrix(nRows * nRows, stream);
    std::vector<double> h_distanceMatrix(nRows * nRows);
    cuvs::distance::pairwise_distance(
      handle, d_X.data(), d_X.data(), d_distanceMatrix.data(), nRows, nRows, nCols, params.metric);
    raft::update_host(h_distanceMatrix.data(), d_distanceMatrix.data(), nRows * nRows, stream);
    // BUG FIX: update_host is asynchronous on `stream`; synchronize before the
    // host loops below read h_distanceMatrix.
    resource::sync_stream(handle, stream);

    // per-cluster sample counts
    std::vector<double> binCountArray(nLabels, 0.0);
    for (int i = 0; i < nRows; ++i) {
      binCountArray[h_labels[i]] += 1;
    }

    // a[i]: average intra-cluster distance of sample i (-1 marks a singleton
    // cluster, whose silhouette is defined as 0)
    std::vector<double> a(nRows, 0.0);
    for (int i = 0; i < nRows; ++i) {
      int myLabel = h_labels[i];
      double sumOfIntraClusterD = 0;
      for (int j = 0; j < nRows; ++j) {
        if (h_labels[j] == myLabel) { sumOfIntraClusterD += h_distanceMatrix[i * nRows + j]; }
      }
      if (binCountArray[myLabel] <= 1)
        a[i] = -1;
      else
        a[i] = sumOfIntraClusterD / (binCountArray[myLabel] - 1);
    }

    // b[i]: smallest average distance from sample i to any other cluster
    std::vector<double> b(nRows, 0.0);
    for (int i = 0; i < nRows; ++i) {
      int myLabel = h_labels[i];
      double minAvgInterCD = ULLONG_MAX;
      for (int j = 0; j < nLabels; ++j) {
        if (j == myLabel) continue;
        double avgInterCD = 0;
        for (int k = 0; k < nRows; ++k) {
          if (h_labels[k] == j) { avgInterCD += h_distanceMatrix[i * nRows + k]; }
        }
        if (binCountArray[j])
          avgInterCD /= binCountArray[j];
        else
          avgInterCD = ULLONG_MAX;  // empty cluster: never the minimum
        minAvgInterCD = std::min(minAvgInterCD, avgInterCD);
      }
      b[i] = minAvgInterCD;
    }

    // mean of the per-sample silhouette coefficients (b - a) / max(a, b)
    for (int i = 0; i < nRows; ++i) {
      double score;
      if (a[i] == -1)
        score = 0;
      else if (a[i] == 0 && b[i] == 0)
        score = 0;
      else
        score = (b[i] - a[i]) / std::max(a[i], b[i]);
      truthSilhouetteScore += score;
    }
    truthSilhouetteScore /= nRows;
  }

  void SetUp() override
  {
    // getting the parameters
    params    = ::testing::TestWithParam<silhouetteScoreParam>::GetParam();
    nRows     = params.nRows;
    nCols     = params.nCols;
    nLabels   = params.nLabels;
    chunk     = params.chunk;
    nElements = nRows * nCols;

    host_silhouette_score();

    // calling the silhouette_score CUDA implementation
    computedSilhouetteScore = raft::stats::silhouette_score(
      handle,
      raft::make_device_matrix_view<const DataT>(d_X.data(), nRows, nCols),
      raft::make_device_vector_view<const LabelT>(d_labels.data(), nRows),
      std::make_optional(raft::make_device_vector_view(sampleSilScore.data(), nRows)),
      nLabels,
      params.metric);

    batchedSilhouetteScore = raft::stats::silhouette_score_batched(
      handle,
      raft::make_device_matrix_view<const DataT>(d_X.data(), nRows, nCols),
      raft::make_device_vector_view<const LabelT>(d_labels.data(), nRows),
      std::make_optional(raft::make_device_vector_view(sampleSilScore.data(), nRows)),
      nLabels,
      chunk,
      params.metric);
  }

  // declaring the data values
  raft::resources handle;
  silhouetteScoreParam params;
  int nLabels;
  rmm::device_uvector<DataT> d_X;
  rmm::device_uvector<DataT> sampleSilScore;
  rmm::device_uvector<LabelT> d_labels;
  int nRows;
  int nCols;
  int nElements;
  double truthSilhouetteScore    = 0;
  double computedSilhouetteScore = 0;
  double batchedSilhouetteScore  = 0;
  int chunk;
};

// setting test parameter values
const std::vector<silhouetteScoreParam> inputs = {
  {4, 2, 3, cuvs::distance::DistanceType::L2Expanded, 4, 0.00001},
  {4, 2, 2, cuvs::distance::DistanceType::L2SqrtUnexpanded, 2, 0.00001},
  {8, 8, 3, cuvs::distance::DistanceType::L2Unexpanded, 4, 0.00001},
  {11, 2, 5, cuvs::distance::DistanceType::L2Expanded, 3, 0.00001},
  {40, 2, 8, cuvs::distance::DistanceType::L2Expanded, 10, 0.00001},
  {12, 7, 3, cuvs::distance::DistanceType::CosineExpanded, 8, 0.00001},
  {7, 5, 5, cuvs::distance::DistanceType::L1, 2, 0.00001}};

// writing the test suite
typedef silhouetteScoreTest<int, double> silhouetteScoreTestClass;
TEST_P(silhouetteScoreTestClass, Result)
{
  ASSERT_NEAR(computedSilhouetteScore, truthSilhouetteScore, params.tolerance);
  ASSERT_NEAR(batchedSilhouetteScore, truthSilhouetteScore, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(silhouetteScore, silhouetteScoreTestClass, ::testing::ValuesIn(inputs));

}  // end namespace stats
}  // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/histogram.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <gtest/gtest.h>
#include <raft/core/interruptible.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/histogram.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

namespace raft {
namespace stats {

// Note: this kernel also rewrites the input values in place, clamping them
// into [0, nbins), so that the library histogram computed afterwards on the
// same buffer agrees with the reference counts for out-of-bounds bins.
RAFT_KERNEL naiveHistKernel(int* bins, int nbins, int* in, int nrows)
{
  const int step    = blockDim.x * gridDim.x;
  const auto rowOff = blockIdx.y * nrows;  // column handled by this block row
  const auto binOff = blockIdx.y * nbins;
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < nrows; i += step) {
    int id = in[rowOff + i];
    // clamp into the valid bin range
    id = id < 0 ? 0 : (id >= nbins ? nbins - 1 : id);
    in[rowOff + i] = id;
    raft::myAtomicAdd(bins + binOff + id, 1);
  }
}

// Launches the reference histogram kernel: one grid-y slice per column.
void naiveHist(int* bins, int nbins, int* in, int nrows, int ncols, cudaStream_t stream)
{
  static const int TPB = 128;
  dim3 grid(raft::ceildiv(nrows, TPB), ncols);
  naiveHistKernel<<<grid, TPB, 0, stream>>>(bins, nbins, in, nrows);
  RAFT_CUDA_TRY(cudaGetLastError());
}

struct HistInputs {
  int nrows, ncols, nbins;
  bool isNormal;  // normal-distributed input if true, else uniform
  HistType type;  // histogram strategy under test
  int start, end;
  unsigned long long int seed;
};

// Compares each histogram strategy against the naive reference.
class HistTest : public ::testing::TestWithParam<HistInputs> {
 protected:
  HistTest()
    : in(0, resource::get_cuda_stream(handle)),
      bins(0, resource::get_cuda_stream(handle)),
      ref_bins(0, resource::get_cuda_stream(handle))
  {
  }

  void SetUp() override
  {
    params = ::testing::TestWithParam<HistInputs>::GetParam();
    raft::random::RngState rng(params.seed);
    auto stream = resource::get_cuda_stream(handle);

    const int total = params.nrows * params.ncols;
    in.resize(total, stream);
    if (params.isNormal) {
      normalInt(handle, rng, in.data(), total, params.start, params.end);
    } else {
      uniformInt(handle, rng, in.data(), total, params.start, params.end);
    }

    const int totalBins = params.nbins * params.ncols;
    bins.resize(totalBins, stream);
    ref_bins.resize(totalBins, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(ref_bins.data(), 0, sizeof(int) * totalBins, stream));
    naiveHist(ref_bins.data(), params.nbins, in.data(), params.nrows, params.ncols, stream);

    histogram(handle,
              params.type,
              raft::make_device_matrix_view<const int, int, raft::col_major>(
                in.data(), params.nrows, params.ncols),
              raft::make_device_matrix_view<int, int, raft::col_major>(
                bins.data(), params.nbins, params.ncols));
    resource::sync_stream(handle);
  }

 protected:
  raft::resources handle;
  HistInputs params;
  rmm::device_uvector<int> in, bins, ref_bins;
};

// Same comparison, but the RNG is driven through an mdspan view.
// NOTE(review): despite the name, the histogram itself is invoked via the
// raw-pointer overload here; only the RNG call takes an mdspan — confirm
// this is the intended coverage.
class HistMdspanTest : public ::testing::TestWithParam<HistInputs> {
 protected:
  HistMdspanTest()
    : in(0, resource::get_cuda_stream(handle)),
      bins(0, resource::get_cuda_stream(handle)),
      ref_bins(0, resource::get_cuda_stream(handle))
  {
  }

  void SetUp() override
  {
    params = ::testing::TestWithParam<HistInputs>::GetParam();
    raft::random::RngState rng(params.seed);
    auto stream = resource::get_cuda_stream(handle);

    const int total = params.nrows * params.ncols;
    in.resize(total, stream);
    raft::device_vector_view<int, int> in_view(in.data(), in.size());
    if (params.isNormal) {
      normalInt(handle, rng, in_view, params.start, params.end);
    } else {
      uniformInt(handle, rng, in_view, params.start, params.end);
    }

    const int totalBins = params.nbins * params.ncols;
    bins.resize(totalBins, stream);
    ref_bins.resize(totalBins, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(ref_bins.data(), 0, sizeof(int) * totalBins, stream));
    naiveHist(ref_bins.data(), params.nbins, in.data(), params.nrows, params.ncols, stream);

    histogram<int>(
      params.type, bins.data(), params.nbins, in.data(), params.nrows, params.ncols, stream);
    resource::sync_stream(handle);
  }

 protected:
  raft::resources handle;
  HistInputs params;
  rmm::device_uvector<int> in, bins, ref_bins;
};

static const int oneK = 1024;
static const int oneM = oneK * oneK;

namespace {

// Appends the standard grid of cases for one histogram type: for each column
// count {1, 21}, each bin count, and each row count {1M, 1M+1, 1M+2}, one
// uniform case (range [0, nbins)) and one normal case (start=1000, end=50).
void appendHistCases(std::vector<HistInputs>& dst,
                     HistType type,
                     const int* bin_counts,
                     int n_bin_counts)
{
  const int row_counts[] = {oneM, oneM + 1, oneM + 2};
  const int col_counts[] = {1, 21};
  for (int ncols : col_counts) {
    for (int b = 0; b < n_bin_counts; ++b) {
      const int nbins = bin_counts[b];
      for (int nrows : row_counts) {
        dst.push_back({nrows, ncols, nbins, false, type, 0, nbins, 1234ULL});
        dst.push_back({nrows, ncols, nbins, true, type, 1000, 50, 1234ULL});
      }
    }
  }
}

// Builds the full parameter set (144 cases), preserving the historical entry
// order: Gmem (2M bins), each shared-memory strategy (2K bins), then
// SmemHash and Auto with both 2M and 2K bins.
std::vector<HistInputs> buildHistInputs()
{
  std::vector<HistInputs> all;
  const int gmemBins[] = {2 * oneM};
  const int smemBins[] = {2 * oneK};
  const int bothBins[] = {2 * oneM, 2 * oneK};

  appendHistCases(all, HistTypeGmem, gmemBins, 1);
  const HistType smemTypes[] = {HistTypeSmem,
                                HistTypeSmemMatchAny,
                                HistTypeSmemBits16,
                                HistTypeSmemBits8,
                                HistTypeSmemBits4,
                                HistTypeSmemBits2,
                                HistTypeSmemBits1};
  for (HistType t : smemTypes) {
    appendHistCases(all, t, smemBins, 1);
  }
  appendHistCases(all, HistTypeSmemHash, bothBins, 2);
  appendHistCases(all, HistTypeAuto, bothBins, 2);
  return all;
}

}  // namespace

const std::vector<HistInputs> inputs = buildHistInputs();

TEST_P(HistTest, Result)
{
  ASSERT_TRUE(raft::devArrMatch(
    ref_bins.data(), bins.data(), params.nbins * params.ncols, raft::Compare<int>()));
}
INSTANTIATE_TEST_CASE_P(HistTests, HistTest, ::testing::ValuesIn(inputs));

TEST_P(HistMdspanTest, Result)
{
  ASSERT_TRUE(raft::devArrMatch(
    ref_bins.data(), bins.data(), params.nbins * params.ncols, raft::Compare<int>()));
}
INSTANTIATE_TEST_CASE_P(HistMdspanTests, HistMdspanTest, ::testing::ValuesIn(inputs));

}  // end namespace stats
}  // end namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/rand_index.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cudart_utils.hpp>

#include <gtest/gtest.h>

#include <algorithm>
#include <iostream>
#include <raft/core/resources.hpp>
#include <raft/stats/rand_index.cuh>
#include <random>

namespace raft {
namespace stats {

// parameter structure definition
struct randIndexParam {
  uint64_t nElements;
  int lowerLabelRange;
  int upperLabelRange;
  double tolerance;
};

// Test fixture: draws two random labelings, computes the rand index on the
// host by brute-force pair counting, and compares against the CUDA result.
template <typename T>
class randIndexTest : public ::testing::TestWithParam<randIndexParam> {
 protected:
  void SetUp() override
  {
    // fetch the parameters for this case
    params          = ::testing::TestWithParam<randIndexParam>::GetParam();
    size            = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;

    // two independent random labelings of the same points
    std::vector<int> labelsA(size, 0);
    std::vector<int> labelsB(size, 0);
    std::random_device entropy;
    std::default_random_engine engine(entropy());
    std::uniform_int_distribution<int> labelDist(lowerLabelRange, upperLabelRange);
    auto draw = [&]() { return labelDist(engine); };
    std::generate(labelsA.begin(), labelsA.end(), draw);
    std::generate(labelsB.begin(), labelsB.end(), draw);

    // golden output: over all unordered pairs, count pairs that both
    // labelings put together (agreeSame) or both keep apart (agreeDiff)
    int64_t agreeSame = 0;
    int64_t agreeDiff = 0;
    for (uint64_t i = 0; i < size; ++i) {
      for (uint64_t j = 0; j < i; ++j) {
        const bool togetherA = labelsA[i] == labelsA[j];
        const bool togetherB = labelsB[i] == labelsB[j];
        if (togetherA && togetherB) {
          ++agreeSame;
        } else if (!togetherA && !togetherB) {
          ++agreeDiff;
        }
      }
    }
    uint64_t nChooseTwo = (size * (size - 1)) / 2;
    truthRandIndex = static_cast<double>(agreeSame + agreeDiff) / static_cast<double>(nChooseTwo);

    // allocate device buffers and upload the labelings
    stream = resource::get_cuda_stream(handle);
    rmm::device_uvector<T> firstClusterArray(size, stream);
    rmm::device_uvector<T> secondClusterArray(size, stream);
    RAFT_CUDA_TRY(
      cudaMemsetAsync(firstClusterArray.data(), 0, firstClusterArray.size() * sizeof(T), stream));
    RAFT_CUDA_TRY(
      cudaMemsetAsync(secondClusterArray.data(), 0, secondClusterArray.size() * sizeof(T), stream));
    raft::update_device(firstClusterArray.data(), labelsA.data(), (int)size, stream);
    raft::update_device(secondClusterArray.data(), labelsB.data(), (int)size, stream);

    // run the rand_index CUDA implementation
    computedRandIndex = raft::stats::rand_index(
      handle,
      raft::make_device_vector_view<const T>(firstClusterArray.data(), size),
      raft::make_device_vector_view<const T>(secondClusterArray.data(), size));
  }

  // declaring the data values
  raft::resources handle;
  randIndexParam params;
  int lowerLabelRange = 0, upperLabelRange = 2;
  uint64_t size          = 0;
  double truthRandIndex  = 0;
  double computedRandIndex = 0;
  cudaStream_t stream    = 0;
};

// setting test parameter values
const std::vector<randIndexParam> inputs = {{199, 1, 10, 0.000001},
                                            {200, 1, 100, 0.000001},
                                            {10, 1, 1200, 0.000001},
                                            {100, 1, 10000, 0.000001},
                                            {198, 1, 100, 0.000001},
                                            {300, 3, 99, 0.000001},
                                            {2, 0, 0, 0.00001}};

// writing the test suite
typedef randIndexTest<int> randIndexTestClass;
TEST_P(randIndexTestClass, Result) { ASSERT_NEAR(computedRandIndex, truthRandIndex, params.tolerance); }
INSTANTIATE_TEST_CASE_P(randIndex, randIndexTestClass, ::testing::ValuesIn(inputs));

}  // end namespace stats
}  // end namespace raft
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/stats/mutual_info_score.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <algorithm>
#include <cmath>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/stats/mutual_info_score.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
#include <vector>

namespace raft {
namespace stats {

// parameter structure definition
struct mutualInfoParam {
  int nElements;
  int lowerLabelRange;
  int upperLabelRange;
  bool sameArrays;  // if true, both labelings are identical
  double tolerance;
};

// Test fixture: generates two random labelings, computes the mutual
// information on the host from the contingency matrix, and compares against
// the CUDA implementation.
template <typename T>
class mutualInfoTest : public ::testing::TestWithParam<mutualInfoParam> {
 protected:
  void SetUp() override
  {
    // getting the parameters
    params          = ::testing::TestWithParam<mutualInfoParam>::GetParam();
    nElements       = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;

    // generating random value test input
    std::vector<int> arr1(nElements, 0);
    std::vector<int> arr2(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
    if (params.sameArrays) {
      arr2 = arr1;
    } else {
      std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
    }

    // golden output: contingency matrix between the two labelings.
    // std::vector replaces the previous raw mallocs, which were never freed.
    const int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
    std::vector<int> contingency(numUniqueClasses * numUniqueClasses, 0);
    for (int i = 0; i < nElements; i++) {
      const int row = arr1[i] - lowerLabelRange;
      const int col = arr2[i] - lowerLabelRange;
      contingency[row * numUniqueClasses + col] += 1;
    }

    // row and column marginals of the contingency matrix
    std::vector<int> a(numUniqueClasses, 0);
    std::vector<int> b(numUniqueClasses, 0);
    for (int i = 0; i < numUniqueClasses; ++i) {
      for (int j = 0; j < numUniqueClasses; ++j) {
        a[i] += contingency[i * numUniqueClasses + j];
        b[i] += contingency[j * numUniqueClasses + i];
      }
    }

    // reference mutual information:
    //   MI = (1/N) * sum_ij n_ij * (log(N * n_ij) - log(a_i * b_j))
    for (int i = 0; i < numUniqueClasses; ++i) {
      for (int j = 0; j < numUniqueClasses; ++j) {
        const int nij = contingency[i * numUniqueClasses + j];
        if (a[i] * b[j] != 0 && nij != 0) {
          truthmutualInfo +=
            (double)nij * (std::log(double(nElements) * nij) - std::log((double)(a[i] * b[j])));
        }
      }
    }
    truthmutualInfo /= nElements;

    // allocating and initializing memory on the GPU
    stream = resource::get_cuda_stream(handle);
    rmm::device_uvector<T> firstClusterArray(nElements, stream);
    rmm::device_uvector<T> secondClusterArray(nElements, stream);
    RAFT_CUDA_TRY(
      cudaMemsetAsync(firstClusterArray.data(), 0, firstClusterArray.size() * sizeof(T), stream));
    RAFT_CUDA_TRY(
      cudaMemsetAsync(secondClusterArray.data(), 0, secondClusterArray.size() * sizeof(T), stream));
    raft::update_device(firstClusterArray.data(), arr1.data(), (int)nElements, stream);
    raft::update_device(secondClusterArray.data(), arr2.data(), (int)nElements, stream);

    // calling the mutual_info_score CUDA implementation
    computedmutualInfo = raft::stats::mutual_info_score(
      handle,
      raft::make_device_vector_view<const T>(firstClusterArray.data(), nElements),
      raft::make_device_vector_view<const T>(secondClusterArray.data(), nElements),
      lowerLabelRange,
      upperLabelRange);
  }

  // declaring the data values
  raft::resources handle;
  mutualInfoParam params;
  T lowerLabelRange, upperLabelRange;
  int nElements             = 0;
  double truthmutualInfo    = 0;
  double computedmutualInfo = 0;
  cudaStream_t stream       = 0;
};

// setting test parameter values
const std::vector<mutualInfoParam> inputs = {{199, 1, 10, false, 0.000001},
                                             {200, 15, 100, false, 0.000001},
                                             {100, 1, 20, false, 0.000001},
                                             {10, 1, 10, false, 0.000001},
                                             {198, 1, 100, false, 0.000001},
                                             {300, 3, 99, false, 0.000001},
                                             {199, 1, 10, true, 0.000001},
                                             {200, 15, 100, true, 0.000001},
                                             {100, 1, 20, true, 0.000001},
                                             {10, 1, 10, true, 0.000001},
                                             {198, 1, 100, true, 0.000001},
                                             {300, 3, 99, true, 0.000001}};

// writing the test suite
typedef mutualInfoTest<int> mutualInfoTestClass;
TEST_P(mutualInfoTestClass, Result) { ASSERT_NEAR(computedmutualInfo, truthmutualInfo, params.tolerance); }
INSTANTIATE_TEST_CASE_P(mutualInfo, mutualInfoTestClass, ::testing::ValuesIn(inputs));

}  // end namespace stats
}  // end namespace raft
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/cluster/kmeans_find_k.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.h"

#include <gtest/gtest.h>
#include <optional>
#include <raft/core/resource/cuda_stream.hpp>
#include <vector>

#include <cuvs/cluster/kmeans.cuh>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/make_blobs.cuh>
#include <raft/util/cuda_utils.cuh>

namespace cuvs {

template <typename T>
struct KmeansFindKInputs {
  int n_row;
  int n_col;
  int n_clusters;
  T tol;
  bool weighted;
};

// Test fixture: generates well-separated blobs with a known cluster count and
// checks that find_k recovers that count.
template <typename T>
class KmeansFindKTest : public ::testing::TestWithParam<KmeansFindKInputs<T>> {
 protected:
  KmeansFindKTest()
    : stream(resource::get_cuda_stream(handle)), best_k(raft::make_host_scalar<int>(0))
  {
  }

  void basicTest()
  {
    testparams = ::testing::TestWithParam<KmeansFindKInputs<T>>::GetParam();

    const int n_samples  = testparams.n_row;
    const int n_features = testparams.n_col;
    const int n_clusters = testparams.n_clusters;

    // synthetic dataset: n_clusters gaussian blobs with a fixed seed
    auto X      = raft::make_device_matrix<T, int>(handle, n_samples, n_features);
    auto labels = raft::make_device_vector<int, int>(handle, n_samples);
    raft::random::make_blobs<T, int>(X.data_handle(),
                                     labels.data_handle(),
                                     n_samples,
                                     n_features,
                                     n_clusters,
                                     stream,
                                     true,
                                     nullptr,
                                     nullptr,
                                     T(.001),
                                     false,
                                     (T)-10.0f,
                                     (T)10.0f,
                                     (uint64_t)1234);

    auto inertia = raft::make_host_scalar<T>(0);
    auto n_iter  = raft::make_host_scalar<int>(0);
    auto X_view =
      raft::make_device_matrix_view<const T, int>(X.data_handle(), X.extent(0), X.extent(1));

    // search for the best cluster count up to n_clusters
    cuvs::cluster::kmeans::find_k<int, T>(
      handle, X_view, best_k.view(), inertia.view(), n_iter.view(), n_clusters);

    resource::sync_stream(handle, stream);
  }

  void SetUp() override { basicTest(); }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  KmeansFindKInputs<T> testparams;
  raft::host_scalar<int> best_k;
};

const std::vector<KmeansFindKInputs<float>> inputsf2 = {{1000, 32, 8, 0.001f, true},
                                                        {1000, 32, 8, 0.001f, false},
                                                        {1000, 100, 20, 0.001f, true},
                                                        {1000, 100, 20, 0.001f, false},
                                                        {10000, 32, 10, 0.001f, true},
                                                        {10000, 32, 10, 0.001f, false},
                                                        {10000, 100, 50, 0.001f, true},
                                                        {10000, 100, 50, 0.001f, false},
                                                        {10000, 500, 100, 0.001f, true},
                                                        {10000, 500, 100, 0.001f, false}};

const std::vector<KmeansFindKInputs<double>> inputsd2 = {{1000, 32, 5, 0.0001, true},
                                                         {1000, 32, 5, 0.0001, false},
                                                         {1000, 100, 20, 0.0001, true},
                                                         {1000, 100, 20, 0.0001, false},
                                                         {10000, 32, 10, 0.0001, true},
                                                         {10000, 32, 10, 0.0001, false},
                                                         {10000, 100, 50, 0.0001, true},
                                                         {10000, 100, 50, 0.0001, false},
                                                         {10000, 500, 100, 0.0001, true},
                                                         {10000, 500, 100, 0.0001, false}};

typedef KmeansFindKTest<float> KmeansFindKTestF;
TEST_P(KmeansFindKTestF, Result)
{
  const int found = best_k.view()[0];
  // print the mismatch before failing to ease debugging
  if (found != testparams.n_clusters) {
    std::cout << found << " " << testparams.n_clusters << std::endl;
  }
  ASSERT_TRUE(found == testparams.n_clusters);
}

typedef KmeansFindKTest<double> KmeansFindKTestD;
TEST_P(KmeansFindKTestD, Result)
{
  const int found = best_k.view()[0];
  if (found != testparams.n_clusters) {
    std::cout << found << " " << testparams.n_clusters << std::endl;
  }
  ASSERT_TRUE(found == testparams.n_clusters);
}

INSTANTIATE_TEST_CASE_P(KmeansFindKTests, KmeansFindKTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(KmeansFindKTests, KmeansFindKTestD, ::testing::ValuesIn(inputsd2));

}  // namespace cuvs
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/cluster/linkage.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // XXX: We allow the instantiation of masked_l2_nn here: // raft::linkage::FixConnectivitiesRedOp<value_idx, value_t> red_op(params.n_row); // raft::linkage::cross_component_nn<value_idx, value_t>( // handle, out_edges, data.data(), colors.data(), params.n_row, params.n_col, red_op); // // TODO: consider adding this to libraft.so or creating an instance in a // separate translation unit for this test. #undef RAFT_EXPLICIT_INSTANTIATE_ONLY #include "../test_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <cuvs/distance/distance_types.hpp> #include <raft/linalg/transpose.cuh> #include <raft/sparse/coo.hpp> #include <cuvs/cluster/single_linkage.cuh> #include <raft/core/device_mdspan.hpp> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <gtest/gtest.h> #include <vector> namespace cuvs { using namespace std; template <typename T, typename IdxT> struct LinkageInputs { IdxT n_row; IdxT n_col; std::vector<T> data; std::vector<IdxT> expected_labels; int n_clusters; bool use_knn; int c; }; /** * @brief kernel to calculate the values of a and b * @param firstClusterArray: the array of classes of type T * @param secondClusterArray: the array of classes of type T * @param size: the size of the data points * @param a: number of pairs of points that both the clusters have classified the same * @param b: number of pairs of points that both the 
clusters have classified differently */ template <typename T, int BLOCK_DIM_X, int BLOCK_DIM_Y> RAFT_KERNEL computeTheNumerator( const T* firstClusterArray, const T* secondClusterArray, uint64_t size, uint64_t* a, uint64_t* b) { // calculating the indices of pairs of datapoints compared by the current thread uint64_t j = threadIdx.x + blockIdx.x * blockDim.x; uint64_t i = threadIdx.y + blockIdx.y * blockDim.y; // thread-local variables to count a and b uint64_t myA = 0, myB = 0; if (i < size && j < size && j < i) { // checking if the pair have been classified the same by both the clusters if (firstClusterArray[i] == firstClusterArray[j] && secondClusterArray[i] == secondClusterArray[j]) { ++myA; } // checking if the pair have been classified differently by both the clusters else if (firstClusterArray[i] != firstClusterArray[j] && secondClusterArray[i] != secondClusterArray[j]) { ++myB; } } // specialize blockReduce for a 2D block of 1024 threads of type uint64_t typedef cub::BlockReduce<uint64_t, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y> BlockReduce; // Allocate shared memory for blockReduce __shared__ typename BlockReduce::TempStorage temp_storage; // summing up thread-local counts specific to a block myA = BlockReduce(temp_storage).Sum(myA); __syncthreads(); myB = BlockReduce(temp_storage).Sum(myB); __syncthreads(); // executed once per block if (threadIdx.x == 0 && threadIdx.y == 0) { raft::myAtomicAdd<unsigned long long int>((unsigned long long int*)a, myA); raft::myAtomicAdd<unsigned long long int>((unsigned long long int*)b, myB); } } /** * @brief Function to calculate RandIndex * <a href="https://en.wikipedia.org/wiki/Rand_index">more info on rand index</a> * @param firstClusterArray: the array of classes of type T * @param secondClusterArray: the array of classes of type T * @param size: the size of the data points of type uint64_t * @param stream: the cudaStream object */ template <typename T> double compute_rand_index(T* 
firstClusterArray, T* secondClusterArray, uint64_t size, cudaStream_t stream) { // rand index for size less than 2 is not defined ASSERT(size >= 2, "Rand Index for size less than 2 not defined!"); // allocating and initializing memory for a and b in the GPU rmm::device_uvector<uint64_t> arr_buf(2, stream); RAFT_CUDA_TRY(cudaMemsetAsync(arr_buf.data(), 0, 2 * sizeof(uint64_t), stream)); // kernel configuration static const int BLOCK_DIM_Y = 16, BLOCK_DIM_X = 16; dim3 numThreadsPerBlock(BLOCK_DIM_X, BLOCK_DIM_Y); dim3 numBlocks(raft::ceildiv<int>(size, numThreadsPerBlock.x), raft::ceildiv<int>(size, numThreadsPerBlock.y)); // calling the kernel computeTheNumerator<T, BLOCK_DIM_X, BLOCK_DIM_Y><<<numBlocks, numThreadsPerBlock, 0, stream>>>( firstClusterArray, secondClusterArray, size, arr_buf.data(), arr_buf.data() + 1); // synchronizing and updating the calculated values of a and b from device to host uint64_t ab_host[2] = {0}; raft::update_host(ab_host, arr_buf.data(), 2, stream); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); // error handling RAFT_CUDA_TRY(cudaGetLastError()); // denominator uint64_t nChooseTwo = size * (size - 1) / 2; // calculating the rand_index return (double)(((double)(ab_host[0] + ab_host[1])) / (double)nChooseTwo); } template <typename T, typename IdxT> ::std::ostream& operator<<(::std::ostream& os, const LinkageInputs<T, IdxT>& dims) { return os; } template <typename T, typename IdxT> class LinkageTest : public ::testing::TestWithParam<LinkageInputs<T, IdxT>> { public: LinkageTest() : params(::testing::TestWithParam<LinkageInputs<T, IdxT>>::GetParam()), labels(0, resource::get_cuda_stream(handle)), labels_ref(0, resource::get_cuda_stream(handle)) { } protected: void basicTest() { auto stream = resource::get_cuda_stream(handle); labels.resize(params.n_row, stream); labels_ref.resize(params.n_row, stream); rmm::device_uvector<T> data(params.n_row * params.n_col, stream); raft::copy(data.data(), params.data.data(), data.size(), stream); 
raft::copy(labels_ref.data(), params.expected_labels.data(), params.n_row, stream); rmm::device_uvector<IdxT> out_children(params.n_row * 2, stream); auto data_view = raft::make_device_matrix_view<const T, IdxT, row_major>( data.data(), params.n_row, params.n_col); auto dendrogram_view = raft::make_device_matrix_view<IdxT, IdxT, row_major>(out_children.data(), params.n_row, 2); auto labels_view = raft::make_device_vector_view<IdxT, IdxT>(labels.data(), params.n_row); if (params.use_knn) { cuvs::cluster::hierarchy:: single_linkage<T, IdxT, cuvs::cluster::hierarchy::LinkageDistance::KNN_GRAPH>( handle, data_view, dendrogram_view, labels_view, cuvs::distance::DistanceType::L2SqrtExpanded, params.n_clusters, std::make_optional<int>(params.c)); } else { cuvs::cluster::hierarchy:: single_linkage<T, IdxT, cuvs::cluster::hierarchy::LinkageDistance::PAIRWISE>( handle, data_view, dendrogram_view, labels_view, cuvs::distance::DistanceType::L2SqrtExpanded, params.n_clusters, std::make_optional<int>(params.c)); } resource::sync_stream(handle, stream); score = compute_rand_index(labels.data(), labels_ref.data(), params.n_row, stream); } void SetUp() override { basicTest(); } protected: raft::resources handle; LinkageInputs<T, IdxT> params; rmm::device_uvector<IdxT> labels, labels_ref; double score; }; const std::vector<LinkageInputs<float, int>> linkage_inputsf2 = { // Test n_clusters == n_points {10, 5, {0.21390334, 0.50261639, 0.91036676, 0.59166485, 0.71162682, 0.10248392, 0.77782677, 0.43772379, 0.4035871, 0.3282796, 0.47544681, 0.59862974, 0.12319357, 0.06239463, 0.28200272, 0.1345717, 0.50498218, 0.5113505, 0.16233086, 0.62165332, 0.42281548, 0.933117, 0.41386077, 0.23264562, 0.73325968, 0.37537541, 0.70719873, 0.14522645, 0.73279625, 0.9126674, 0.84854131, 0.28890216, 0.85267903, 0.74703138, 0.83842071, 0.34942792, 0.27864171, 0.70911132, 0.21338564, 0.32035554, 0.73788331, 0.46926692, 0.57570162, 0.42559178, 0.87120209, 0.22734951, 0.01847905, 0.75549396, 0.76166195, 
0.66613745}, {9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, 10, true, -1}, // // Test outlier points {9, 2, {-1, -50, 3, 4, 5000, 10000, 1, 3, 4, 5, 0.000005, 0.00002, 2000000, 500000, 10, 50, 30, 5}, {6, 0, 5, 0, 0, 4, 3, 2, 1}, 7, true, -1}, // Test n_clusters == (n_points / 2) {10, 5, {0.21390334, 0.50261639, 0.91036676, 0.59166485, 0.71162682, 0.10248392, 0.77782677, 0.43772379, 0.4035871, 0.3282796, 0.47544681, 0.59862974, 0.12319357, 0.06239463, 0.28200272, 0.1345717, 0.50498218, 0.5113505, 0.16233086, 0.62165332, 0.42281548, 0.933117, 0.41386077, 0.23264562, 0.73325968, 0.37537541, 0.70719873, 0.14522645, 0.73279625, 0.9126674, 0.84854131, 0.28890216, 0.85267903, 0.74703138, 0.83842071, 0.34942792, 0.27864171, 0.70911132, 0.21338564, 0.32035554, 0.73788331, 0.46926692, 0.57570162, 0.42559178, 0.87120209, 0.22734951, 0.01847905, 0.75549396, 0.76166195, 0.66613745}, {1, 0, 4, 0, 0, 3, 2, 0, 2, 1}, 5, true, -1}, // Test n_points == 100 {100, 10, {6.26168372e-01, 9.30437651e-01, 6.02450208e-01, 2.73025296e-01, 9.53050619e-01, 3.32164396e-01, 6.88942598e-01, 5.79163537e-01, 6.70341547e-01, 2.70140602e-02, 9.30429671e-01, 7.17721157e-01, 9.89948537e-01, 7.75253347e-01, 1.34491522e-02, 2.48522428e-02, 3.51413378e-01, 7.64405834e-01, 7.86373507e-01, 7.18748577e-01, 8.66998621e-01, 6.80316582e-01, 2.51288712e-01, 4.91078420e-01, 3.76246281e-01, 4.86828710e-01, 5.67464772e-01, 5.30734742e-01, 8.99478296e-01, 7.66699088e-01, 9.49339111e-01, 3.55248484e-01, 9.06046929e-01, 4.48407772e-01, 6.96395305e-01, 2.44277335e-01, 7.74840000e-01, 5.21046603e-01, 4.66423971e-02, 5.12019638e-02, 8.95019614e-01, 5.28956953e-01, 4.31536306e-01, 5.83857744e-01, 4.41787364e-01, 4.68656523e-01, 5.73971433e-01, 6.79989654e-01, 3.19650588e-01, 6.12579596e-01, 6.49126442e-02, 8.39131142e-01, 2.85252117e-01, 5.84848929e-01, 9.46507115e-01, 8.58440748e-01, 3.61528940e-01, 2.44215959e-01, 3.80101125e-01, 4.57128957e-02, 8.82216988e-01, 8.31498633e-01, 7.23474381e-01, 7.75788607e-01, 1.40864146e-01, 
6.62092382e-01, 5.13985168e-01, 3.00686418e-01, 8.70109949e-01, 2.43187753e-01, 2.89391938e-01, 2.84214238e-01, 8.70985521e-01, 8.77491176e-01, 6.72537226e-01, 3.30929686e-01, 1.85934324e-01, 9.16222614e-01, 6.18239142e-01, 2.64768597e-01, 5.76145451e-01, 8.62961369e-01, 6.84757925e-01, 7.60549082e-01, 1.27645356e-01, 4.51004673e-01, 3.92292980e-01, 4.63170803e-01, 4.35449330e-02, 2.17583404e-01, 5.71832605e-02, 2.06763039e-01, 3.70116249e-01, 2.09750028e-01, 6.17283019e-01, 8.62549231e-01, 9.84156240e-02, 2.66249156e-01, 3.87635103e-01, 2.85591012e-02, 4.24826068e-01, 4.45795088e-01, 6.86227676e-01, 1.08848960e-01, 5.96731841e-02, 3.71770228e-01, 1.91548833e-01, 6.95136078e-01, 9.00700636e-01, 8.76363105e-01, 2.67334632e-01, 1.80619709e-01, 7.94060419e-01, 1.42854171e-02, 1.09372387e-01, 8.74028108e-01, 6.46403232e-01, 4.86588834e-01, 5.93446175e-02, 6.11886291e-01, 8.83865057e-01, 3.15879821e-01, 2.27043992e-01, 9.76764951e-01, 6.15620336e-01, 9.76199360e-01, 2.40548962e-01, 3.21795663e-01, 8.75087904e-02, 8.11234663e-01, 6.96070480e-01, 8.12062321e-01, 1.21958818e-01, 3.44348628e-02, 8.72630414e-01, 3.06162776e-01, 1.76043529e-02, 9.45894971e-01, 5.33896401e-01, 6.21642973e-01, 4.93062535e-01, 4.48984262e-01, 2.24560379e-01, 4.24052195e-02, 4.43447610e-01, 8.95646149e-01, 6.05220676e-01, 1.81840491e-01, 9.70831206e-01, 2.12563586e-02, 6.92582693e-01, 7.55946922e-01, 7.95086143e-01, 6.05328941e-01, 3.99350764e-01, 4.32846636e-01, 9.81114529e-01, 4.98266428e-01, 6.37127930e-03, 1.59085889e-01, 6.34682067e-05, 5.59429440e-01, 7.38827633e-01, 8.93214770e-01, 2.16494306e-01, 9.35430573e-02, 4.75665868e-02, 7.80503518e-01, 7.86240041e-01, 7.06854594e-01, 2.13725879e-02, 7.68246091e-01, 4.50234808e-01, 5.21231104e-01, 5.01989826e-03, 4.22081572e-02, 1.65337732e-01, 8.54134740e-01, 4.99430262e-01, 8.94525601e-01, 1.14028379e-01, 3.69739861e-01, 1.32955599e-01, 2.65563824e-01, 2.52811151e-01, 1.44792843e-01, 6.88449594e-01, 4.44921417e-01, 8.23296587e-01, 1.93266317e-01, 
1.19033309e-01, 1.36368966e-01, 3.42600285e-01, 5.64505195e-01, 5.57594559e-01, 7.44257892e-01, 8.38231569e-02, 4.11548847e-01, 3.21010077e-01, 8.55081359e-01, 4.30105779e-01, 1.16229135e-01, 9.87731964e-02, 3.14712335e-01, 4.50880592e-01, 2.72289598e-01, 6.31615256e-01, 8.97432958e-01, 4.44764250e-01, 8.03776440e-01, 2.68767748e-02, 2.43374608e-01, 4.02141103e-01, 4.98881209e-01, 5.33173003e-01, 8.82890436e-01, 7.16149148e-01, 4.19664401e-01, 2.29335357e-01, 2.88637806e-01, 3.44696803e-01, 6.78171906e-01, 5.69849716e-01, 5.86454477e-01, 3.54474989e-01, 9.03876540e-01, 6.45980000e-01, 6.34887593e-01, 7.88039746e-02, 2.04814126e-01, 7.82251754e-01, 2.43147074e-01, 7.50951808e-01, 1.72799092e-02, 2.95349590e-01, 6.57991826e-01, 8.81214312e-01, 5.73970708e-01, 2.77610881e-01, 1.82155097e-01, 7.69797417e-02, 6.44792402e-01, 9.46950998e-01, 7.73064845e-01, 6.04733624e-01, 5.80094567e-01, 1.67498426e-01, 2.66514296e-01, 6.50140368e-01, 1.91170299e-01, 2.08752199e-01, 3.01664091e-01, 9.85033484e-01, 2.92909152e-01, 8.65816607e-01, 1.85222119e-01, 2.28814559e-01, 1.34286382e-02, 2.89234322e-01, 8.18668708e-01, 4.71706924e-01, 9.23199803e-01, 2.80879188e-01, 1.47319284e-01, 4.13915748e-01, 9.31274932e-02, 6.66322195e-01, 9.66953974e-01, 3.19405786e-01, 6.69486551e-01, 5.03096313e-02, 6.95225201e-01, 5.78469859e-01, 6.29481655e-01, 1.39252534e-01, 1.22564968e-01, 6.80663678e-01, 6.34607157e-01, 6.42765834e-01, 1.57127410e-02, 2.92132086e-01, 5.24423878e-01, 4.68676824e-01, 2.86003928e-01, 7.18608322e-01, 8.95617933e-01, 5.48844309e-01, 1.74517278e-01, 5.24379196e-01, 2.13526524e-01, 5.88375435e-01, 9.88560185e-01, 4.17435771e-01, 6.14438688e-01, 9.53760881e-01, 5.27151288e-01, 7.03017278e-01, 3.44448559e-01, 4.47059676e-01, 2.83414901e-01, 1.98979011e-01, 4.24917361e-01, 5.73172761e-01, 2.32398853e-02, 1.65887230e-01, 4.05552785e-01, 9.29665524e-01, 2.26135696e-01, 9.20563384e-01, 7.65259963e-01, 4.54820075e-01, 8.97710267e-01, 3.78559302e-03, 9.15219382e-01, 3.55705698e-01, 
6.94905124e-01, 8.58540202e-01, 3.89790666e-01, 2.49478206e-01, 7.93679304e-01, 4.75830027e-01, 4.40425353e-01, 3.70579459e-01, 1.40578049e-01, 1.70386675e-01, 7.04056121e-01, 4.85963102e-01, 9.68450060e-01, 6.77178001e-01, 2.65934654e-01, 2.58915007e-01, 6.70052890e-01, 2.61945109e-01, 8.46207759e-01, 1.01928951e-01, 2.85611334e-01, 2.45776933e-01, 2.66658783e-01, 3.71724077e-01, 4.34319025e-01, 4.24407347e-01, 7.15417683e-01, 8.07997684e-01, 1.64296275e-01, 6.01638065e-01, 8.60606804e-02, 2.68719187e-01, 5.11764101e-01, 9.75844338e-01, 7.81226782e-01, 2.20925515e-01, 7.18135040e-01, 9.82395577e-01, 8.39160243e-01, 9.08058083e-01, 6.88010677e-01, 8.14271847e-01, 5.12460821e-01, 1.17311345e-01, 5.96075228e-01, 9.17455497e-01, 2.12052706e-01, 7.04074603e-01, 8.72872565e-02, 8.76047818e-01, 6.96235046e-01, 8.54801557e-01, 2.49729159e-01, 9.76594604e-01, 2.87386363e-01, 2.36461559e-02, 9.94075254e-01, 4.25193986e-01, 7.61869994e-01, 5.13334255e-01, 6.44711165e-02, 8.92156689e-01, 3.55235167e-01, 1.08154647e-01, 8.78446825e-01, 2.43833016e-01, 9.23071293e-01, 2.72724115e-01, 9.46631338e-01, 3.74510294e-01, 4.08451278e-02, 9.78392777e-01, 3.65079221e-01, 6.37199516e-01, 5.51144906e-01, 5.25978080e-01, 1.42803678e-01, 4.05451674e-01, 7.79788219e-01, 6.26009784e-01, 3.35249497e-01, 1.43159543e-02, 1.80363779e-01, 5.05096904e-01, 2.82619947e-01, 5.83561392e-01, 3.10951324e-01, 8.73223968e-01, 4.38545619e-01, 4.81348800e-01, 6.68497085e-01, 3.79345401e-01, 9.58832501e-01, 1.89869550e-01, 2.34083070e-01, 2.94066207e-01, 5.74892667e-02, 6.92106828e-02, 9.61127686e-02, 6.72650672e-02, 8.47345378e-01, 2.80916761e-01, 7.32177357e-03, 9.80785961e-01, 5.73192225e-02, 8.48781331e-01, 8.83225408e-01, 7.34398275e-01, 7.70381941e-01, 6.20778343e-01, 8.96822048e-01, 5.40732486e-01, 3.69704071e-01, 5.77305837e-01, 2.08221827e-01, 7.34275341e-01, 1.06110900e-01, 3.49496706e-01, 8.34948910e-01, 1.56403291e-02, 6.78576376e-01, 8.96141268e-01, 5.94835119e-01, 1.43943153e-01, 3.49618530e-01, 
2.10440392e-01, 3.46585620e-01, 1.05153093e-01, 3.45446174e-01, 2.72177079e-01, 7.07946300e-01, 4.33717726e-02, 3.31232203e-01, 3.91874320e-01, 4.76338141e-01, 6.22777789e-01, 2.95989228e-02, 4.32855769e-01, 7.61049310e-01, 3.63279149e-01, 9.47210350e-01, 6.43721247e-01, 6.58025802e-01, 1.05247633e-02, 5.29974442e-01, 7.30675767e-01, 4.30041079e-01, 6.62634841e-01, 8.25936616e-01, 9.91253704e-01, 6.79399281e-01, 5.44177006e-01, 7.52876048e-01, 3.32139049e-01, 7.98732398e-01, 7.38865223e-01, 9.16055132e-01, 6.11736493e-01, 9.63672879e-01, 1.83778839e-01, 7.27558919e-02, 5.91602822e-01, 3.25235484e-01, 2.34741217e-01, 9.52346277e-01, 9.18556407e-01, 9.35373324e-01, 6.89209070e-01, 2.56049054e-01, 6.17975395e-01, 7.82285691e-01, 9.84983432e-01, 6.62322741e-01, 2.04144457e-01, 3.98446577e-01, 1.38918297e-01, 3.05919921e-01, 3.14043787e-01, 5.91072666e-01, 7.44703771e-01, 8.92272567e-01, 9.78017873e-01, 9.01203161e-01, 1.41526372e-01, 4.14878484e-01, 6.80683651e-01, 5.01733152e-02, 8.14635389e-01, 2.27926375e-01, 9.03269815e-01, 8.68443745e-01, 9.86939190e-01, 7.40779486e-01, 2.61005311e-01, 3.19276232e-01, 9.69509248e-01, 1.11908818e-01, 4.49198556e-01, 1.27056715e-01, 3.84064823e-01, 5.14591811e-01, 2.10747488e-01, 9.53884090e-01, 8.43167950e-01, 4.51187972e-01, 3.75331782e-01, 6.23566461e-01, 3.55290379e-01, 2.95705968e-01, 1.69622690e-01, 1.42981830e-01, 2.72180991e-01, 9.46468040e-01, 3.70932500e-01, 9.94292830e-01, 4.62587505e-01, 7.14817405e-01, 2.45370540e-02, 3.00906377e-01, 5.75768304e-01, 9.71448393e-01, 6.95574827e-02, 3.93693854e-01, 5.29306116e-01, 5.04694554e-01, 6.73797120e-02, 6.76596969e-01, 5.50948898e-01, 3.24909641e-01, 7.70337719e-01, 6.51842631e-03, 3.03264879e-01, 7.61037886e-03, 2.72289601e-01, 1.50502041e-01, 6.71103888e-02, 7.41503703e-01, 1.92088941e-01, 2.19043977e-01, 9.09320161e-01, 2.37993569e-01, 6.18107973e-02, 8.31447852e-01, 2.23355609e-01, 1.84789435e-01, 4.16104518e-01, 4.21573859e-01, 8.72446305e-02, 2.97294197e-01, 4.50328256e-01, 
8.72199917e-01, 2.51279916e-01, 4.86219272e-01, 7.57071329e-01, 4.85655942e-01, 1.06187277e-01, 4.92341327e-01, 1.46017513e-01, 5.25421017e-01, 4.22637906e-01, 2.24685018e-01, 8.72648431e-01, 5.54051490e-01, 1.80745062e-01, 2.12756336e-01, 5.20883169e-01, 7.60363654e-01, 8.30254678e-01, 5.00003328e-01, 4.69017439e-01, 6.38105527e-01, 3.50638261e-02, 5.22217353e-02, 9.06516882e-02, 8.52975842e-01, 1.19985883e-01, 3.74926753e-01, 6.50302066e-01, 1.98875727e-01, 6.28362507e-02, 4.32693501e-01, 3.10500685e-01, 6.20732833e-01, 4.58503272e-01, 3.20790034e-01, 7.91284868e-01, 7.93054570e-01, 2.93406765e-01, 8.95399023e-01, 1.06441034e-01, 7.53085241e-02, 8.67523104e-01, 1.47963482e-01, 1.25584706e-01, 3.81545040e-02, 6.34338619e-01, 1.76368938e-02, 5.75553531e-02, 5.31607516e-01, 2.63869588e-01, 9.41945823e-01, 9.24028838e-02, 5.21496463e-01, 7.74866558e-01, 5.65210610e-01, 7.28015327e-02, 6.51963790e-01, 8.94727453e-01, 4.49571590e-01, 1.29932405e-01, 8.64026259e-01, 9.92599934e-01, 7.43721560e-01, 8.87300215e-01, 1.06369925e-01, 8.11335531e-01, 7.87734900e-01, 9.87344678e-01, 5.32502820e-01, 4.42612382e-01, 9.64041183e-01, 1.66085871e-01, 1.12937664e-01, 5.24423470e-01, 6.54689333e-01, 4.59119726e-01, 5.22774091e-01, 3.08722276e-02, 6.26979315e-01, 4.49754105e-01, 8.07495757e-01, 2.34199499e-01, 1.67765675e-01, 9.22168418e-01, 3.73210378e-01, 8.04432575e-01, 5.61890354e-01, 4.47025593e-01, 6.43155678e-01, 2.40407640e-01, 5.91631279e-01, 1.59369206e-01, 7.75799090e-01, 8.32067212e-01, 5.59791576e-02, 6.39105224e-01, 4.85274738e-01, 2.12630838e-01, 2.81431312e-02, 7.16205363e-01, 6.83885011e-01, 5.23869697e-01, 9.99418314e-01, 8.35331599e-01, 4.69877463e-02, 6.74712562e-01, 7.99273684e-01, 2.77001890e-02, 5.75809742e-01, 2.78513031e-01, 8.36209905e-01, 7.25472379e-01, 4.87173943e-01, 7.88311357e-01, 9.64676177e-01, 1.75752651e-01, 4.98112580e-01, 8.08850418e-02, 6.40981131e-01, 4.06647450e-01, 8.46539387e-01, 2.12620694e-01, 9.11012851e-01, 8.25041445e-01, 8.90065575e-01, 
9.63626055e-01, 5.96689242e-01, 1.63372670e-01, 4.51640148e-01, 3.43026542e-01, 5.80658851e-01, 2.82327625e-01, 4.75535418e-01, 6.27760926e-01, 8.46314115e-01, 9.61961932e-01, 3.19806094e-01, 5.05508062e-01, 5.28102944e-01, 6.13045057e-01, 7.44714938e-01, 1.50586073e-01, 7.91878033e-01, 4.89839179e-01, 3.10496849e-01, 8.82309038e-01, 2.86922314e-01, 4.84687559e-01, 5.20838630e-01, 4.62955493e-01, 2.38185305e-01, 5.47259907e-02, 7.10916137e-01, 7.31887202e-01, 6.25602317e-01, 8.77741168e-01, 4.19881322e-01, 4.81222328e-01, 1.28224501e-01, 2.46034010e-01, 3.34971854e-01, 7.37216484e-01, 5.62134821e-02, 7.14089724e-01, 9.85549393e-01, 4.66295827e-01, 3.08722434e-03, 4.70237690e-01, 2.66524167e-01, 7.93875484e-01, 4.54795911e-02, 8.09702944e-01, 1.47709735e-02, 1.70082405e-01, 6.35905179e-01, 3.75379109e-01, 4.30315011e-01, 3.15788760e-01, 5.58065230e-01, 2.24643800e-01, 2.42142981e-01, 6.57283636e-01, 3.34921891e-01, 1.26588975e-01, 7.68064155e-01, 9.43856291e-01, 4.47518596e-01, 5.44453573e-01, 9.95764932e-01, 7.16444391e-01, 8.51019765e-01, 1.01179183e-01, 4.45473958e-01, 4.60327322e-01, 4.96895844e-02, 4.72907738e-01, 5.58987444e-01, 3.41027487e-01, 1.56175026e-01, 7.58283148e-01, 6.83600909e-01, 2.14623396e-01, 3.27348880e-01, 3.92517893e-01, 6.70418431e-01, 5.16440832e-01, 8.63140348e-01, 5.73277464e-01, 3.46608058e-01, 7.39396341e-01, 7.20852434e-01, 2.35653246e-02, 3.89935659e-01, 7.53783745e-01, 6.34563528e-01, 8.79339335e-01, 7.41599159e-02, 5.62433904e-01, 6.15553852e-01, 4.56956324e-01, 5.20047447e-01, 5.26845015e-02, 5.58471266e-01, 1.63632233e-01, 5.38936665e-02, 6.49593683e-01, 2.56838748e-01, 8.99035326e-01, 7.20847756e-01, 5.68954684e-01, 7.43684755e-01, 5.70924238e-01, 3.82318724e-01, 4.89328290e-01, 5.62208561e-01, 4.97540804e-02, 4.18011085e-01, 6.88041565e-01, 2.16234653e-01, 7.89548214e-01, 8.46136387e-01, 8.46816189e-01, 1.73842353e-01, 6.11627842e-02, 8.44440559e-01, 4.50646654e-01, 3.74785037e-01, 4.87196697e-01, 4.56276448e-01, 9.13284391e-01, 
4.15715464e-01, 7.13597697e-01, 1.23641270e-02, 5.10031271e-01, 4.74601930e-02, 2.55731159e-01, 3.22090006e-01, 1.91165703e-01, 4.51170940e-01, 7.50843157e-01, 4.42420576e-01, 4.25380660e-01, 4.50667257e-01, 6.55689206e-01, 9.68257670e-02, 1.96528793e-01, 8.97343028e-01, 4.99940904e-01, 6.65504083e-01, 9.41828079e-01, 4.54397338e-01, 5.61893331e-01, 5.09839880e-01, 4.53117514e-01, 8.96804127e-02, 1.74888861e-01, 6.65641378e-01, 2.81668336e-01, 1.89532742e-01, 5.61668382e-01, 8.68330157e-02, 8.25092797e-01, 5.18106324e-01, 1.71904024e-01, 3.68385523e-01, 1.62005436e-01, 7.48507399e-01, 9.30274827e-01, 2.38198517e-01, 9.52222901e-01, 5.23587800e-01, 6.94384557e-01, 1.09338652e-01, 4.83356794e-01, 2.73050402e-01, 3.68027050e-01, 5.92366466e-01, 1.83192289e-01, 8.60376029e-01, 7.13926203e-01, 8.16750052e-01, 1.57890291e-01, 6.25691951e-01, 5.24831646e-01, 1.73873797e-01, 1.02429784e-01, 9.17488471e-01, 4.03584434e-01, 9.31170884e-01, 2.79386137e-01, 8.77745206e-01, 2.45200576e-01, 1.28896951e-01, 3.15713052e-01, 5.27874291e-01, 2.16444335e-01, 7.03883817e-01, 7.74738919e-02, 8.42422142e-01, 3.75598924e-01, 3.51002411e-01, 6.22752776e-01, 4.82407943e-01, 7.43107867e-01, 9.46182666e-01, 9.44344819e-01, 3.28124763e-01, 1.06147431e-01, 1.65102684e-01, 3.84060507e-01, 2.91057722e-01, 7.68173662e-02, 1.03543651e-01, 6.76698940e-01, 1.43141994e-01, 7.21342202e-01, 6.69471294e-03, 9.07298311e-01, 5.57080171e-01, 8.10954489e-01, 4.11120526e-01, 2.06407453e-01, 2.59590556e-01, 7.58512718e-01, 5.79873897e-01, 2.92875650e-01, 2.83686529e-01, 2.42829343e-01, 9.19323719e-01, 3.46832864e-01, 3.58238858e-01, 7.42827585e-01, 2.05760059e-01, 9.58438860e-01, 5.66326411e-01, 6.60292846e-01, 5.61095078e-02, 6.79465531e-01, 7.05118513e-01, 4.44713264e-01, 2.09732933e-01, 5.22732436e-01, 1.74396512e-01, 5.29356748e-01, 4.38475687e-01, 4.94036404e-01, 4.09785794e-01, 6.40025507e-01, 5.79371821e-01, 1.57726118e-01, 6.04572263e-01, 5.41072639e-01, 5.18847173e-01, 1.97093284e-01, 8.91767002e-01, 
4.29050835e-01, 8.25490570e-01, 3.87699807e-01, 4.50705808e-01, 2.49371643e-01, 3.36074898e-01, 9.29925118e-01, 6.65393649e-01, 9.07275994e-01, 3.73075859e-01, 4.14044139e-03, 2.37463702e-01, 2.25893784e-01, 2.46900245e-01, 4.50350196e-01, 3.48618117e-01, 5.07193932e-01, 5.23435142e-01, 8.13611417e-01, 8.92715622e-01, 1.02623450e-01, 3.06088345e-01, 7.80461650e-01, 2.21453645e-01, 2.01419652e-01, 2.84254457e-01, 3.68286735e-01, 7.39358243e-01, 8.97879394e-01, 9.81599566e-01, 7.56526442e-01, 7.37645545e-01, 4.23976657e-02, 8.25922012e-01, 2.60956996e-01, 2.90702065e-01, 8.98388344e-01, 3.03733299e-01, 8.49071471e-01, 3.45835425e-01, 7.65458276e-01, 5.68094872e-01, 8.93770930e-01, 9.93161641e-01, 5.63368667e-02, 4.26548945e-01, 5.46745780e-01, 5.75674571e-01, 7.94599487e-01, 7.18935553e-02, 4.46492976e-01, 6.40240123e-01, 2.73246969e-01, 2.00465968e-01, 1.30718835e-01, 1.92492005e-01, 1.96617189e-01, 6.61271644e-01, 8.12687657e-01, 8.66342445e-01 }, {0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, true, -4}, {10, 5, {0.21390334, 0.50261639, 0.91036676, 0.59166485, 0.71162682, 0.10248392, 0.77782677, 0.43772379, 0.4035871, 0.3282796, 0.47544681, 0.59862974, 0.12319357, 0.06239463, 0.28200272, 0.1345717, 0.50498218, 0.5113505, 0.16233086, 0.62165332, 0.42281548, 0.933117, 0.41386077, 0.23264562, 0.73325968, 0.37537541, 0.70719873, 0.14522645, 0.73279625, 0.9126674, 0.84854131, 0.28890216, 0.85267903, 0.74703138, 0.83842071, 0.34942792, 0.27864171, 0.70911132, 0.21338564, 0.32035554, 0.73788331, 0.46926692, 0.57570162, 0.42559178, 0.87120209, 0.22734951, 0.01847905, 0.75549396, 0.76166195, 0.66613745}, {9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, 10, false, 5}, // Test outlier points {9, 2, {-1, -50, 3, 4, 5000, 10000, 1, 3, 4, 5, 
0.000005, 0.00002, 2000000, 500000, 10, 50, 30, 5}, {6, 0, 5, 0, 0, 4, 3, 2, 1}, 7, false, 5}, // Test n_clusters == (n_points / 2) {10, 5, {0.21390334, 0.50261639, 0.91036676, 0.59166485, 0.71162682, 0.10248392, 0.77782677, 0.43772379, 0.4035871, 0.3282796, 0.47544681, 0.59862974, 0.12319357, 0.06239463, 0.28200272, 0.1345717, 0.50498218, 0.5113505, 0.16233086, 0.62165332, 0.42281548, 0.933117, 0.41386077, 0.23264562, 0.73325968, 0.37537541, 0.70719873, 0.14522645, 0.73279625, 0.9126674, 0.84854131, 0.28890216, 0.85267903, 0.74703138, 0.83842071, 0.34942792, 0.27864171, 0.70911132, 0.21338564, 0.32035554, 0.73788331, 0.46926692, 0.57570162, 0.42559178, 0.87120209, 0.22734951, 0.01847905, 0.75549396, 0.76166195, 0.66613745}, {1, 0, 4, 0, 0, 3, 2, 0, 2, 1}, 5, false, 10}, // Test n_points == 100 {100, 10, {6.26168372e-01, 9.30437651e-01, 6.02450208e-01, 2.73025296e-01, 9.53050619e-01, 3.32164396e-01, 6.88942598e-01, 5.79163537e-01, 6.70341547e-01, 2.70140602e-02, 9.30429671e-01, 7.17721157e-01, 9.89948537e-01, 7.75253347e-01, 1.34491522e-02, 2.48522428e-02, 3.51413378e-01, 7.64405834e-01, 7.86373507e-01, 7.18748577e-01, 8.66998621e-01, 6.80316582e-01, 2.51288712e-01, 4.91078420e-01, 3.76246281e-01, 4.86828710e-01, 5.67464772e-01, 5.30734742e-01, 8.99478296e-01, 7.66699088e-01, 9.49339111e-01, 3.55248484e-01, 9.06046929e-01, 4.48407772e-01, 6.96395305e-01, 2.44277335e-01, 7.74840000e-01, 5.21046603e-01, 4.66423971e-02, 5.12019638e-02, 8.95019614e-01, 5.28956953e-01, 4.31536306e-01, 5.83857744e-01, 4.41787364e-01, 4.68656523e-01, 5.73971433e-01, 6.79989654e-01, 3.19650588e-01, 6.12579596e-01, 6.49126442e-02, 8.39131142e-01, 2.85252117e-01, 5.84848929e-01, 9.46507115e-01, 8.58440748e-01, 3.61528940e-01, 2.44215959e-01, 3.80101125e-01, 4.57128957e-02, 8.82216988e-01, 8.31498633e-01, 7.23474381e-01, 7.75788607e-01, 1.40864146e-01, 6.62092382e-01, 5.13985168e-01, 3.00686418e-01, 8.70109949e-01, 2.43187753e-01, 2.89391938e-01, 2.84214238e-01, 8.70985521e-01, 
8.77491176e-01, 6.72537226e-01, 3.30929686e-01, 1.85934324e-01, 9.16222614e-01, 6.18239142e-01, 2.64768597e-01, 5.76145451e-01, 8.62961369e-01, 6.84757925e-01, 7.60549082e-01, 1.27645356e-01, 4.51004673e-01, 3.92292980e-01, 4.63170803e-01, 4.35449330e-02, 2.17583404e-01, 5.71832605e-02, 2.06763039e-01, 3.70116249e-01, 2.09750028e-01, 6.17283019e-01, 8.62549231e-01, 9.84156240e-02, 2.66249156e-01, 3.87635103e-01, 2.85591012e-02, 4.24826068e-01, 4.45795088e-01, 6.86227676e-01, 1.08848960e-01, 5.96731841e-02, 3.71770228e-01, 1.91548833e-01, 6.95136078e-01, 9.00700636e-01, 8.76363105e-01, 2.67334632e-01, 1.80619709e-01, 7.94060419e-01, 1.42854171e-02, 1.09372387e-01, 8.74028108e-01, 6.46403232e-01, 4.86588834e-01, 5.93446175e-02, 6.11886291e-01, 8.83865057e-01, 3.15879821e-01, 2.27043992e-01, 9.76764951e-01, 6.15620336e-01, 9.76199360e-01, 2.40548962e-01, 3.21795663e-01, 8.75087904e-02, 8.11234663e-01, 6.96070480e-01, 8.12062321e-01, 1.21958818e-01, 3.44348628e-02, 8.72630414e-01, 3.06162776e-01, 1.76043529e-02, 9.45894971e-01, 5.33896401e-01, 6.21642973e-01, 4.93062535e-01, 4.48984262e-01, 2.24560379e-01, 4.24052195e-02, 4.43447610e-01, 8.95646149e-01, 6.05220676e-01, 1.81840491e-01, 9.70831206e-01, 2.12563586e-02, 6.92582693e-01, 7.55946922e-01, 7.95086143e-01, 6.05328941e-01, 3.99350764e-01, 4.32846636e-01, 9.81114529e-01, 4.98266428e-01, 6.37127930e-03, 1.59085889e-01, 6.34682067e-05, 5.59429440e-01, 7.38827633e-01, 8.93214770e-01, 2.16494306e-01, 9.35430573e-02, 4.75665868e-02, 7.80503518e-01, 7.86240041e-01, 7.06854594e-01, 2.13725879e-02, 7.68246091e-01, 4.50234808e-01, 5.21231104e-01, 5.01989826e-03, 4.22081572e-02, 1.65337732e-01, 8.54134740e-01, 4.99430262e-01, 8.94525601e-01, 1.14028379e-01, 3.69739861e-01, 1.32955599e-01, 2.65563824e-01, 2.52811151e-01, 1.44792843e-01, 6.88449594e-01, 4.44921417e-01, 8.23296587e-01, 1.93266317e-01, 1.19033309e-01, 1.36368966e-01, 3.42600285e-01, 5.64505195e-01, 5.57594559e-01, 7.44257892e-01, 8.38231569e-02, 4.11548847e-01, 
3.21010077e-01, 8.55081359e-01, 4.30105779e-01, 1.16229135e-01, 9.87731964e-02, 3.14712335e-01, 4.50880592e-01, 2.72289598e-01, 6.31615256e-01, 8.97432958e-01, 4.44764250e-01, 8.03776440e-01, 2.68767748e-02, 2.43374608e-01, 4.02141103e-01, 4.98881209e-01, 5.33173003e-01, 8.82890436e-01, 7.16149148e-01, 4.19664401e-01, 2.29335357e-01, 2.88637806e-01, 3.44696803e-01, 6.78171906e-01, 5.69849716e-01, 5.86454477e-01, 3.54474989e-01, 9.03876540e-01, 6.45980000e-01, 6.34887593e-01, 7.88039746e-02, 2.04814126e-01, 7.82251754e-01, 2.43147074e-01, 7.50951808e-01, 1.72799092e-02, 2.95349590e-01, 6.57991826e-01, 8.81214312e-01, 5.73970708e-01, 2.77610881e-01, 1.82155097e-01, 7.69797417e-02, 6.44792402e-01, 9.46950998e-01, 7.73064845e-01, 6.04733624e-01, 5.80094567e-01, 1.67498426e-01, 2.66514296e-01, 6.50140368e-01, 1.91170299e-01, 2.08752199e-01, 3.01664091e-01, 9.85033484e-01, 2.92909152e-01, 8.65816607e-01, 1.85222119e-01, 2.28814559e-01, 1.34286382e-02, 2.89234322e-01, 8.18668708e-01, 4.71706924e-01, 9.23199803e-01, 2.80879188e-01, 1.47319284e-01, 4.13915748e-01, 9.31274932e-02, 6.66322195e-01, 9.66953974e-01, 3.19405786e-01, 6.69486551e-01, 5.03096313e-02, 6.95225201e-01, 5.78469859e-01, 6.29481655e-01, 1.39252534e-01, 1.22564968e-01, 6.80663678e-01, 6.34607157e-01, 6.42765834e-01, 1.57127410e-02, 2.92132086e-01, 5.24423878e-01, 4.68676824e-01, 2.86003928e-01, 7.18608322e-01, 8.95617933e-01, 5.48844309e-01, 1.74517278e-01, 5.24379196e-01, 2.13526524e-01, 5.88375435e-01, 9.88560185e-01, 4.17435771e-01, 6.14438688e-01, 9.53760881e-01, 5.27151288e-01, 7.03017278e-01, 3.44448559e-01, 4.47059676e-01, 2.83414901e-01, 1.98979011e-01, 4.24917361e-01, 5.73172761e-01, 2.32398853e-02, 1.65887230e-01, 4.05552785e-01, 9.29665524e-01, 2.26135696e-01, 9.20563384e-01, 7.65259963e-01, 4.54820075e-01, 8.97710267e-01, 3.78559302e-03, 9.15219382e-01, 3.55705698e-01, 6.94905124e-01, 8.58540202e-01, 3.89790666e-01, 2.49478206e-01, 7.93679304e-01, 4.75830027e-01, 4.40425353e-01, 3.70579459e-01, 
1.40578049e-01, 1.70386675e-01, 7.04056121e-01, 4.85963102e-01, 9.68450060e-01, 6.77178001e-01, 2.65934654e-01, 2.58915007e-01, 6.70052890e-01, 2.61945109e-01, 8.46207759e-01, 1.01928951e-01, 2.85611334e-01, 2.45776933e-01, 2.66658783e-01, 3.71724077e-01, 4.34319025e-01, 4.24407347e-01, 7.15417683e-01, 8.07997684e-01, 1.64296275e-01, 6.01638065e-01, 8.60606804e-02, 2.68719187e-01, 5.11764101e-01, 9.75844338e-01, 7.81226782e-01, 2.20925515e-01, 7.18135040e-01, 9.82395577e-01, 8.39160243e-01, 9.08058083e-01, 6.88010677e-01, 8.14271847e-01, 5.12460821e-01, 1.17311345e-01, 5.96075228e-01, 9.17455497e-01, 2.12052706e-01, 7.04074603e-01, 8.72872565e-02, 8.76047818e-01, 6.96235046e-01, 8.54801557e-01, 2.49729159e-01, 9.76594604e-01, 2.87386363e-01, 2.36461559e-02, 9.94075254e-01, 4.25193986e-01, 7.61869994e-01, 5.13334255e-01, 6.44711165e-02, 8.92156689e-01, 3.55235167e-01, 1.08154647e-01, 8.78446825e-01, 2.43833016e-01, 9.23071293e-01, 2.72724115e-01, 9.46631338e-01, 3.74510294e-01, 4.08451278e-02, 9.78392777e-01, 3.65079221e-01, 6.37199516e-01, 5.51144906e-01, 5.25978080e-01, 1.42803678e-01, 4.05451674e-01, 7.79788219e-01, 6.26009784e-01, 3.35249497e-01, 1.43159543e-02, 1.80363779e-01, 5.05096904e-01, 2.82619947e-01, 5.83561392e-01, 3.10951324e-01, 8.73223968e-01, 4.38545619e-01, 4.81348800e-01, 6.68497085e-01, 3.79345401e-01, 9.58832501e-01, 1.89869550e-01, 2.34083070e-01, 2.94066207e-01, 5.74892667e-02, 6.92106828e-02, 9.61127686e-02, 6.72650672e-02, 8.47345378e-01, 2.80916761e-01, 7.32177357e-03, 9.80785961e-01, 5.73192225e-02, 8.48781331e-01, 8.83225408e-01, 7.34398275e-01, 7.70381941e-01, 6.20778343e-01, 8.96822048e-01, 5.40732486e-01, 3.69704071e-01, 5.77305837e-01, 2.08221827e-01, 7.34275341e-01, 1.06110900e-01, 3.49496706e-01, 8.34948910e-01, 1.56403291e-02, 6.78576376e-01, 8.96141268e-01, 5.94835119e-01, 1.43943153e-01, 3.49618530e-01, 2.10440392e-01, 3.46585620e-01, 1.05153093e-01, 3.45446174e-01, 2.72177079e-01, 7.07946300e-01, 4.33717726e-02, 3.31232203e-01, 
3.91874320e-01, 4.76338141e-01, 6.22777789e-01, 2.95989228e-02, 4.32855769e-01, 7.61049310e-01, 3.63279149e-01, 9.47210350e-01, 6.43721247e-01, 6.58025802e-01, 1.05247633e-02, 5.29974442e-01, 7.30675767e-01, 4.30041079e-01, 6.62634841e-01, 8.25936616e-01, 9.91253704e-01, 6.79399281e-01, 5.44177006e-01, 7.52876048e-01, 3.32139049e-01, 7.98732398e-01, 7.38865223e-01, 9.16055132e-01, 6.11736493e-01, 9.63672879e-01, 1.83778839e-01, 7.27558919e-02, 5.91602822e-01, 3.25235484e-01, 2.34741217e-01, 9.52346277e-01, 9.18556407e-01, 9.35373324e-01, 6.89209070e-01, 2.56049054e-01, 6.17975395e-01, 7.82285691e-01, 9.84983432e-01, 6.62322741e-01, 2.04144457e-01, 3.98446577e-01, 1.38918297e-01, 3.05919921e-01, 3.14043787e-01, 5.91072666e-01, 7.44703771e-01, 8.92272567e-01, 9.78017873e-01, 9.01203161e-01, 1.41526372e-01, 4.14878484e-01, 6.80683651e-01, 5.01733152e-02, 8.14635389e-01, 2.27926375e-01, 9.03269815e-01, 8.68443745e-01, 9.86939190e-01, 7.40779486e-01, 2.61005311e-01, 3.19276232e-01, 9.69509248e-01, 1.11908818e-01, 4.49198556e-01, 1.27056715e-01, 3.84064823e-01, 5.14591811e-01, 2.10747488e-01, 9.53884090e-01, 8.43167950e-01, 4.51187972e-01, 3.75331782e-01, 6.23566461e-01, 3.55290379e-01, 2.95705968e-01, 1.69622690e-01, 1.42981830e-01, 2.72180991e-01, 9.46468040e-01, 3.70932500e-01, 9.94292830e-01, 4.62587505e-01, 7.14817405e-01, 2.45370540e-02, 3.00906377e-01, 5.75768304e-01, 9.71448393e-01, 6.95574827e-02, 3.93693854e-01, 5.29306116e-01, 5.04694554e-01, 6.73797120e-02, 6.76596969e-01, 5.50948898e-01, 3.24909641e-01, 7.70337719e-01, 6.51842631e-03, 3.03264879e-01, 7.61037886e-03, 2.72289601e-01, 1.50502041e-01, 6.71103888e-02, 7.41503703e-01, 1.92088941e-01, 2.19043977e-01, 9.09320161e-01, 2.37993569e-01, 6.18107973e-02, 8.31447852e-01, 2.23355609e-01, 1.84789435e-01, 4.16104518e-01, 4.21573859e-01, 8.72446305e-02, 2.97294197e-01, 4.50328256e-01, 8.72199917e-01, 2.51279916e-01, 4.86219272e-01, 7.57071329e-01, 4.85655942e-01, 1.06187277e-01, 4.92341327e-01, 1.46017513e-01, 
5.25421017e-01, 4.22637906e-01, 2.24685018e-01, 8.72648431e-01, 5.54051490e-01, 1.80745062e-01, 2.12756336e-01, 5.20883169e-01, 7.60363654e-01, 8.30254678e-01, 5.00003328e-01, 4.69017439e-01, 6.38105527e-01, 3.50638261e-02, 5.22217353e-02, 9.06516882e-02, 8.52975842e-01, 1.19985883e-01, 3.74926753e-01, 6.50302066e-01, 1.98875727e-01, 6.28362507e-02, 4.32693501e-01, 3.10500685e-01, 6.20732833e-01, 4.58503272e-01, 3.20790034e-01, 7.91284868e-01, 7.93054570e-01, 2.93406765e-01, 8.95399023e-01, 1.06441034e-01, 7.53085241e-02, 8.67523104e-01, 1.47963482e-01, 1.25584706e-01, 3.81545040e-02, 6.34338619e-01, 1.76368938e-02, 5.75553531e-02, 5.31607516e-01, 2.63869588e-01, 9.41945823e-01, 9.24028838e-02, 5.21496463e-01, 7.74866558e-01, 5.65210610e-01, 7.28015327e-02, 6.51963790e-01, 8.94727453e-01, 4.49571590e-01, 1.29932405e-01, 8.64026259e-01, 9.92599934e-01, 7.43721560e-01, 8.87300215e-01, 1.06369925e-01, 8.11335531e-01, 7.87734900e-01, 9.87344678e-01, 5.32502820e-01, 4.42612382e-01, 9.64041183e-01, 1.66085871e-01, 1.12937664e-01, 5.24423470e-01, 6.54689333e-01, 4.59119726e-01, 5.22774091e-01, 3.08722276e-02, 6.26979315e-01, 4.49754105e-01, 8.07495757e-01, 2.34199499e-01, 1.67765675e-01, 9.22168418e-01, 3.73210378e-01, 8.04432575e-01, 5.61890354e-01, 4.47025593e-01, 6.43155678e-01, 2.40407640e-01, 5.91631279e-01, 1.59369206e-01, 7.75799090e-01, 8.32067212e-01, 5.59791576e-02, 6.39105224e-01, 4.85274738e-01, 2.12630838e-01, 2.81431312e-02, 7.16205363e-01, 6.83885011e-01, 5.23869697e-01, 9.99418314e-01, 8.35331599e-01, 4.69877463e-02, 6.74712562e-01, 7.99273684e-01, 2.77001890e-02, 5.75809742e-01, 2.78513031e-01, 8.36209905e-01, 7.25472379e-01, 4.87173943e-01, 7.88311357e-01, 9.64676177e-01, 1.75752651e-01, 4.98112580e-01, 8.08850418e-02, 6.40981131e-01, 4.06647450e-01, 8.46539387e-01, 2.12620694e-01, 9.11012851e-01, 8.25041445e-01, 8.90065575e-01, 9.63626055e-01, 5.96689242e-01, 1.63372670e-01, 4.51640148e-01, 3.43026542e-01, 5.80658851e-01, 2.82327625e-01, 4.75535418e-01, 
6.27760926e-01, 8.46314115e-01, 9.61961932e-01, 3.19806094e-01, 5.05508062e-01, 5.28102944e-01, 6.13045057e-01, 7.44714938e-01, 1.50586073e-01, 7.91878033e-01, 4.89839179e-01, 3.10496849e-01, 8.82309038e-01, 2.86922314e-01, 4.84687559e-01, 5.20838630e-01, 4.62955493e-01, 2.38185305e-01, 5.47259907e-02, 7.10916137e-01, 7.31887202e-01, 6.25602317e-01, 8.77741168e-01, 4.19881322e-01, 4.81222328e-01, 1.28224501e-01, 2.46034010e-01, 3.34971854e-01, 7.37216484e-01, 5.62134821e-02, 7.14089724e-01, 9.85549393e-01, 4.66295827e-01, 3.08722434e-03, 4.70237690e-01, 2.66524167e-01, 7.93875484e-01, 4.54795911e-02, 8.09702944e-01, 1.47709735e-02, 1.70082405e-01, 6.35905179e-01, 3.75379109e-01, 4.30315011e-01, 3.15788760e-01, 5.58065230e-01, 2.24643800e-01, 2.42142981e-01, 6.57283636e-01, 3.34921891e-01, 1.26588975e-01, 7.68064155e-01, 9.43856291e-01, 4.47518596e-01, 5.44453573e-01, 9.95764932e-01, 7.16444391e-01, 8.51019765e-01, 1.01179183e-01, 4.45473958e-01, 4.60327322e-01, 4.96895844e-02, 4.72907738e-01, 5.58987444e-01, 3.41027487e-01, 1.56175026e-01, 7.58283148e-01, 6.83600909e-01, 2.14623396e-01, 3.27348880e-01, 3.92517893e-01, 6.70418431e-01, 5.16440832e-01, 8.63140348e-01, 5.73277464e-01, 3.46608058e-01, 7.39396341e-01, 7.20852434e-01, 2.35653246e-02, 3.89935659e-01, 7.53783745e-01, 6.34563528e-01, 8.79339335e-01, 7.41599159e-02, 5.62433904e-01, 6.15553852e-01, 4.56956324e-01, 5.20047447e-01, 5.26845015e-02, 5.58471266e-01, 1.63632233e-01, 5.38936665e-02, 6.49593683e-01, 2.56838748e-01, 8.99035326e-01, 7.20847756e-01, 5.68954684e-01, 7.43684755e-01, 5.70924238e-01, 3.82318724e-01, 4.89328290e-01, 5.62208561e-01, 4.97540804e-02, 4.18011085e-01, 6.88041565e-01, 2.16234653e-01, 7.89548214e-01, 8.46136387e-01, 8.46816189e-01, 1.73842353e-01, 6.11627842e-02, 8.44440559e-01, 4.50646654e-01, 3.74785037e-01, 4.87196697e-01, 4.56276448e-01, 9.13284391e-01, 4.15715464e-01, 7.13597697e-01, 1.23641270e-02, 5.10031271e-01, 4.74601930e-02, 2.55731159e-01, 3.22090006e-01, 1.91165703e-01, 
4.51170940e-01, 7.50843157e-01, 4.42420576e-01, 4.25380660e-01, 4.50667257e-01, 6.55689206e-01, 9.68257670e-02, 1.96528793e-01, 8.97343028e-01, 4.99940904e-01, 6.65504083e-01, 9.41828079e-01, 4.54397338e-01, 5.61893331e-01, 5.09839880e-01, 4.53117514e-01, 8.96804127e-02, 1.74888861e-01, 6.65641378e-01, 2.81668336e-01, 1.89532742e-01, 5.61668382e-01, 8.68330157e-02, 8.25092797e-01, 5.18106324e-01, 1.71904024e-01, 3.68385523e-01, 1.62005436e-01, 7.48507399e-01, 9.30274827e-01, 2.38198517e-01, 9.52222901e-01, 5.23587800e-01, 6.94384557e-01, 1.09338652e-01, 4.83356794e-01, 2.73050402e-01, 3.68027050e-01, 5.92366466e-01, 1.83192289e-01, 8.60376029e-01, 7.13926203e-01, 8.16750052e-01, 1.57890291e-01, 6.25691951e-01, 5.24831646e-01, 1.73873797e-01, 1.02429784e-01, 9.17488471e-01, 4.03584434e-01, 9.31170884e-01, 2.79386137e-01, 8.77745206e-01, 2.45200576e-01, 1.28896951e-01, 3.15713052e-01, 5.27874291e-01, 2.16444335e-01, 7.03883817e-01, 7.74738919e-02, 8.42422142e-01, 3.75598924e-01, 3.51002411e-01, 6.22752776e-01, 4.82407943e-01, 7.43107867e-01, 9.46182666e-01, 9.44344819e-01, 3.28124763e-01, 1.06147431e-01, 1.65102684e-01, 3.84060507e-01, 2.91057722e-01, 7.68173662e-02, 1.03543651e-01, 6.76698940e-01, 1.43141994e-01, 7.21342202e-01, 6.69471294e-03, 9.07298311e-01, 5.57080171e-01, 8.10954489e-01, 4.11120526e-01, 2.06407453e-01, 2.59590556e-01, 7.58512718e-01, 5.79873897e-01, 2.92875650e-01, 2.83686529e-01, 2.42829343e-01, 9.19323719e-01, 3.46832864e-01, 3.58238858e-01, 7.42827585e-01, 2.05760059e-01, 9.58438860e-01, 5.66326411e-01, 6.60292846e-01, 5.61095078e-02, 6.79465531e-01, 7.05118513e-01, 4.44713264e-01, 2.09732933e-01, 5.22732436e-01, 1.74396512e-01, 5.29356748e-01, 4.38475687e-01, 4.94036404e-01, 4.09785794e-01, 6.40025507e-01, 5.79371821e-01, 1.57726118e-01, 6.04572263e-01, 5.41072639e-01, 5.18847173e-01, 1.97093284e-01, 8.91767002e-01, 4.29050835e-01, 8.25490570e-01, 3.87699807e-01, 4.50705808e-01, 2.49371643e-01, 3.36074898e-01, 9.29925118e-01, 6.65393649e-01, 
9.07275994e-01, 3.73075859e-01, 4.14044139e-03, 2.37463702e-01, 2.25893784e-01, 2.46900245e-01, 4.50350196e-01, 3.48618117e-01, 5.07193932e-01, 5.23435142e-01, 8.13611417e-01, 8.92715622e-01, 1.02623450e-01, 3.06088345e-01, 7.80461650e-01, 2.21453645e-01, 2.01419652e-01, 2.84254457e-01, 3.68286735e-01, 7.39358243e-01, 8.97879394e-01, 9.81599566e-01, 7.56526442e-01, 7.37645545e-01, 4.23976657e-02, 8.25922012e-01, 2.60956996e-01, 2.90702065e-01, 8.98388344e-01, 3.03733299e-01, 8.49071471e-01, 3.45835425e-01, 7.65458276e-01, 5.68094872e-01, 8.93770930e-01, 9.93161641e-01, 5.63368667e-02, 4.26548945e-01, 5.46745780e-01, 5.75674571e-01, 7.94599487e-01, 7.18935553e-02, 4.46492976e-01, 6.40240123e-01, 2.73246969e-01, 2.00465968e-01, 1.30718835e-01, 1.92492005e-01, 1.96617189e-01, 6.61271644e-01, 8.12687657e-01, 8.66342445e-01 }, {0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 10, false, 5}}; typedef LinkageTest<float, int> LinkageTestF_Int; TEST_P(LinkageTestF_Int, Result) { EXPECT_TRUE(score == 1.0); } INSTANTIATE_TEST_CASE_P(LinkageTest, LinkageTestF_Int, ::testing::ValuesIn(linkage_inputsf2)); } // namespace cuvs
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/cluster/kmeans.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "../test_utils.cuh"

#include <gtest/gtest.h>
#include <optional>
#include <raft/core/resource/cuda_stream.hpp>
#include <vector>

#include <cuvs/cluster/kmeans.cuh>
#include <cuvs/stats/adjusted_rand_index.cuh>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/make_blobs.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <thrust/fill.h>

namespace raft {

/**
 * Parameter set for one k-means test case.
 */
template <typename T>
struct KmeansInputs {
  int n_row;       // number of samples in the generated blob dataset
  int n_col;       // number of features per sample
  int n_clusters;  // number of clusters to fit (also used to generate the blobs)
  T tol;           // convergence tolerance passed to the solver / comparison
  bool weighted;   // whether an explicit (uniform) sample-weight vector is supplied
};

/**
 * Thin wrapper around cuvs::cluster::kmeans::cluster_cost that reduces the
 * per-sample min-cluster distances into a single scalar cost with add_op.
 */
template <typename DataT, typename IndexT>
void run_cluster_cost(const raft::resources& handle,
                      raft::device_vector_view<DataT, IndexT> minClusterDistance,
                      rmm::device_uvector<char>& workspace,
                      raft::device_scalar_view<DataT> clusterCost)
{
  cuvs::cluster::kmeans::cluster_cost(
    handle, minClusterDistance, workspace, clusterCost, raft::add_op{});
}

/**
 * Parameterized k-means test fixture. SetUp runs two independent checks:
 *  - basicTest: end-to-end kmeans_fit_predict on synthetic blobs, scored with
 *    adjusted rand index against the generator's labels.
 *  - apiTest: exercises the lower-level building blocks (shuffle_and_gather,
 *    init_plus_plus, min_cluster_distance, fit_main, update_centroids,
 *    count_samples_in_cluster) and checks internal consistency.
 */
template <typename T>
class KmeansTest : public ::testing::TestWithParam<KmeansInputs<T>> {
 protected:
  // Device buffers start empty; they are resized once the parameters are known.
  KmeansTest()
    : d_labels(0, resource::get_cuda_stream(handle)),
      d_labels_ref(0, resource::get_cuda_stream(handle)),
      d_centroids(0, resource::get_cuda_stream(handle)),
      d_sample_weight(0, resource::get_cuda_stream(handle))
  {
  }

  // Drives the individual k-means primitives and asserts that (a) one fit_main
  // pass strictly decreases the cluster cost and (b) two independent ways of
  // counting samples per cluster agree.
  void apiTest()
  {
    testparams  = ::testing::TestWithParam<KmeansInputs<T>>::GetParam();
    auto stream = resource::get_cuda_stream(handle);
    int n_samples              = testparams.n_row;
    int n_features             = testparams.n_col;
    params.n_clusters          = testparams.n_clusters;
    params.tol                 = testparams.tol;
    params.n_init              = 1;
    params.rng_state.seed      = 1;
    params.oversampling_factor = 0;
    // NOTE(review): `rng` appears unused below — make_blobs takes its own seed.
    raft::random::RngState rng(params.rng_state.seed, params.rng_state.type);

    auto X      = raft::make_device_matrix<T, int>(handle, n_samples, n_features);
    auto labels = raft::make_device_vector<int, int>(handle, n_samples);

    // Synthetic, well-separated clusters; fixed seed keeps the test deterministic.
    raft::random::make_blobs<T, int>(X.data_handle(),
                                     labels.data_handle(),
                                     n_samples,
                                     n_features,
                                     params.n_clusters,
                                     stream,
                                     true,
                                     nullptr,
                                     nullptr,
                                     T(1.0),
                                     false,
                                     (T)-10.0f,
                                     (T)10.0f,
                                     (uint64_t)1234);

    d_labels.resize(n_samples, stream);
    d_labels_ref.resize(n_samples, stream);
    d_centroids.resize(params.n_clusters * n_features, stream);
    raft::copy(d_labels_ref.data(), labels.data_handle(), n_samples, stream);

    // NOTE(review): this local shadows the member `d_sample_weight`; the member
    // stays empty in this test — confirm the shadowing is intentional.
    rmm::device_uvector<T> d_sample_weight(n_samples, stream);
    // Uniform unit weights.
    thrust::fill(
      thrust::cuda::par.on(stream), d_sample_weight.data(), d_sample_weight.data() + n_samples, 1);
    auto weight_view =
      raft::make_device_vector_view<const T, int>(d_sample_weight.data(), n_samples);

    T inertia  = 0;
    int n_iter = 0;
    rmm::device_uvector<char> workspace(0, stream);
    rmm::device_uvector<T> L2NormBuf_OR_DistBuf(0, stream);
    // NOTE(review): `inRankCp` appears unused in this function.
    rmm::device_uvector<T> inRankCp(0, stream);
    auto X_view = raft::make_const_mdspan(X.view());
    auto centroids_view =
      raft::make_device_matrix_view<T, int>(d_centroids.data(), params.n_clusters, n_features);
    // A quarter-sized random subset of X used only to seed the centroids.
    auto miniX = raft::make_device_matrix<T, int>(handle, n_samples / 4, n_features);

    // Initialize kmeans on a portion of X
    cuvs::cluster::kmeans::shuffle_and_gather(
      handle,
      X_view,
      raft::make_device_matrix_view<T, int>(miniX.data_handle(), miniX.extent(0), miniX.extent(1)),
      miniX.extent(0),
      params.rng_state.seed);

    cuvs::cluster::kmeans::init_plus_plus(
      handle, params, raft::make_const_mdspan(miniX.view()), centroids_view, workspace);

    auto minClusterDistance = raft::make_device_vector<T, int>(handle, n_samples);
    auto minClusterAndDistance =
      raft::make_device_vector<raft::KeyValuePair<int, T>, int>(handle, n_samples);
    auto L2NormX           = raft::make_device_vector<T, int>(handle, n_samples);
    auto clusterCostBefore = raft::make_device_scalar<T>(handle, 0);
    auto clusterCostAfter  = raft::make_device_scalar<T>(handle, 0);

    // Row norms of X, needed by the expanded-L2 distance computations below.
    raft::linalg::rowNorm(L2NormX.data_handle(),
                          X.data_handle(),
                          X.extent(1),
                          X.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);

    cuvs::cluster::kmeans::min_cluster_distance(handle,
                                                X_view,
                                                centroids_view,
                                                minClusterDistance.view(),
                                                L2NormX.view(),
                                                L2NormBuf_OR_DistBuf,
                                                params.metric,
                                                params.batch_samples,
                                                params.batch_centroids,
                                                workspace);

    run_cluster_cost(handle, minClusterDistance.view(), workspace, clusterCostBefore.view());

    // Run a fit of kmeans
    cuvs::cluster::kmeans::fit_main(handle,
                                    params,
                                    X_view,
                                    weight_view,
                                    centroids_view,
                                    raft::make_host_scalar_view(&inertia),
                                    raft::make_host_scalar_view(&n_iter),
                                    workspace);

    // Check that the cluster cost decreased
    cuvs::cluster::kmeans::min_cluster_distance(handle,
                                                X_view,
                                                centroids_view,
                                                minClusterDistance.view(),
                                                L2NormX.view(),
                                                L2NormBuf_OR_DistBuf,
                                                params.metric,
                                                params.batch_samples,
                                                params.batch_centroids,
                                                workspace);
    run_cluster_cost(handle, minClusterDistance.view(), workspace, clusterCostAfter.view());
    T h_clusterCostBefore = T(0);
    T h_clusterCostAfter  = T(0);
    raft::update_host(&h_clusterCostBefore, clusterCostBefore.data_handle(), 1, stream);
    raft::update_host(&h_clusterCostAfter, clusterCostAfter.data_handle(), 1, stream);
    ASSERT_TRUE(h_clusterCostAfter < h_clusterCostBefore);

    // Count samples in clusters using 2 methods and compare them
    // Fill minClusterAndDistance
    cuvs::cluster::kmeans::min_cluster_and_distance(handle,
                                                    X_view,
                                                    raft::make_device_matrix_view<const T, int>(
                                                      d_centroids.data(), params.n_clusters, n_features),
                                                    minClusterAndDistance.view(),
                                                    L2NormX.view(),
                                                    L2NormBuf_OR_DistBuf,
                                                    params.metric,
                                                    params.batch_samples,
                                                    params.batch_centroids,
                                                    workspace);
    // Iterator that maps each (cluster, distance) pair to just the cluster index.
    cuvs::cluster::kmeans::KeyValueIndexOp<int, T> conversion_op;
    cub::TransformInputIterator<int,
                                cuvs::cluster::kmeans::KeyValueIndexOp<int, T>,
                                raft::KeyValuePair<int, T>*>
      itr(minClusterAndDistance.data_handle(), conversion_op);

    auto sampleCountInCluster = raft::make_device_vector<T, int>(handle, params.n_clusters);
    auto weigthInCluster      = raft::make_device_vector<T, int>(handle, params.n_clusters);
    auto newCentroids = raft::make_device_matrix<T, int>(handle, params.n_clusters, n_features);

    // Method 1: accumulate (uniform) weights per cluster via update_centroids.
    cuvs::cluster::kmeans::update_centroids(handle,
                                            X_view,
                                            weight_view,
                                            raft::make_device_matrix_view<const T, int>(
                                              d_centroids.data(), params.n_clusters, n_features),
                                            itr,
                                            weigthInCluster.view(),
                                            newCentroids.view());

    // Method 2: direct per-cluster sample counting.
    cuvs::cluster::kmeans::count_samples_in_cluster(handle,
                                                    params,
                                                    X_view,
                                                    L2NormX.view(),
                                                    newCentroids.view(),
                                                    workspace,
                                                    sampleCountInCluster.view());
    // With unit weights the two methods must agree (up to tolerance).
    ASSERT_TRUE(devArrMatch(sampleCountInCluster.data_handle(),
                            weigthInCluster.data_handle(),
                            params.n_clusters,
                            CompareApprox<T>(params.tol)));
  }

  // End-to-end fit_predict on synthetic blobs; stores an adjusted-rand-index
  // score in `score` (1.0 = perfect recovery) and dumps labels on mismatch.
  void basicTest()
  {
    testparams                 = ::testing::TestWithParam<KmeansInputs<T>>::GetParam();
    int n_samples              = testparams.n_row;
    int n_features             = testparams.n_col;
    params.n_clusters          = testparams.n_clusters;
    params.tol                 = testparams.tol;
    params.n_init              = 5;
    params.rng_state.seed      = 1;
    params.oversampling_factor = 0;

    auto X      = raft::make_device_matrix<T, int>(handle, n_samples, n_features);
    auto labels = raft::make_device_vector<int, int>(handle, n_samples);
    auto stream = resource::get_cuda_stream(handle);

    raft::random::make_blobs<T, int>(X.data_handle(),
                                     labels.data_handle(),
                                     n_samples,
                                     n_features,
                                     params.n_clusters,
                                     stream,
                                     true,
                                     nullptr,
                                     nullptr,
                                     T(1.0),
                                     false,
                                     (T)-10.0f,
                                     (T)10.0f,
                                     (uint64_t)1234);

    d_labels.resize(n_samples, stream);
    d_labels_ref.resize(n_samples, stream);
    d_centroids.resize(params.n_clusters * n_features, stream);

    // Optional sample weights: only populated for the weighted variants.
    std::optional<raft::device_vector_view<const T>> d_sw = std::nullopt;
    auto d_centroids_view =
      raft::make_device_matrix_view<T, int>(d_centroids.data(), params.n_clusters, n_features);
    if (testparams.weighted) {
      d_sample_weight.resize(n_samples, stream);
      d_sw = std::make_optional(
        raft::make_device_vector_view<const T, int>(d_sample_weight.data(), n_samples));
      // Uniform weights, so the weighted result should match the unweighted one.
      thrust::fill(thrust::cuda::par.on(stream),
                   d_sample_weight.data(),
                   d_sample_weight.data() + n_samples,
                   1);
    }

    raft::copy(d_labels_ref.data(), labels.data_handle(), n_samples, stream);

    T inertia   = 0;
    int n_iter  = 0;
    auto X_view = raft::make_const_mdspan(X.view());

    cuvs::cluster::kmeans_fit_predict<T, int>(
      handle,
      params,
      X_view,
      d_sw,
      d_centroids_view,
      raft::make_device_vector_view<int, int>(d_labels.data(), n_samples),
      raft::make_host_scalar_view<T>(&inertia),
      raft::make_host_scalar_view<int>(&n_iter));

    resource::sync_stream(handle, stream);

    // ARI is invariant to label permutation, so comparing against the blob
    // generator's labels is valid even though cluster ids differ.
    score = cuvs::stats::adjusted_rand_index(
      d_labels_ref.data(), d_labels.data(), n_samples, resource::get_cuda_stream(handle));

    if (score < 1.0) {
      // Diagnostic dump of the first 25 labels to aid debugging a failure.
      std::stringstream ss;
      ss << "Expected: " << raft::arr2Str(d_labels_ref.data(), 25, "d_labels_ref", stream);
      std::cout << (ss.str().c_str()) << '\n';
      ss.str(std::string());
      ss << "Actual: " << raft::arr2Str(d_labels.data(), 25, "d_labels", stream);
      std::cout << (ss.str().c_str()) << '\n';
      std::cout << "Score = " << score << '\n';
    }
  }

  void SetUp() override
  {
    basicTest();
    apiTest();
  }

 protected:
  raft::resources handle;
  KmeansInputs<T> testparams;
  rmm::device_uvector<int> d_labels;      // labels predicted by fit_predict
  rmm::device_uvector<int> d_labels_ref;  // ground-truth labels from make_blobs
  rmm::device_uvector<T> d_centroids;     // fitted centroids (n_clusters x n_col)
  rmm::device_uvector<T> d_sample_weight; // optional uniform weights (basicTest)
  double score;                           // adjusted rand index of the last basicTest run
  cuvs::cluster::KMeansParams params;
};

const std::vector<KmeansInputs<float>> inputsf2 = {{1000, 32, 5, 0.0001f, true},
                                                   {1000, 32, 5, 0.0001f, false},
                                                   {1000, 100, 20, 0.0001f, true},
                                                   {1000, 100, 20, 0.0001f, false},
                                                   {10000, 32, 10, 0.0001f, true},
                                                   {10000, 32, 10, 0.0001f, false},
                                                   {10000, 100, 50, 0.0001f, true},
                                                   {10000, 100, 50, 0.0001f, false},
                                                   {10000, 500, 100, 0.0001f, true},
                                                   {10000, 500, 100, 0.0001f, false}};

const std::vector<KmeansInputs<double>> inputsd2 = {{1000, 32, 5, 0.0001, true},
                                                    {1000, 32, 5, 0.0001, false},
                                                    {1000, 100, 20, 0.0001, true},
                                                    {1000, 100, 20, 0.0001, false},
                                                    {10000, 32, 10, 0.0001, true},
                                                    {10000, 32, 10, 0.0001, false},
                                                    {10000, 100, 50, 0.0001, true},
                                                    {10000, 100, 50, 0.0001, false},
                                                    {10000, 500, 100, 0.0001, true},
                                                    {10000, 500, 100, 0.0001, false}};

typedef KmeansTest<float> KmeansTestF;
TEST_P(KmeansTestF, Result) { ASSERT_TRUE(score == 1.0); }

typedef KmeansTest<double> KmeansTestD;
TEST_P(KmeansTestD, Result) { ASSERT_TRUE(score == 1.0); }

INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestD, ::testing::ValuesIn(inputsd2));

}  // namespace raft
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/cluster/cluster_solvers.cu
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <gtest/gtest.h>
#include <iostream>
#include <memory>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/device_id.hpp>
#include <raft/core/resources.hpp>

#include <cuvs/spectral/cluster_solvers.cuh>
#include <cuvs/spectral/modularity_maximization.cuh>

namespace cuvs {
namespace spectral {

/**
 * Verifies that the k-means cluster solver rejects null eigenvector/code
 * buffers by throwing, instead of dereferencing them.
 *
 * Note: the previous revision declared an unused local CUDA stream here;
 * it has been removed.
 */
TEST(Raft, ClusterSolvers)
{
  using namespace matrix;
  using index_type = int;
  using value_type = double;

  raft::resources h;

  index_type maxiter{100};
  value_type tol{1.0e-10};
  unsigned long long seed{100110021003};

  index_type n{100};
  index_type d{10};
  index_type k{5};

  // nullptr expected to trigger exceptions:
  value_type* eigvecs{nullptr};
  index_type* codes{nullptr};

  cluster_solver_config_t<index_type, value_type> cfg{k, maxiter, tol, seed};
  kmeans_solver_t<index_type, value_type> cluster_solver{cfg};

  EXPECT_ANY_THROW(cluster_solver.solve(h, n, d, eigvecs, codes));
}

/**
 * Verifies that modularity maximization and modularity analysis reject null
 * output buffers and an empty sparse matrix by throwing.
 *
 * Note: the previous revision declared an unused local CUDA stream here;
 * it has been removed.
 */
TEST(Raft, ModularitySolvers)
{
  using namespace matrix;
  using index_type = int;
  using value_type = double;

  raft::resources h;
  ASSERT_EQ(0, resource::get_device_id(h));

  index_type neigvs{10};
  index_type maxiter{100};
  index_type restart_iter{10};
  value_type tol{1.0e-10};
  bool reorthog{true};

  // nullptr expected to trigger exceptions:
  index_type* clusters{nullptr};
  value_type* eigvals{nullptr};
  value_type* eigvecs{nullptr};

  unsigned long long seed{100110021003};

  eigen_solver_config_t<index_type, value_type> eig_cfg{
    neigvs, maxiter, restart_iter, tol, reorthog, seed};
  lanczos_solver_t<index_type, value_type> eig_solver{eig_cfg};

  index_type k{5};

  cluster_solver_config_t<index_type, value_type> clust_cfg{k, maxiter, tol, seed};
  kmeans_solver_t<index_type, value_type> cluster_solver{clust_cfg};

  // Empty matrix + null outputs: both calls must throw rather than crash.
  sparse_matrix_t<index_type, value_type> sm{h, nullptr, nullptr, nullptr, 0, 0};
  EXPECT_ANY_THROW(spectral::modularity_maximization(
    h, sm, eig_solver, cluster_solver, clusters, eigvals, eigvecs));

  value_type modularity{0};
  EXPECT_ANY_THROW(spectral::analyzeModularity(h, sm, k, clusters, modularity));
}

}  // namespace spectral
}  // namespace cuvs
0
rapidsai_public_repos/cuvs/cpp/test
rapidsai_public_repos/cuvs/cpp/test/cluster/kmeans_balanced.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.h" #include <gtest/gtest.h> #include <optional> #include <raft/core/resource/cuda_stream.hpp> #include <vector> #include <cuvs/cluster/kmeans_balanced.cuh> #include <cuvs/stats/adjusted_rand_index.cuh> #include <raft/core/cudart_utils.hpp> #include <raft/core/handle.hpp> #include <raft/core/operators.hpp> #include <raft/linalg/unary_op.cuh> #include <raft/random/make_blobs.cuh> #include <raft/util/cuda_utils.cuh> #include <rmm/device_uvector.hpp> #include <thrust/fill.h> /* This test takes advantage of the fact that make_blobs generates balanced clusters. * It doesn't currently test whether the algorithm can make balanced clusters with an imbalanced * dataset. 
*/ namespace cuvs { template <typename MathT, typename IdxT> struct KmeansBalancedInputs { IdxT n_rows; IdxT n_cols; IdxT n_clusters; cuvs::cluster::kmeans_balanced_params kb_params; MathT tol; }; template <typename MathT, typename IdxT> ::std::ostream& operator<<(::std::ostream& os, const KmeansBalancedInputs<MathT, IdxT>& p) { os << "{ " << p.n_rows << ", " << p.n_cols << ", " << p.n_clusters << ", " << p.kb_params.n_iters << static_cast<int>(p.kb_params.metric) << '}' << std::endl; return os; } template <typename DataT, typename MathT, typename LabelT, typename IdxT, typename MappingOpT> class KmeansBalancedTest : public ::testing::TestWithParam<KmeansBalancedInputs<MathT, IdxT>> { protected: KmeansBalancedTest() : stream(resource::get_cuda_stream(handle)), d_labels(0, stream), d_labels_ref(0, stream), d_centroids(0, stream) { } void basicTest() { MappingOpT op{}; auto p = ::testing::TestWithParam<KmeansBalancedInputs<MathT, IdxT>>::GetParam(); auto X = raft::make_device_matrix<DataT, IdxT>(handle, p.n_rows, p.n_cols); auto blob_labels = raft::make_device_vector<IdxT, IdxT>(handle, p.n_rows); MathT* blobs_ptr; rmm::device_uvector<MathT> blobs(0, stream); if constexpr (!std::is_same_v<DataT, MathT>) { blobs.resize(p.n_rows * p.n_cols, stream); blobs_ptr = blobs.data(); } else { blobs_ptr = X.data_handle(); } raft::random::make_blobs<MathT, IdxT>(blobs_ptr, blob_labels.data_handle(), p.n_rows, p.n_cols, p.n_clusters, stream, true, nullptr, nullptr, MathT{0.1}, true, MathT{-1}, MathT{1}, (uint64_t)1234); // Convert blobs dataset to DataT if necessary if constexpr (!std::is_same_v<DataT, MathT>) { raft::linalg::unaryOp( X.data_handle(), blobs.data(), p.n_rows * p.n_cols, op.reverse_op, stream); } d_labels.resize(p.n_rows, stream); d_labels_ref.resize(p.n_rows, stream); d_centroids.resize(p.n_clusters * p.n_cols, stream); raft::linalg::unaryOp( d_labels_ref.data(), blob_labels.data_handle(), p.n_rows, raft::cast_op<LabelT>(), stream); auto X_view = 
raft::make_device_matrix_view<const DataT, IdxT>(X.data_handle(), X.extent(0), X.extent(1)); auto d_centroids_view = raft::make_device_matrix_view<MathT, IdxT>(d_centroids.data(), p.n_clusters, p.n_cols); auto d_labels_view = raft::make_device_vector_view<LabelT, IdxT>(d_labels.data(), p.n_rows); cuvs::cluster::kmeans_balanced::fit_predict( handle, p.kb_params, X_view, d_centroids_view, d_labels_view, op); resource::sync_stream(handle, stream); score = cuvs::stats::adjusted_rand_index( d_labels_ref.data(), d_labels.data(), p.n_rows, resource::get_cuda_stream(handle)); if (score < 1.0) { std::stringstream ss; ss << "Expected: " << raft::arr2Str(d_labels_ref.data(), 25, "d_labels_ref", stream); std::cout << (ss.str().c_str()) << '\n'; ss.str(std::string()); ss << "Actual: " << raft::arr2Str(d_labels.data(), 25, "d_labels", stream); std::cout << (ss.str().c_str()) << '\n'; std::cout << "Score = " << score << '\n'; } } void SetUp() override { basicTest(); } protected: raft::handle_t handle; cudaStream_t stream; rmm::device_uvector<LabelT> d_labels; rmm::device_uvector<LabelT> d_labels_ref; rmm::device_uvector<MathT> d_centroids; double score; }; template <typename MathT, typename IdxT> std::vector<KmeansBalancedInputs<MathT, IdxT>> get_kmeans_balanced_inputs() { std::vector<KmeansBalancedInputs<MathT, IdxT>> out; KmeansBalancedInputs<MathT, IdxT> p; p.kb_params.n_iters = 20; p.kb_params.metric = cuvs::distance::DistanceType::L2Expanded; p.tol = MathT{0.0001}; std::vector<std::tuple<size_t, size_t, size_t>> row_cols_k = {{1000, 32, 5}, {1000, 100, 20}, {10000, 32, 10}, {10000, 100, 50}, {10000, 500, 100}, {1000000, 128, 10}}; for (auto& rck : row_cols_k) { p.n_rows = static_cast<IdxT>(std::get<0>(rck)); p.n_cols = static_cast<IdxT>(std::get<1>(rck)); p.n_clusters = static_cast<IdxT>(std::get<2>(rck)); out.push_back(p); } return out; } const auto inputsf_i32 = get_kmeans_balanced_inputs<float, int>(); const auto inputsd_i32 = get_kmeans_balanced_inputs<double, int>(); 
const auto inputsf_i64 = get_kmeans_balanced_inputs<float, int64_t>();
const auto inputsd_i64 = get_kmeans_balanced_inputs<double, int64_t>();

// Declares a named test type, its Result assertion (perfect adjusted rand
// index), and the parameterized instantiation in one go.
#define KB_TEST(test_type, test_name, test_inputs)                  \
  typedef RAFT_DEPAREN(test_type) test_name;                        \
  TEST_P(test_name, Result) { ASSERT_TRUE(score == 1.0); }          \
  INSTANTIATE_TEST_CASE_P(KmeansBalancedTests, test_name, ::testing::ValuesIn(test_inputs))

/*
 * First set of tests: no conversion
 */

KB_TEST((KmeansBalancedTest<float, float, uint32_t, int, raft::identity_op>),
        KmeansBalancedTestFFU32I32,
        inputsf_i32);
KB_TEST((KmeansBalancedTest<double, double, uint32_t, int, raft::identity_op>),
        KmeansBalancedTestDDU32I32,
        inputsd_i32);
KB_TEST((KmeansBalancedTest<float, float, uint32_t, int64_t, raft::identity_op>),
        KmeansBalancedTestFFU32I64,
        inputsf_i64);
KB_TEST((KmeansBalancedTest<double, double, uint32_t, int64_t, raft::identity_op>),
        KmeansBalancedTestDDU32I64,
        inputsd_i64);
KB_TEST((KmeansBalancedTest<float, float, int, int, raft::identity_op>),
        KmeansBalancedTestFFI32I32,
        inputsf_i32);
KB_TEST((KmeansBalancedTest<float, float, int, int64_t, raft::identity_op>),
        KmeansBalancedTestFFI32I64,
        inputsf_i64);
KB_TEST((KmeansBalancedTest<float, float, int64_t, int, raft::identity_op>),
        KmeansBalancedTestFFI64I32,
        inputsf_i32);
KB_TEST((KmeansBalancedTest<float, float, int64_t, int64_t, raft::identity_op>),
        KmeansBalancedTestFFI64I64,
        inputsf_i64);

/*
 * Second set of tests: integer dataset with conversion
 */

// MappingOpT for int8 datasets: operator() maps DataT -> MathT (divide by 42
// after casting), reverse_op maps MathT -> DataT (multiply by 42, then cast).
template <typename DataT, typename MathT>
struct i2f_scaler {
  // Note: with a scaling factor of 42, and generating blobs with centers between -1 and 1 with a
  // standard deviation of 0.1, it's statistically very unlikely that we'd overflow
  const raft::compose_op<raft::div_const_op<MathT>, raft::cast_op<MathT>> op{
    raft::div_const_op<MathT>{42}, raft::cast_op<MathT>{}};
  const raft::compose_op<raft::cast_op<DataT>, raft::mul_const_op<MathT>> reverse_op{
    raft::cast_op<DataT>{}, raft::mul_const_op<MathT>{42}};

  RAFT_INLINE_FUNCTION auto operator()(const DataT& x) const { return op(x); };
};

KB_TEST((KmeansBalancedTest<int8_t, float, uint32_t, int, i2f_scaler<int8_t, float>>),
        KmeansBalancedTestFI8U32I32,
        inputsf_i32);
KB_TEST((KmeansBalancedTest<int8_t, double, uint32_t, int, i2f_scaler<int8_t, double>>),
        KmeansBalancedTestDI8U32I32,
        inputsd_i32);

}  // namespace cuvs
0
rapidsai_public_repos/cuvs/cpp
rapidsai_public_repos/cuvs/cpp/internal/CMakeLists.txt
# ============================================================================= # Copyright (c) 2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing permissions and limitations under # the License. # ============================================================================= if(BUILD_TESTS OR BUILD_MICRO_BENCH) add_library(cuvs_internal INTERFACE) target_include_directories( cuvs_internal INTERFACE "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/internal>" ) target_compile_features(cuvs_internal INTERFACE cxx_std_17 $<BUILD_INTERFACE:cuda_std_17>) endif()
0
rapidsai_public_repos/cuvs/cpp/internal/cuvs_internal
rapidsai_public_repos/cuvs/cpp/internal/cuvs_internal/neighbors/naive_knn.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuvs/distance/distance_types.hpp>
#include <cuvs/spatial/knn/detail/ann_utils.cuh>
#include <raft/matrix/detail/select_k.cuh>
#include <raft/util/cuda_utils.cuh>

#include <raft/core/resource/cuda_stream.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>

namespace cuvs::neighbors {

/**
 * Computes the m x n distance matrix between x (m x k) and y (n x k), both
 * row-major, writing results into `dist` (m x n, row-major).
 * Each thread owns one x-row (midx) and iterates over y-rows (nidx) with a
 * stride of blockDim.y * gridDim.y.
 * Unrecognized metrics fall through the switch and yield a zero distance.
 */
template <typename EvalT, typename DataT, typename IdxT>
RAFT_KERNEL naive_distance_kernel(EvalT* dist,
                                  const DataT* x,
                                  const DataT* y,
                                  IdxT m,
                                  IdxT n,
                                  IdxT k,
                                  cuvs::distance::DistanceType metric)
{
  IdxT midx = IdxT(threadIdx.x) + IdxT(blockIdx.x) * IdxT(blockDim.x);
  if (midx >= m) return;
  IdxT grid_size = IdxT(blockDim.y) * IdxT(gridDim.y);
  for (IdxT nidx = threadIdx.y + blockIdx.y * blockDim.y; nidx < n; nidx += grid_size) {
    EvalT acc = EvalT(0);
    for (IdxT i = 0; i < k; ++i) {
      IdxT xidx = i + midx * k;
      IdxT yidx = i + nidx * k;
      // Accumulate in EvalT to avoid overflow/precision loss for narrow DataT.
      auto xv = EvalT(x[xidx]);
      auto yv = EvalT(y[yidx]);
      switch (metric) {
        case cuvs::distance::DistanceType::InnerProduct: {
          acc += xv * yv;
        } break;
        case cuvs::distance::DistanceType::L2SqrtExpanded:
        case cuvs::distance::DistanceType::L2SqrtUnexpanded:
        case cuvs::distance::DistanceType::L2Expanded:
        case cuvs::distance::DistanceType::L2Unexpanded: {
          auto diff = xv - yv;
          acc += diff * diff;
        } break;
        default: break;
      }
    }
    // Apply the square root once per distance for the sqrt metric variants.
    switch (metric) {
      case cuvs::distance::DistanceType::L2SqrtExpanded:
      case cuvs::distance::DistanceType::L2SqrtUnexpanded: {
        acc = raft::sqrt(acc);
      } break;
      default: break;
    }
    dist[midx * n + nidx] = acc;
  }
}

/**
 * Naive, but flexible bruteforce KNN search.
 *
 * TODO: either replace this with brute_force_knn or with distance+select_k
 * when either distance or brute_force_knn support 8-bit int inputs.
 *
 * Computes all pairwise distances in batches (bounded to ~2^27 elements of
 * scratch) and selects the top-k per query with select_k. For InnerProduct
 * the selection order is descending (larger is closer); for L2 it is
 * ascending. Synchronizes the stream before returning.
 */
template <typename EvalT, typename DataT, typename IdxT>
void naive_knn(raft::resources const& handle,
               EvalT* dist_topk,
               IdxT* indices_topk,
               const DataT* x,
               const DataT* y,
               size_t n_inputs,
               size_t input_len,
               size_t dim,
               uint32_t k,
               cuvs::distance::DistanceType type)
{
  rmm::mr::device_memory_resource* mr = nullptr;
  auto pool_guard                     = raft::get_pool_memory_resource(mr, 1024 * 1024);
  auto stream                         = raft::resource::get_cuda_stream(handle);

  dim3 block_dim(16, 32, 1);
  // maximum reasonable grid size in `y` direction
  auto grid_y =
    static_cast<uint16_t>(std::min<size_t>(raft::ceildiv<size_t>(input_len, block_dim.y), 32768));

  // bound the memory used by this function
  size_t max_batch_size =
    std::min<size_t>(n_inputs, raft::ceildiv<size_t>(size_t(1) << size_t(27), input_len));
  rmm::device_uvector<EvalT> dist(max_batch_size * input_len, stream, mr);

  for (size_t offset = 0; offset < n_inputs; offset += max_batch_size) {
    size_t batch_size = std::min(max_batch_size, n_inputs - offset);
    dim3 grid_dim(raft::ceildiv<size_t>(batch_size, block_dim.x), grid_y, 1);

    naive_distance_kernel<EvalT, DataT, IdxT><<<grid_dim, block_dim, 0, stream>>>(
      dist.data(), x + offset * dim, y, batch_size, input_len, dim, type);

    matrix::detail::select_k<EvalT, IdxT>(handle,
                                          dist.data(),
                                          nullptr,
                                          batch_size,
                                          input_len,
                                          static_cast<int>(k),
                                          dist_topk + offset * k,
                                          indices_topk + offset * k,
                                          type != cuvs::distance::DistanceType::InnerProduct,
                                          mr);
  }
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
}

}  // namespace cuvs::neighbors
0
rapidsai_public_repos/cuvs/cpp/internal/cuvs_internal
rapidsai_public_repos/cuvs/cpp/internal/cuvs_internal/neighbors/refine_helper.cuh
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuvs_internal/neighbors/naive_knn.cuh>

#include <raft/core/resource/cuda_stream.hpp>

#include <cuvs/distance/distance_types.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/rng.cuh>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>

namespace cuvs::neighbors {

// Shape/configuration of one refinement test or benchmark case.
template <typename IdxT>
struct RefineInputs {
  IdxT n_queries;
  IdxT n_rows;
  IdxT dim;
  IdxT k;   // after refinement
  IdxT k0;  // initial k before refinement (k0 >= k).
  cuvs::distance::DistanceType metric;
  bool host_data;  // if true, also mirror dataset/queries/candidates to host buffers
};

/** Helper class to allocate arrays and generate input data for refinement test and benchmark.
 *
 * On construction: generates a random dataset and queries (uniform floats, or
 * uniform small ints for integral DataT), computes k0 candidate neighbors and
 * the exact k-NN ground truth with naive_knn, and optionally copies inputs to
 * host. All work is synchronized before the constructor returns.
 *
 * NOTE(review): the constructor's init-list order (handle_, stream_, p, ...)
 * differs from the member declaration order (p, handle_, stream_, ...);
 * initialization follows declaration order, which is harmless here since the
 * initializers only use constructor parameters — but it will trigger
 * -Wreorder. Confirm/reorder when touching this class.
 */
template <typename DataT, typename DistanceT, typename IdxT>
class RefineHelper {
 public:
  RefineHelper(const raft::resources& handle, RefineInputs<IdxT> params)
    : handle_(handle),
      stream_(resource::get_cuda_stream(handle)),
      p(params),
      dataset(handle),
      queries(handle),
      refined_distances(handle),
      refined_indices(handle),
      candidates(handle),
      dataset_host(handle),
      queries_host(handle),
      candidates_host(handle),
      refined_distances_host(handle),
      refined_indices_host(handle)
  {
    raft::random::RngState rng(1234ULL);

    dataset = raft::make_device_matrix<DataT, IdxT>(handle_, p.n_rows, p.dim);
    queries = raft::make_device_matrix<DataT, IdxT>(handle_, p.n_queries, p.dim);
    if constexpr (std::is_same<DataT, float>{}) {
      raft::random::uniform(
        handle, rng, dataset.data_handle(), dataset.size(), DataT(-10.0), DataT(10.0));
      raft::random::uniform(
        handle, rng, queries.data_handle(), queries.size(), DataT(-10.0), DataT(10.0));
    } else {
      raft::random::uniformInt(
        handle, rng, dataset.data_handle(), dataset.size(), DataT(1), DataT(20));
      raft::random::uniformInt(
        handle, rng, queries.data_handle(), queries.size(), DataT(1), DataT(20));
    }

    refined_distances = raft::make_device_matrix<DistanceT, IdxT>(handle_, p.n_queries, p.k);
    refined_indices   = raft::make_device_matrix<IdxT, IdxT>(handle_, p.n_queries, p.k);

    // Generate candidate vectors
    {
      candidates = raft::make_device_matrix<IdxT, IdxT>(handle_, p.n_queries, p.k0);
      rmm::device_uvector<DistanceT> distances_tmp(p.n_queries * p.k0, stream_);
      naive_knn<DistanceT, DataT, IdxT>(handle_,
                                        distances_tmp.data(),
                                        candidates.data_handle(),
                                        queries.data_handle(),
                                        dataset.data_handle(),
                                        p.n_queries,
                                        p.n_rows,
                                        p.dim,
                                        p.k0,
                                        p.metric);
      resource::sync_stream(handle_, stream_);
    }

    if (p.host_data) {
      dataset_host    = raft::make_host_matrix<DataT, IdxT>(p.n_rows, p.dim);
      queries_host    = raft::make_host_matrix<DataT, IdxT>(p.n_queries, p.dim);
      candidates_host = raft::make_host_matrix<IdxT, IdxT>(p.n_queries, p.k0);

      raft::copy(dataset_host.data_handle(), dataset.data_handle(), dataset.size(), stream_);
      raft::copy(queries_host.data_handle(), queries.data_handle(), queries.size(), stream_);
      raft::copy(
        candidates_host.data_handle(), candidates.data_handle(), candidates.size(), stream_);

      refined_distances_host = raft::make_host_matrix<DistanceT, IdxT>(p.n_queries, p.k);
      refined_indices_host   = raft::make_host_matrix<IdxT, IdxT>(p.n_queries, p.k);
      resource::sync_stream(handle_, stream_);
    }

    // Generate ground thruth for testing.
    {
      rmm::device_uvector<DistanceT> distances_dev(p.n_queries * p.k, stream_);
      rmm::device_uvector<IdxT> indices_dev(p.n_queries * p.k, stream_);
      naive_knn<DistanceT, DataT, IdxT>(handle_,
                                        distances_dev.data(),
                                        indices_dev.data(),
                                        queries.data_handle(),
                                        dataset.data_handle(),
                                        p.n_queries,
                                        p.n_rows,
                                        p.dim,
                                        p.k,
                                        p.metric);
      true_refined_distances_host.resize(p.n_queries * p.k);
      true_refined_indices_host.resize(p.n_queries * p.k);
      raft::copy(true_refined_indices_host.data(), indices_dev.data(), indices_dev.size(), stream_);
      raft::copy(
        true_refined_distances_host.data(), distances_dev.data(), distances_dev.size(), stream_);
      resource::sync_stream(handle_, stream_);
    }
  }

 public:
  RefineInputs<IdxT> p;
  const raft::resources& handle_;
  rmm::cuda_stream_view stream_;

  raft::device_matrix<DataT, IdxT, row_major> dataset;
  raft::device_matrix<DataT, IdxT, row_major> queries;
  raft::device_matrix<IdxT, IdxT, row_major> candidates;  // Neighbor candidate indices
  raft::device_matrix<IdxT, IdxT, row_major> refined_indices;
  raft::device_matrix<DistanceT, IdxT, row_major> refined_distances;

  // Host mirrors, only populated when p.host_data is true.
  raft::host_matrix<DataT, IdxT, row_major> dataset_host;
  raft::host_matrix<DataT, IdxT, row_major> queries_host;
  raft::host_matrix<IdxT, IdxT, row_major> candidates_host;
  raft::host_matrix<IdxT, IdxT, row_major> refined_indices_host;
  raft::host_matrix<DistanceT, IdxT, row_major> refined_distances_host;

  // Exact k-NN ground truth computed at construction time.
  std::vector<IdxT> true_refined_indices_host;
  std::vector<DistanceT> true_refined_distances_host;
};

}  // namespace cuvs::neighbors
0
rapidsai_public_repos/cuvs/cpp/internal/cuvs_internal
rapidsai_public_repos/cuvs/cpp/internal/cuvs_internal/matrix/select_k.cuh
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuvs/neighbors/detail/selection_faiss.cuh>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/matrix/detail/select_radix.cuh>
#include <raft/matrix/detail/select_warpsort.cuh>
#include <raft/matrix/select_k.cuh>

namespace raft::matrix::select {

// Test/benchmark parameters for one select-k case.
struct params {
  size_t batch_size;
  size_t len;
  int k;
  bool select_min;
  bool use_index_input       = true;
  bool use_same_leading_bits = false;
  bool use_memory_pool       = true;
  double frac_infinities     = 0.0;  // fraction of inputs replaced by +/- inf
};

// Pretty-printer for params (used in test/benchmark names and reports).
inline auto operator<<(std::ostream& os, const params& ss) -> std::ostream&
{
  os << "params{batch_size: " << ss.batch_size;
  os << ", len: " << ss.len;
  os << ", k: " << ss.k;
  os << (ss.select_min ? ", asc" : ", dsc");
  if (!ss.use_index_input) { os << ", no-input-index"; }
  if (ss.use_same_leading_bits) { os << ", same-leading-bits"; }
  if (ss.frac_infinities > 0) { os << ", infs: " << ss.frac_infinities; }
  os << "}";
  return os;
}

// Every select-k implementation that can be exercised through select_k_impl.
enum class Algo {
  kPublicApi,
  kRadix8bits,
  kRadix11bits,
  kRadix11bitsExtraPass,
  kWarpAuto,
  kWarpImmediate,
  kWarpFiltered,
  kWarpDistributed,
  kWarpDistributedShm,
  kFaissBlockSelect
};

// Pretty-printer for Algo.
inline auto operator<<(std::ostream& os, const Algo& algo) -> std::ostream&
{
  switch (algo) {
    case Algo::kPublicApi: return os << "kPublicApi";
    case Algo::kRadix8bits: return os << "kRadix8bits";
    case Algo::kRadix11bits: return os << "kRadix11bits";
    case Algo::kRadix11bitsExtraPass: return os << "kRadix11bitsExtraPass";
    case Algo::kWarpAuto: return os << "kWarpAuto";
    case Algo::kWarpImmediate: return os << "kWarpImmediate";
    case Algo::kWarpFiltered: return os << "kWarpFiltered";
    case Algo::kWarpDistributed: return os << "kWarpDistributed";
    case Algo::kWarpDistributedShm: return os << "kWarpDistributedShm";
    case Algo::kFaissBlockSelect: return os << "kFaissBlockSelect";
    default: return os << "unknown enum value";
  }
}

/**
 * Dispatches a top-k selection over `batch_size` rows of length `len` to the
 * chosen implementation. `in_idx` may be nullptr, in which case row offsets
 * are used as indices. Results go to `out`/`out_idx` (batch_size x k).
 */
template <typename T, typename IdxT>
void select_k_impl(const resources& handle,
                   const Algo& algo,
                   const T* in,
                   const IdxT* in_idx,
                   size_t batch_size,
                   size_t len,
                   int k,
                   T* out,
                   IdxT* out_idx,
                   bool select_min)
{
  auto stream = resource::get_cuda_stream(handle);
  switch (algo) {
    case Algo::kPublicApi: {
      auto in_extent  = make_extents<int64_t>(batch_size, len);
      auto out_extent = make_extents<int64_t>(batch_size, k);
      auto in_span    = make_mdspan<const T, int64_t, row_major, false, true>(in, in_extent);
      auto in_idx_span =
        make_mdspan<const IdxT, int64_t, row_major, false, true>(in_idx, in_extent);
      auto out_span     = make_mdspan<T, int64_t, row_major, false, true>(out, out_extent);
      auto out_idx_span = make_mdspan<IdxT, int64_t, row_major, false, true>(out_idx, out_extent);
      if (in_idx == nullptr) {
        // NB: std::nullopt prevents automatic inference of the template parameters.
        return matrix::select_k<T, IdxT>(
          handle, in_span, std::nullopt, out_span, out_idx_span, select_min, true);
      } else {
        return matrix::select_k(handle,
                                in_span,
                                std::make_optional(in_idx_span),
                                out_span,
                                out_idx_span,
                                select_min,
                                true);
      }
    }
    case Algo::kRadix8bits:
      return detail::select::radix::select_k<T, IdxT, 8, 512>(in,
                                                              in_idx,
                                                              batch_size,
                                                              len,
                                                              k,
                                                              out,
                                                              out_idx,
                                                              select_min,
                                                              true,  // fused_last_filter
                                                              stream);
    case Algo::kRadix11bits:
      return detail::select::radix::select_k<T, IdxT, 11, 512>(in,
                                                               in_idx,
                                                               batch_size,
                                                               len,
                                                               k,
                                                               out,
                                                               out_idx,
                                                               select_min,
                                                               true,  // fused_last_filter
                                                               stream);
    case Algo::kRadix11bitsExtraPass:
      return detail::select::radix::select_k<T, IdxT, 11, 512>(in,
                                                               in_idx,
                                                               batch_size,
                                                               len,
                                                               k,
                                                               out,
                                                               out_idx,
                                                               select_min,
                                                               false,  // fused_last_filter
                                                               stream);
    case Algo::kWarpAuto:
      return detail::select::warpsort::select_k<T, IdxT>(
        in, in_idx, batch_size, len, k, out, out_idx, select_min, stream);
    case Algo::kWarpImmediate:
      return detail::select::warpsort::
        select_k_impl<T, IdxT, detail::select::warpsort::warp_sort_immediate>(
          in, in_idx, batch_size, len, k, out, out_idx, select_min, stream);
    case Algo::kWarpFiltered:
      return detail::select::warpsort::
        select_k_impl<T, IdxT, detail::select::warpsort::warp_sort_filtered>(
          in, in_idx, batch_size, len, k, out, out_idx, select_min, stream);
    case Algo::kWarpDistributed:
      return detail::select::warpsort::
        select_k_impl<T, IdxT, detail::select::warpsort::warp_sort_distributed>(
          in, in_idx, batch_size, len, k, out, out_idx, select_min, stream);
    case Algo::kWarpDistributedShm:
      return detail::select::warpsort::
        select_k_impl<T, IdxT, detail::select::warpsort::warp_sort_distributed_ext>(
          in, in_idx, batch_size, len, k, out, out_idx, select_min, stream);
    case Algo::kFaissBlockSelect:
      return neighbors::detail::select_k(
        in, in_idx, batch_size, len, out, out_idx, select_min, k, stream);
  }
}

}  // namespace raft::matrix::select
0
rapidsai_public_repos/cuvs
rapidsai_public_repos/cuvs/notebooks/tutorial_ivf_pq.ipynb
import os import tempfile import cupy as cp import numpy as np import matplotlib.pyplot as plt import rmm import urllib.request import h5py from rmm.allocators.cupy import rmm_cupy_allocator from pylibraft.common import DeviceResources from pylibraft.neighbors import ivf_pq, refine from adjustText import adjust_text from utils import calc_recall, load_dataset %matplotlib inline# A clumsy helper for inspecting properties of an object def show_properties(obj): return { attr: getattr(obj, attr) for attr in dir(obj) if type(getattr(type(obj), attr)).__name__ == 'getset_descriptor' }# We'll need to load store some data in this tutorial WORK_FOLDER = os.path.join(tempfile.gettempdir(), 'raft_ivf_pq_tutorial') if not os.path.exists(WORK_FOLDER): os.makedirs(WORK_FOLDER) print("The index and data will be saved in", WORK_FOLDER)# Report the GPU in use to put the measurements into perspective !nvidia-smipool = rmm.mr.PoolMemoryResource( rmm.mr.CudaMemoryResource(), initial_pool_size=2**30 ) rmm.mr.set_current_device_resource(pool) cp.cuda.set_allocator(rmm_cupy_allocator)DATASET_URL = "http://ann-benchmarks.com/sift-128-euclidean.hdf5" f = load_dataset(DATASET_URL)metric = f.attrs['distance'] dataset = cp.array(f['train']) queries = cp.array(f['test']) gt_neighbors = cp.array(f['neighbors']) gt_distances = cp.array(f['distances']) print(f"Loaded dataset of size {dataset.shape}; metric: '{metric}'.") print(f"Number of test queries: {queries.shape[0]}")# RAFT's DeviceResources controls the GPU, cuda stream, memory policies etc. # For now, we just create a default instance. resources = DeviceResources()# First, we need to initialize the build/indexing parameters. # One of the more important parameters is the product quantisation (PQ) dim. # Effectively, this parameter says # "shrink the dataset to this dimensionality to reduce the index size". # It must be not bigger than the dataset dim, # and it should be divisible by 32 for better GPU performance. 
pq_dim = 1 while pq_dim * 2 < dataset.shape[1]: pq_dim = pq_dim * 2 # We'll use the ANN-benchmarks-provided metric and sensible defaults for the rest of parameters. index_params = ivf_pq.IndexParams(n_lists=1024, metric=metric, pq_dim=pq_dim) show_properties(index_params)%%time ## Build the index # This function takes a row-major either numpy or cupy (GPU) array. # Generally, it's a bit faster with GPU inputs, but the CPU version may come in handy # if the whole dataset cannot fit into GPU memory. index = ivf_pq.build(index_params, dataset, handle=resources) # This function is asynchronous so we need to explicitly synchronize the GPU before we can measure the execution time resources.sync() index%%time index_filepath = os.path.join(WORK_FOLDER, "ivf_pq.bin") ivf_pq.save(index_filepath, index) loaded_index = ivf_pq.load(index_filepath) resources.sync() indexk = 10 search_params = ivf_pq.SearchParams() show_properties(search_params)%%time distances, neighbors = ivf_pq.search(search_params, index, queries, k, handle=resources) # Sync the GPU to make sure we've got the timing right resources.sync()recall_first_try = calc_recall(neighbors, gt_neighbors) print(f"Got recall = {recall_first_try} with the default parameters (k = {k}).")%%time candidates = ivf_pq.search(search_params, index, queries, k * 2, handle=resources)[1] distances, neighbors = refine(dataset, queries, candidates, k, handle=resources) resources.sync()recall_refine2x = calc_recall(neighbors, gt_neighbors) print(f"Got recall = {recall_refine2x} with 2x refinement (k = {k}).")bench_k = np.exp2(np.arange(10)).astype(np.int32) bench_avg = np.zeros_like(bench_k, dtype=np.float32) bench_std = np.zeros_like(bench_k, dtype=np.float32) for i, k in enumerate(bench_k): r = %timeit -o ivf_pq.search(search_params, index, queries, k, handle=resources); resources.sync() bench_avg[i] = (queries.shape[0] * r.loops / np.array(r.all_runs)).mean() bench_std[i] = (queries.shape[0] * r.loops / np.array(r.all_runs)).std() 
fig, ax = plt.subplots(1, 1, figsize=plt.figaspect(1/2)) ax.errorbar(bench_k, bench_avg, bench_std) ax.set_xscale('log') ax.set_xticks(bench_k, bench_k) ax.set_xlabel('k') ax.grid() ax.set_ylabel('QPS');bench_probes = np.exp2(np.arange(11)).astype(np.int32) bench_qps = np.zeros_like(bench_probes, dtype=np.float32) bench_recall = np.zeros_like(bench_probes, dtype=np.float32) k = 100 for i, n_probes in enumerate(bench_probes): sp = ivf_pq.SearchParams(n_probes=n_probes) r = %timeit -o ivf_pq.search(sp, index, queries, k, handle=resources); resources.sync() bench_qps[i] = (queries.shape[0] * r.loops / np.array(r.all_runs)).mean() bench_recall[i] = calc_recall(ivf_pq.search(sp, index, queries, k, handle=resources)[1], gt_neighbors) fig, ax = plt.subplots(1, 3, figsize=plt.figaspect(1/4)) ax[0].plot(bench_probes, bench_recall) ax[0].set_xscale('log') ax[0].set_xticks(bench_probes, bench_probes) ax[0].set_xlabel('n_probes') ax[0].set_ylabel('recall') ax[0].grid() ax[1].plot(bench_probes, bench_qps) ax[1].set_xscale('log') ax[1].set_xticks(bench_probes, bench_probes) ax[1].set_xlabel('n_probes') ax[1].set_ylabel('QPS') ax[1].set_yscale('log') ax[1].grid() ax[2].plot(bench_recall, bench_qps) ax[2].set_xlabel('recall') ax[2].set_ylabel('QPS') ax[2].set_yscale('log') ax[2].grid();bench_qps_s1 = np.zeros((5,), dtype=np.float32) bench_recall_s1 = np.zeros((5,), dtype=np.float32) k = 10 n_probes = 256 search_params_32_32 = ivf_pq.SearchParams(n_probes=n_probes, internal_distance_dtype=np.float32, lut_dtype=np.float32) search_params_32_16 = ivf_pq.SearchParams(n_probes=n_probes, internal_distance_dtype=np.float32, lut_dtype=np.float16) search_params_32_08 = ivf_pq.SearchParams(n_probes=n_probes, internal_distance_dtype=np.float32, lut_dtype=np.uint8) search_params_16_16 = ivf_pq.SearchParams(n_probes=n_probes, internal_distance_dtype=np.float16, lut_dtype=np.float16) search_params_16_08 = ivf_pq.SearchParams(n_probes=n_probes, internal_distance_dtype=np.float16, 
lut_dtype=np.uint8) search_ps = [search_params_32_32, search_params_32_16, search_params_32_08, search_params_16_16, search_params_16_08] bench_names = ['32/32', '32/16', '32/8', '16/16', '16/8'] for i, sp in enumerate(search_ps): r = %timeit -o ivf_pq.search(sp, index, queries, k, handle=resources); resources.sync() bench_qps_s1[i] = (queries.shape[0] * r.loops / np.array(r.all_runs)).mean() bench_recall_s1[i] = calc_recall(ivf_pq.search(sp, index, queries, k, handle=resources)[1], gt_neighbors)fig, ax = plt.subplots(1, 1, figsize=plt.figaspect(1/2)) fig.suptitle( f'Effects of search parameters on QPS/recall trade-off ({DATASET_FILENAME})\n' + \ f'k = {k}, n_probes = {n_probes}, pq_dim = {pq_dim}') ax.plot(bench_recall_s1, bench_qps_s1, 'o') ax.set_xlabel('recall') ax.set_ylabel('QPS') ax.grid() annotations = [] for i, label in enumerate(bench_names): annotations.append(ax.text( bench_recall_s1[i], bench_qps_s1[i], f" {label} ", ha='center', va='center')) clutter = [ ax.text( 0.02, 0.08, 'Labels denote the bitsize of: internal_distance_dtype/lut_dtype', verticalalignment='top', bbox={'facecolor': 'white', 'edgecolor': 'grey'}, transform = ax.transAxes) ] adjust_text(annotations, objects=clutter);def search_refine(ps, ratio): k_search = k * ratio candidates = ivf_pq.search(ps, index, queries, k_search, handle=resources)[1] return candidates if ratio == 1 else refine(dataset, queries, candidates, k, handle=resources)[1] ratios = [1, 2, 4] bench_qps_sr = np.zeros((len(ratios), len(search_ps)), dtype=np.float32) bench_recall_sr = np.zeros((len(ratios), len(search_ps)), dtype=np.float32) for j, ratio in enumerate(ratios): for i, ps in enumerate(search_ps): r = %timeit -o search_refine(ps, ratio); resources.sync() bench_qps_sr[j, i] = (queries.shape[0] * r.loops / np.array(r.all_runs)).mean() bench_recall_sr[j, i] = calc_recall(search_refine(ps, ratio), gt_neighbors)fig, ax = plt.subplots(1, 1, figsize=plt.figaspect(1/2)) fig.suptitle( f'Effects of search parameters on 
QPS/recall trade-off ({DATASET_FILENAME})\n' + \ f'k = {k}, n_probes = {n_probes}, pq_dim = {pq_dim}') labels = [] for j, ratio in enumerate(ratios): ax.plot(bench_recall_sr[j, :], bench_qps_sr[j, :], 'o') labels.append(f"refine ratio = {ratio}") ax.legend(labels) ax.set_xlabel('recall') ax.set_ylabel('QPS') ax.grid() colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] annotations = [] for j, ratio in enumerate(ratios): for i, label in enumerate(bench_names): annotations.append(ax.text( bench_recall_sr[j, i], bench_qps_sr[j, i], f" {label} ", color=colors[j], ha='center', va='center')) clutter = [ ax.text( 0.02, 0.08, 'Labels denote the bitsize of: internal_distance_dtype/lut_dtype', verticalalignment='top', bbox={'facecolor': 'white', 'edgecolor': 'grey'}, transform = ax.transAxes) ] adjust_text(annotations, objects=clutter);def search_refine(internal_distance_dtype, lut_dtype, ratio, n_probes): k_search = k * ratio ps = ivf_pq.SearchParams( n_probes=n_probes, internal_distance_dtype=internal_distance_dtype, lut_dtype=lut_dtype) candidates = ivf_pq.search(ps, index, queries, k_search, handle=resources)[1] return candidates if ratio == 1 else refine(dataset, queries, candidates, k, handle=resources)[1] search_configs = [ lambda n_probes: search_refine(np.float16, np.float16, 1, n_probes), lambda n_probes: search_refine(np.float32, np.uint8, 1, n_probes), lambda n_probes: search_refine(np.float32, np.uint8, 2, n_probes) ] search_config_names = [ '16/16', '32/8', '32/8/r2' ]n_list_variants = [100, 500, 1000, 2000, 5000] pl_ratio_variants = [500, 200, 100, 50, 10, 5] selected_search_variant = 1 search_fun = search_configs[selected_search_variant] search_label = search_config_names[selected_search_variant] bench_qps_nl = np.zeros((len(n_list_variants), len(pl_ratio_variants)), dtype=np.float32) bench_recall_nl = np.zeros_like(bench_qps_nl, dtype=np.float32) for i, n_lists in enumerate(n_list_variants): index_params = ivf_pq.IndexParams(n_lists=n_lists, 
metric=metric, pq_dim=pq_dim) index = ivf_pq.build(index_params, dataset, handle=resources) for j, pl_ratio in enumerate(pl_ratio_variants): n_probes = max(1, n_lists // pl_ratio) r = %timeit -o search_fun(n_probes); resources.sync() bench_qps_nl[i, j] = (queries.shape[0] * r.loops / np.array(r.all_runs)).mean() bench_recall_nl[i, j] = calc_recall(search_fun(n_probes), gt_neighbors)fig, ax = plt.subplots(1, 1, figsize=plt.figaspect(1/2)) fig.suptitle( f'Effects of n_list on QPS/recall trade-off ({DATASET_FILENAME})\n' + \ f'k = {k}, pq_dim = {pq_dim}, search = {search_label}') labels = [] for i, n_lists in enumerate(n_list_variants): ax.plot(bench_recall_nl[i, :], bench_qps_nl[i, :]) labels.append(f"n_lists = {n_lists}") ax.legend(labels) ax.set_xlabel('recall') ax.set_ylabel('QPS') ax.set_yscale('log') ax.grid()# Let's try a few build configurations. # Warning: this will take some time k = 10 n_probes_variants = [10, 20, 50, 100] n_lists = 1000 build_configs = { '64-8-subspace': ivf_pq.IndexParams(n_lists=n_lists, metric=metric, pq_dim=64, pq_bits=8, codebook_kind="subspace"), '128-8-subspace': ivf_pq.IndexParams(n_lists=n_lists, metric=metric, pq_dim=128, pq_bits=8, codebook_kind="subspace"), '128-6-subspace': ivf_pq.IndexParams(n_lists=n_lists, metric=metric, pq_dim=128, pq_bits=6, codebook_kind="subspace"), '128-6-cluster': ivf_pq.IndexParams(n_lists=n_lists, metric=metric, pq_dim=128, pq_bits=6, codebook_kind="cluster"), } bench_qps_ip = np.zeros((len(build_configs), len(search_configs), len(n_probes_variants)), dtype=np.float32) bench_recall_ip = np.zeros_like(bench_qps_ip, dtype=np.float32) for i, index_params in enumerate(build_configs.values()): index = ivf_pq.build(index_params, dataset, handle=resources) for l, search_fun in enumerate(search_configs): for j, n_probes in enumerate(n_probes_variants): r = %timeit -o search_fun(n_probes); resources.sync() bench_qps_ip[i, l, j] = (queries.shape[0] * r.loops / np.array(r.all_runs)).mean() bench_recall_ip[i, 
l, j] = calc_recall(search_fun(n_probes), gt_neighbors)fig, ax = plt.subplots(len(search_config_names), 1, figsize=(16, len(search_config_names)*8)) fig.suptitle( f'Effects of index parameters on QPS/recall trade-off ({DATASET_FILENAME})\n' + \ f'k = {k}, n_lists = {n_lists}') for j, search_label in enumerate(search_config_names): labels = [] for i, index_label in enumerate(build_configs.keys()): ax[j].plot(bench_recall_ip[i, j, :], bench_qps_ip[i, j, :]) labels.append(index_label) ax[j].set_title(f"search: {search_label}") ax[j].legend(labels) ax[j].set_xlabel('recall') ax[j].set_ylabel('QPS') ax[j].set_yscale('log') ax[j].grid()
0
rapidsai_public_repos/cuvs
rapidsai_public_repos/cuvs/notebooks/VectorSearch_QuestionRetrieval.ipynb
import json from sentence_transformers import SentenceTransformer, CrossEncoder, util import time import gzip import os import torch import pylibraft from pylibraft.neighbors import ivf_flat, ivf_pq pylibraft.config.set_output_as(lambda device_ndarray: device_ndarray.copy_to_host()) if not torch.cuda.is_available(): print("Warning: No GPU found. Please add GPU to your notebook")# We use the Bi-Encoder to encode all passages, so that we can use it with semantic search model_name = 'nq-distilbert-base-v1' bi_encoder = SentenceTransformer(model_name) # As dataset, we use Simple English Wikipedia. Compared to the full English wikipedia, it has only # about 170k articles. We split these articles into paragraphs and encode them with the bi-encoder wikipedia_filepath = 'data/simplewiki-2020-11-01.jsonl.gz' if not os.path.exists(wikipedia_filepath): util.http_get('http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz', wikipedia_filepath) passages = [] with gzip.open(wikipedia_filepath, 'rt', encoding='utf8') as fIn: for line in fIn: data = json.loads(line.strip()) for paragraph in data['paragraphs']: # We encode the passages as [title, text] passages.append([data['title'], paragraph]) # If you like, you can also limit the number of passages you want to use print("Passages:", len(passages)) # To speed things up, pre-computed embeddings are downloaded. 
# The provided file encoded the passages with the model 'nq-distilbert-base-v1' if model_name == 'nq-distilbert-base-v1': embeddings_filepath = 'simplewiki-2020-11-01-nq-distilbert-base-v1.pt' if not os.path.exists(embeddings_filepath): util.http_get('http://sbert.net/datasets/simplewiki-2020-11-01-nq-distilbert-base-v1.pt', embeddings_filepath) corpus_embeddings = torch.load(embeddings_filepath) corpus_embeddings = corpus_embeddings.float() # Convert embedding file to float if torch.cuda.is_available(): corpus_embeddings = corpus_embeddings.to('cuda') else: # Here, we compute the corpus_embeddings from scratch (which can take a while depending on the GPU) corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True)%%time params = ivf_pq.IndexParams(n_lists=150, pq_dim=96) pq_index = ivf_pq.build(params, corpus_embeddings) search_params = ivf_pq.SearchParams() def search_raft_pq(query, top_k = 5): # Encode the query using the bi-encoder and find potentially relevant passages question_embedding = bi_encoder.encode(query, convert_to_tensor=True) hits = ivf_pq.search(search_params, pq_index, question_embedding[None], top_k) # Output of top-k hits print("Input question:", query) for k in range(top_k): print("\t{:.3f}\t{}".format(hits[0][0, k], passages[hits[1][0, k]]))pq_index_mem = pq_index.pq_dim * pq_index.size * pq_index.pq_bits print("IVF-PQ memory footprint: {:.1f} MB".format(pq_index_mem / 2**20)) original_mem = corpus_embeddings.shape[0] * corpus_embeddings.shape[1] * 4 print("Original dataset: {:.1f} MB".format(original_mem / 2**20)) print("Memory saved: {:.1f}%".format(100 * (1 - pq_index_mem / original_mem)))%%time search_raft_pq(query="Who was Grace Hopper?")%%time search_raft_pq(query="Who was Alan Turing?")%%time search_raft_pq(query = "What is creating tides?")%%time params = ivf_flat.IndexParams(n_lists=150) flat_index = ivf_flat.build(params, corpus_embeddings) search_params = ivf_flat.SearchParams() def 
search_raft_flat(query, top_k = 5): # Encode the query using the bi-encoder and find potentially relevant passages question_embedding = bi_encoder.encode(query, convert_to_tensor=True) start_time = time.time() hits = ivf_flat.search(search_params, flat_index, question_embedding[None], top_k) end_time = time.time() # Output of top-k hits print("Input question:", query) print("Results (after {:.3f} seconds):".format(end_time - start_time)) for k in range(top_k): print("\t{:.3f}\t{}".format(hits[0][0, k], passages[hits[1][0, k]]))%%time search_raft_flat(query="Who was Grace Hopper?")%%time search_raft_flat(query="Who was Alan Turing?")%%time search_raft_flat(query = "What is creating tides?")from pylibraft.neighbors import cagra%%time params = cagra.IndexParams(intermediate_graph_degree=128, graph_degree=64) cagra_index = cagra.build(params, corpus_embeddings) search_params = cagra.SearchParams()def search_raft_cagra(query, top_k = 5): # Encode the query using the bi-encoder and find potentially relevant passages question_embedding = bi_encoder.encode(query, convert_to_tensor=True) hits = cagra.search(search_params, cagra_index, question_embedding[None], top_k) # Output of top-k hits print("Input question:", query) for k in range(top_k): print("\t{:.3f}\t{}".format(hits[0][0, k], passages[hits[1][0, k]]))%time search_raft_cagra(query="Who was Grace Hopper?")
0
rapidsai_public_repos/cuvs
rapidsai_public_repos/cuvs/notebooks/ivf_flat_example.ipynb
import os
import cupy as cp
import numpy as np
from pylibraft.common import DeviceResources
from pylibraft.neighbors import ivf_flat
import matplotlib.pyplot as plt
import tempfile
from utils import BenchmarkTimer, calc_recall, load_dataset

import rmm
from rmm.allocators.cupy import rmm_cupy_allocator
# Use an RMM pool (1 GiB initial) so repeated CuPy allocations reuse memory
# instead of hitting cudaMalloc each time.
mr = rmm.mr.PoolMemoryResource(
    rmm.mr.CudaMemoryResource(),
    initial_pool_size=2**30
)
rmm.mr.set_current_device_resource(mr)
cp.cuda.set_allocator(rmm_cupy_allocator)

# Report the GPU in use
!nvidia-smi

# Download (if needed) and open the SIFT-128 ann-benchmarks HDF5 file.
WORK_FOLDER = os.path.join(tempfile.gettempdir(), "raft_example")
f = load_dataset("http://ann-benchmarks.com/sift-128-euclidean.hdf5", work_folder=WORK_FOLDER)

# Copy the HDF5 datasets to the GPU; ground-truth neighbors/distances are used
# later to score recall.
metric = f.attrs['distance']
dataset = cp.array(f['train'])
queries = cp.array(f['test'])
gt_neighbors = cp.array(f['neighbors'])
gt_distances = cp.array(f['distances'])
itemsize = dataset.dtype.itemsize
print(f"Loaded dataset of size {dataset.shape}, {dataset.size*itemsize/(1<<30):4.1f} GiB; metric: '{metric}'.")
print(f"Number of test queries: {queries.shape[0]}")

%%time
# Build an IVF-Flat index; k-means is trained on 10% of the data, and the
# dataset vectors are added to the index during build.
build_params = ivf_flat.IndexParams(
    n_lists=1024,
    metric="euclidean",
    kmeans_trainset_fraction=0.1,
    kmeans_n_iters=20,
    add_data_on_build=True
)
index = ivf_flat.build(build_params, dataset)

print(index)

handle = DeviceResources()

%%time
n_queries=10000
# n_probes is the number of clusters we select in the first (coarse) search step. This is the only hyper parameter for search.
search_params = ivf_flat.SearchParams(n_probes=30)
# Search 10 nearest neighbors.
distances, indices = ivf_flat.search(search_params, index, cp.asarray(queries[:n_queries,:]), k=10, handle=handle)
# RAFT calls are asynchronous (when handle arg is provided), we need to sync before accessing the results.
handle.sync()
distances, neighbors = cp.asnumpy(distances), cp.asnumpy(indices)

calc_recall(neighbors, gt_neighbors)

# Round-trip the index through disk to demonstrate save/load.
index_file = os.path.join(WORK_FOLDER, "my_ivf_flat_index.bin")
ivf_flat.save(index_file, index)

index = ivf_flat.load(index_file)

# Sweep n_probes to trace out the QPS / recall trade-off curve.
n_probes = np.asarray([10, 20, 30, 50, 100, 200, 500, 1024]);
qps = np.zeros(n_probes.shape);
recall = np.zeros(n_probes.shape);
for i in range(len(n_probes)):
    print("\nBenchmarking search with n_probes =", n_probes[i])
    timer = BenchmarkTimer(reps=1, warmup=1)
    for rep in timer.benchmark_runs():
        distances, neighbors = ivf_flat.search(
            ivf_flat.SearchParams(n_probes=n_probes[i]),
            index,
            cp.asarray(queries),
            k=10,
            handle=handle,
        )
        handle.sync()
    # recall of the last run (results are deterministic for a fixed index)
    recall[i] = calc_recall(cp.asnumpy(neighbors), gt_neighbors)
    print("recall", recall[i])
    timings = np.asarray(timer.timings)
    avg_time = timings.mean()
    std_time = timings.std()
    qps[i] = queries.shape[0] / avg_time
    print("Average search time: {0:7.3f} +/- {1:7.3} s".format(avg_time, std_time))
    print("Queries per second (QPS): {0:8.0f}".format(qps[i]))

# Plot recall and QPS vs n_probes, and QPS vs recall.
fig = plt.figure(figsize=(12,3))
ax = fig.add_subplot(131)
ax.plot(n_probes, recall,'o-')
#ax.set_xticks(bench_k, bench_k)
ax.set_xlabel('n_probes')
ax.grid()
ax.set_ylabel('recall (@k=10)')
ax = fig.add_subplot(132)
ax.plot(n_probes, qps,'o-')
#ax.set_xticks(bench_k, bench_k)
ax.set_xlabel('n_probes')
ax.grid()
ax.set_ylabel('queries per second');
ax = fig.add_subplot(133)
ax.plot(recall, qps,'o-')
#ax.set_xticks(bench_k, bench_k)
ax.set_xlabel('recall')
ax.grid()
ax.set_ylabel('queries per second');
#ax.set_yscale('log')

%%time
# Rebuild with fewer, larger clusters and train k-means on the full dataset.
build_params = ivf_flat.IndexParams(
    n_lists=100,
    metric="euclidean",
    kmeans_trainset_fraction=1,
    kmeans_n_iters=20,
    add_data_on_build=True
)
index = ivf_flat.build(build_params, dataset, handle=handle)

%%time
n_queries=10000
search_params = ivf_flat.SearchParams(n_probes=10)
# Search 10 nearest neighbors.
distances, indices = ivf_flat.search(search_params, index, cp.asarray(queries[:n_queries,:]), k=10, handle=handle)
handle.sync()
distances, neighbors = cp.asnumpy(distances), cp.asnumpy(indices)

calc_recall(neighbors, gt_neighbors)

%%time
# Same build with the squared-euclidean metric (skips the final sqrt).
build_params = ivf_flat.IndexParams(
    n_lists=100,
    metric="sqeuclidean",
    kmeans_trainset_fraction=0.1,
    kmeans_n_iters=20
)
index = ivf_flat.build(build_params, dataset, handle=handle)

search_params = ivf_flat.SearchParams(n_probes=10)
distances, indices = ivf_flat.search(search_params, index, cp.asarray(queries[:n_queries,:]), k=10, handle=handle)
handle.sync()
distances, neighbors = cp.asnumpy(distances), cp.asnumpy(indices)
calc_recall(neighbors, gt_neighbors)

# subsample the dataset
n_train = 10000
train_set = dataset[cp.random.choice(dataset.shape[0], n_train, replace=False),:]

# build using training set
# add_data_on_build=False: train cluster centers only; vectors are added
# afterwards with ivf_flat.extend.
build_params = ivf_flat.IndexParams(
    n_lists=1024,
    metric="sqeuclidean",
    kmeans_trainset_fraction=1,
    kmeans_n_iters=20,
    add_data_on_build=False
)
index = ivf_flat.build(build_params, train_set)
print("Index before adding vectors", index)

ivf_flat.extend(index, dataset, cp.arange(dataset.shape[0], dtype=cp.int64))
print("Index after adding vectors", index)
0
rapidsai_public_repos/cuvs
rapidsai_public_repos/cuvs/notebooks/utils.py
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cupy as cp
import h5py
import os
import tempfile
import time
import urllib


## Check the quality of the prediction (recall)
def calc_recall(found_indices, ground_truth):
    """Return the fraction of found neighbor indices that appear in the ground truth.

    Parameters
    ----------
    found_indices : array, shape (batch_size, k)
        Neighbor indices returned by a search.
    ground_truth : array, shape (batch_size, >= k)
        True neighbor indices; only the first k columns are compared.

    Raises
    ------
    RuntimeError
        If the batch sizes differ, or ground_truth has fewer than k columns.
    """
    found_indices = cp.asarray(found_indices)
    bs, k = found_indices.shape
    if bs != ground_truth.shape[0]:
        raise RuntimeError(
            "Batch sizes do not match {} vs {}".format(
                bs, ground_truth.shape[0]
            )
        )
    if k > ground_truth.shape[1]:
        raise RuntimeError(
            "Not enough indices in the ground truth ({} > {})".format(
                k, ground_truth.shape[1]
            )
        )
    n = 0
    # Go over the batch
    for i in range(bs):
        # Note, ivf-pq does not guarantee the ordered input, hence the use of intersect1d
        n += cp.intersect1d(found_indices[i, :k], ground_truth[i, :k]).size
    recall = n / found_indices.size
    return recall


class BenchmarkTimer:
    """Provides a context manager that runs a code block `reps` times
    and records results to the instance variable `timings`. Use like:

    .. code-block:: python

        timer = BenchmarkTimer(rep=5)
        for _ in timer.benchmark_runs():
            ... do something ...
        print(np.min(timer.timings))

    This class is borrowed from the rapids/cuml benchmark suite
    """

    def __init__(self, reps=1, warmup=0):
        self.warmup = warmup
        self.reps = reps
        self.timings = []

    def benchmark_runs(self):
        """Yield `warmup + reps` iterations, timing each one.

        Only the timings of the non-warmup iterations are recorded.
        (A previous version appended every timing unconditionally *and*
        again for non-warmup reps, so warmup runs were included and
        measured runs were double-counted.)
        """
        for r in range(self.reps + self.warmup):
            t0 = time.time()
            yield r
            t1 = time.time()
            if r >= self.warmup:
                self.timings.append(t1 - t0)


def load_dataset(dataset_url, work_folder=None):
    """Download dataset from url. It is expected that the dataset contains a hdf5 file in ann-benchmarks format

    Parameters
    ----------
      dataset_url address of hdf5 file
      work_folder name of the local folder to store the dataset

    Returns
    -------
      An open, read-only ``h5py.File`` for the downloaded dataset.
    """
    # Note: the caller's URL is honored here. (A previous version
    # overwrote `dataset_url` with a hard-coded SIFT-128 URL, silently
    # ignoring the argument.)
    dataset_filename = dataset_url.split("/")[-1]

    # We'll need to load store some data in this tutorial
    if work_folder is None:
        work_folder = os.path.join(tempfile.gettempdir(), "raft_example")

    if not os.path.exists(work_folder):
        os.makedirs(work_folder)
    print("The index and data will be saved in", work_folder)

    ## download the dataset
    dataset_path = os.path.join(work_folder, dataset_filename)
    if not os.path.exists(dataset_path):
        urllib.request.urlretrieve(dataset_url, dataset_path)

    f = h5py.File(dataset_path, "r")
    return f
0
rapidsai_public_repos/cuvs
rapidsai_public_repos/cuvs/.devcontainer/README.md
# RAFT Development Containers This directory contains [devcontainer configurations](https://containers.dev/implementors/json_reference/) for using VSCode to [develop in a container](https://code.visualstudio.com/docs/devcontainers/containers) via the `Remote Containers` [extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) or [GitHub Codespaces](https://github.com/codespaces). This container is a turnkey development environment for building and testing the RAFT C++ and Python libraries. ## Table of Contents * [Prerequisites](#prerequisites) * [Host bind mounts](#host-bind-mounts) * [Launch a Dev Container](#launch-a-dev-container) ## Prerequisites * [VSCode](https://code.visualstudio.com/download) * [VSCode Remote Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) ## Host bind mounts By default, the following directories are bind-mounted into the devcontainer: * `${repo}:/home/coder/cuvs` * `${repo}/../.aws:/home/coder/.aws` * `${repo}/../.local:/home/coder/.local` * `${repo}/../.cache:/home/coder/.cache` * `${repo}/../.conda:/home/coder/.conda` * `${repo}/../.config:/home/coder/.config` This ensures caches, configurations, dependencies, and your commits are persisted on the host across container runs. ## Launch a Dev Container To launch a devcontainer from VSCode, open the RAFT repo and select the "Reopen in Container" button in the bottom right:<br/><img src="https://user-images.githubusercontent.com/178183/221771999-97ab29d5-e718-4e5f-b32f-2cdd51bba25c.png"/> Alternatively, open the VSCode command palette (typically `cmd/ctrl + shift + P`) and run the "Rebuild and Reopen in Container" command. ## Using the devcontainer On startup, the devcontainer creates or updates the conda/pip environment using `cuvs/dependencies.yaml`. 
The container includes convenience functions to clean, configure, and build the various RAFT components: ```shell $ clean-cuvs-cpp # only cleans the C++ build dir $ clean-pylibcuvs-python # only cleans the Python build dir $ clean-cuvs # cleans both C++ and Python build dirs $ configure-cuvs-cpp # only configures cuvs C++ lib $ build-cuvs-cpp # only builds cuvs C++ lib $ build-pylibcuvs-python # only builds cuvs Python lib $ build-cuvs # builds both C++ and Python libs ``` * The C++ build script is a small wrapper around `cmake -S ~/cuvs/cpp -B ~/cuvs/cpp/build` and `cmake --build ~/cuvs/cpp/build` * The Python build script is a small wrapper around `pip install --editable ~/cuvs/cpp` Unlike `build.sh`, these convenience scripts *don't* install the libraries after building them. Instead, they automatically inject the correct arguments to build the C++ libraries from source and use their build dirs as package roots: ```shell $ cmake -S ~/cuvs/cpp -B ~/cuvs/cpp/build $ CMAKE_ARGS="-Dcuvs_ROOT=~/cuvs/cpp/build" \ # <-- this argument is automatic pip install -e ~/cuvs/cpp ```
0
rapidsai_public_repos/cuvs
rapidsai_public_repos/cuvs/.devcontainer/Dockerfile
# syntax=docker/dockerfile:1.5 ARG BASE ARG PYTHON_PACKAGE_MANAGER=conda FROM ${BASE} as pip-base ENV DEFAULT_VIRTUAL_ENV=rapids FROM ${BASE} as conda-base ENV DEFAULT_CONDA_ENV=rapids FROM ${PYTHON_PACKAGE_MANAGER}-base ARG CUDA ENV CUDAARCHS="RAPIDS" ENV CUDA_VERSION="${CUDA_VERSION:-${CUDA}}" ARG PYTHON_PACKAGE_MANAGER ENV PYTHON_PACKAGE_MANAGER="${PYTHON_PACKAGE_MANAGER}" ENV PYTHONSAFEPATH="1" ENV PYTHONUNBUFFERED="1" ENV PYTHONDONTWRITEBYTECODE="1" ENV SCCACHE_REGION="us-east-2" ENV SCCACHE_BUCKET="rapids-sccache-devs" ENV VAULT_HOST="https://vault.ops.k8s.rapids.ai" ENV HISTFILE="/home/coder/.cache/._bash_history"
0
rapidsai_public_repos/cuvs/.devcontainer
rapidsai_public_repos/cuvs/.devcontainer/cuda11.8-pip/devcontainer.json
{ "build": { "context": "${localWorkspaceFolder}/.devcontainer", "dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile", "args": { "CUDA": "11.8", "PYTHON_PACKAGE_MANAGER": "pip", "BASE": "rapidsai/devcontainers:24.02-cpp-llvm16-cuda11.8-ubuntu22.04" } }, "hostRequirements": {"gpu": "optional"}, "features": { "ghcr.io/rapidsai/devcontainers/features/ucx:24.2": {"version": "1.14.1"}, "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/rapidsai/devcontainers/features/ucx", "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils" ], "initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda11.8-venvs}"], "postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"], "workspaceFolder": "/home/coder", "workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/cuvs,type=bind,consistency=consistent", "mounts": [ "source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda11.8-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent" ], "customizations": { "vscode": { "extensions": [ "ms-python.flake8", "nvidia.nsight-vscode-edition" ] } } }
0
rapidsai_public_repos/cuvs/.devcontainer
rapidsai_public_repos/cuvs/.devcontainer/cuda12.0-pip/devcontainer.json
{ "build": { "context": "${localWorkspaceFolder}/.devcontainer", "dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile", "args": { "CUDA": "12.0", "PYTHON_PACKAGE_MANAGER": "pip", "BASE": "rapidsai/devcontainers:24.02-cpp-llvm16-cuda12.0-ubuntu22.04" } }, "hostRequirements": {"gpu": "optional"}, "features": { "ghcr.io/rapidsai/devcontainers/features/ucx:24.2": {"version": "1.14.1"}, "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/rapidsai/devcontainers/features/ucx", "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils" ], "initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda12.0-venvs}"], "postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"], "workspaceFolder": "/home/coder", "workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/cuvs,type=bind,consistency=consistent", "mounts": [ "source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda12.0-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent" ], "customizations": { "vscode": { "extensions": [ "ms-python.flake8", "nvidia.nsight-vscode-edition" ] } } }
0
rapidsai_public_repos/cuvs/.devcontainer
rapidsai_public_repos/cuvs/.devcontainer/cuda12.0-conda/devcontainer.json
{ "build": { "context": "${localWorkspaceFolder}/.devcontainer", "dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile", "args": { "CUDA": "12.0", "PYTHON_PACKAGE_MANAGER": "conda", "BASE": "rapidsai/devcontainers:24.02-cpp-mambaforge-ubuntu22.04" } }, "hostRequirements": {"gpu": "optional"}, "features": { "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils" ], "initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda12.0-envs}"], "postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"], "workspaceFolder": "/home/coder", "workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/cuvs,type=bind,consistency=consistent", "mounts": [ "source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.conda/pkgs,target=/home/coder/.conda/pkgs,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda12.0-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent" ], "customizations": { "vscode": { "extensions": [ "ms-python.flake8", "nvidia.nsight-vscode-edition" ] } } }
0
rapidsai_public_repos/cuvs/.devcontainer
rapidsai_public_repos/cuvs/.devcontainer/cuda11.8-conda/devcontainer.json
{ "build": { "context": "${localWorkspaceFolder}/.devcontainer", "dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile", "args": { "CUDA": "11.8", "PYTHON_PACKAGE_MANAGER": "conda", "BASE": "rapidsai/devcontainers:24.02-cpp-llvm16-cuda11.8-mambaforge-ubuntu22.04" } }, "hostRequirements": {"gpu": "optional"}, "features": { "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils" ], "initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda11.8-envs}"], "postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"], "workspaceFolder": "/home/coder", "workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/cuvs,type=bind,consistency=consistent", "mounts": [ "source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.conda/pkgs,target=/home/coder/.conda/pkgs,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda11.8-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent" ], "customizations": { "vscode": { "extensions": [ "ms-python.flake8", "nvidia.nsight-vscode-edition" ] } } }
0
rapidsai_public_repos/cuvs
rapidsai_public_repos/cuvs/docs/README.md
# Building Documentation

## Building locally:

#### [Build and install RAFT](source/build.md)

#### Generate the docs

```shell script
bash build.sh docs
```

#### Once the process finishes, documentation can be found in build/html

```shell script
xdg-open build/html/index.html
```
0
rapidsai_public_repos/cuvs
rapidsai_public_repos/cuvs/docs/make.bat
@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=source set BUILDDIR=build set SPHINXPROJ=cuML if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% :end popd
0
rapidsai_public_repos/cuvs
rapidsai_public_repos/cuvs/docs/Makefile
# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = RAFT SOURCEDIR = source BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help -v "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
0
rapidsai_public_repos/cuvs/docs
rapidsai_public_repos/cuvs/docs/source/pylibraft_api.rst
~~~~~~~~~~ Python API ~~~~~~~~~~ .. _api: .. toctree:: :maxdepth: 4 pylibraft_api/cluster.rst pylibraft_api/common.rst pylibraft_api/distance.rst pylibraft_api/matrix.rst pylibraft_api/neighbors.rst pylibraft_api/random.rst
0
rapidsai_public_repos/cuvs/docs
rapidsai_public_repos/cuvs/docs/source/cpp_api.rst
~~~~~~~ C++ API ~~~~~~~ .. _api: .. toctree:: :maxdepth: 4 cpp_api/cluster.rst cpp_api/distance.rst cpp_api/neighbors.rst cpp_api/sparse.rst cpp_api/stats.rst
0
rapidsai_public_repos/cuvs/docs
rapidsai_public_repos/cuvs/docs/source/vector_search_tutorial.md
# Vector Search in C++ Tutorial

RAFT has several important algorithms for performing vector search on the GPU and this tutorial walks through the primary vector search APIs from start to finish to provide a reference for quick setup and C++ API usage.

This tutorial assumes RAFT has been installed and/or added to your build so that you are able to compile and run RAFT code. If not done already, please follow the [build and install instructions](build.md) and consider taking a look at the [example c++ template project](https://github.com/rapidsai/raft/tree/HEAD/cpp/template) for ready-to-go examples that you can immediately build and start playing with. Also take a look at RAFT's library of [reproducible vector search benchmarks](raft_ann_benchmarks.md) to run benchmarks that compare RAFT against other state-of-the-art nearest neighbors algorithms at scale.

For more information about the various APIs demonstrated in this tutorial, along with comprehensive usage examples of all the APIs offered by RAFT, please refer to the [RAFT's C++ API Documentation](https://docs.rapids.ai/api/raft/nightly/cpp_api/).

## Step 1: Starting off with RAFT

### CUDA Development?

If you are reading this tutorial then you probably know about CUDA and its relationship to general-purpose GPU computing (GPGPU). You probably also know about Nvidia GPUs but might not necessarily be familiar with the programming model nor GPU computing. The good news is that extensive knowledge of CUDA and GPUs is not needed in order to get started with or build applications with RAFT. RAFT hides away most of the complexities behind simple single-threaded stateless functions that are inherently asynchronous, meaning the result of a computation isn't necessarily ready to be used when the function executes and control is given back to the user.
The functions are, however, allowed to be chained together in a sequence of calls that don't need to wait for subsequent computations to complete in order to continue execution. In fact, the only time you need to wait for the computation to complete is when you are ready to use the result. A common structure you will encounter when using RAFT is a `raft::device_resources` object. This object is a container for important resources for a single GPU that might be needed during computation. If communicating with multiple GPUs, multiple `device_resources` might be needed, one for each GPU. `device_resources` contains several methods for managing its state but most commonly, you'll call the `sync_stream()` to guarantee all recently submitted computation has completed (as mentioned above.) A simple example of using `raft::device_resources` in RAFT: ```c++ #include <raft/core/device_resources.hpp> raft::device_resources res; // Call a bunch of RAFT functions in sequence... res.sync_stream() ``` ### Host vs Device Memory We differentiate between two different types of memory. `host` memory is your traditional RAM memory that is primarily accessible by applications on the CPU. `device` memory, on the other hand, is what we call the special memory on the GPU, which is not accessible from the CPU. In order to access host memory from the GPU, it needs to be explicitly copied to the GPU and in order to access device memory by the CPU, it needs to be explicitly copied there. We have several mechanisms available for allocating and managing the lifetime of device memory on the stack so that we don't need to explicitly allocate and free pointers on the heap. For example, instead of a `std::vector` for host memory, we can use `rmm::device_uvector` on the device. 
The following function will copy an array from host memory to device memory:

```c++
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <vector>

raft::device_resources res;

std::vector<int> my_host_vector = {0, 1, 2, 3, 4};
rmm::device_uvector<int> my_device_vector(my_host_vector.size(), res.get_stream());

raft::copy(my_device_vector.data(), my_host_vector.data(), my_host_vector.size(), res.get_stream());
```

Since a stream is involved in the copy operation above, RAFT functions can be invoked immediately so long as the same `device_resources` instance is used (or, more specifically, the same main stream from the `device_resources`.)

As you might notice in the example above, `res.get_stream()` can be used to extract the main stream from a `device_resources` instance.

### Multi-dimensional data representation

`rmm::device_uvector` is a great mechanism for allocating and managing a chunk of device memory. While it's possible to use a single array to represent objects in higher dimensions like matrices, it lacks the means to pass that information along. For example, in addition to knowing that we have a 2d structure, we would need to know the number of rows, the number of columns, and even whether we read the columns or rows first (referred to as column- or row-major respectively).

For this reason, RAFT relies on the `mdspan` standard, which was composed specifically for this purpose. To be even more clear, `mdspan` itself doesn't actually allocate or own any data on host or device because it's just a view over existing memory on the host or device. The `mdspan` simply gives us a way to represent multi-dimensional data so we can pass along the needed metadata to our APIs. Even more powerful is that we can design functions that only accept a matrix of `float` in device memory that is laid out in row-major format.

The memory-owning counterpart to the `mdspan` is the `mdarray` and the `mdarray` can allocate memory on device or host and carry along with it the metadata about its shape and layout. An `mdspan` can be produced from an `mdarray` for invoking RAFT APIs with `mdarray.view()`. They also follow similar paradigms to the STL, where we represent an immutable `mdspan` of `int` using `mdspan<const int>` instead of `const mdspan<int>` to ensure it's the type carried along by the `mdspan` that's not allowed to change.

Many RAFT functions require `mdspan<const T>` to represent immutable input data, and since there's no implicit conversion between `mdspan<T>` and `mdspan<const T>`, we use `raft::make_const_mdspan()` to alleviate the pain of constructing a new `mdspan` to invoke these functions.

The following example demonstrates how to create `mdarray` matrices in both device and host memory, copy one to the other, and create mdspans out of them:

```c++
#include <raft/core/device_mdarray.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/copy.hpp>

raft::device_resources res;

int n_rows = 10;
int n_cols = 10;

auto device_matrix = raft::make_device_matrix<float>(res, n_rows, n_cols);
auto host_matrix = raft::make_host_matrix<float>(res, n_rows, n_cols);

// Set the diagonal to 1
for(int i = 0; i < n_rows; i++) {
    host_matrix(i, i) = 1;
}

raft::copy(res, device_matrix.view(), host_matrix.view());
```

## Step 2: Generate some data

Let's build upon the fundamentals from the prior section and actually invoke some of RAFT's computational APIs on the device. A good starting point is data generation.

```c++
#include <raft/core/device_mdarray.hpp>
#include <raft/random/make_blobs.cuh>

raft::device_resources res;

int n_rows = 10000;
int n_cols = 10000;

auto dataset = raft::make_device_matrix<float, int>(res, n_rows, n_cols);
auto labels = raft::make_device_vector<float, int>(res, n_rows);

raft::make_blobs(res, dataset.view(), labels.view());
```

That's it.
We've now generated a random 10kx10k matrix with points that cleanly separate into Gaussian clusters, along with a vector of cluster labels for each of the data points. Notice the `cuh` extension in the header file include for `make_blobs`. This signifies to us that this file contains CUDA device functions like kernel code so the CUDA compiler, `nvcc` is needed in order to compile any code that uses it. Generally, any source files that include headers with a `cuh` extension use the `.cu` extension instead of `.cpp`. The rule here is that `cpp` source files contain code which can be compiled with a C++ compiler like `g++` while `cu` files require the CUDA compiler. Since the `make_blobs` code generates the random dataset on the GPU device, we didn't need to do any host to device copies in this one. `make_blobs` is also asynchronous, so if we don't need to copy and use the data in host memory right away, we can continue calling RAFT functions with the `device_resources` instance and the data transformations will all be scheduled on the same stream. ## Step 3: Using brute-force indexes ### Build brute-force index Consider the `(10k, 10k)` shaped random matrix we generated in the previous step. We want to be able to find the k-nearest neighbors for all points of the matrix, or what we refer to as the all-neighbors graph, which means finding the neighbors of all data points within the same matrix. 
```c++ #include <raft/neighbors/brute_force.cuh> raft::device_resources res; // set number of neighbors to search for int const k = 64; auto bfknn_index = raft::neighbors::brute_force::build(res, raft::make_const_mdspan(dataset.view())); ``` ### Query brute-force index ```c++ // using matrix `dataset` from previous example auto search = raft::make_const_mdspan(dataset.view()); // Indices and Distances are of dimensions (n, k) // where n is number of rows in the search matrix auto reference_indices = raft::make_device_matrix<int, int>(res, search.extent(0), k); // stores index of neighbors auto reference_distances = raft::make_device_matrix<float, int>(res, search.extent(0), k); // stores distance to neighbors raft::neighbors::brute_force::search(res, bfknn_index, search, reference_indices.view(), reference_distances.view()); ``` We have established several things here by building a flat index. Now we know the exact 64 neighbors of all points in the matrix, and this algorithm can be generally useful in several ways: 1. Creating a baseline to compare against when building an approximate nearest neighbors index. 2. Directly using the brute-force algorithm when accuracy is more important than speed of computation. Don't worry, our implementation is still the best in-class and will provide not only significant speedups over other brute force methods, but also be relatively quick when the matrices are small! ## Step 4: Using the ANN indexes ### Build a CAGRA index Next we'll train an ANN index. We'll use our graph-based CAGRA algorithm for this example but the other index types use a very similar pattern.
```c++ #include <raft/neighbors/cagra.cuh> raft::device_resources res; // use default index parameters cagra::index_params index_params; auto index = cagra::build<float, uint32_t>(res, index_params, dataset); ``` ### Query the CAGRA index Now that we've trained a CAGRA index, we can query it by first allocating our output `mdarray` objects and passing the trained index model into the search function. ```c++ // create output arrays auto indices = raft::make_device_matrix<uint32_t>(res, n_rows, k); auto distances = raft::make_device_matrix<float>(res, n_rows, k); // use default search parameters cagra::search_params search_params; // search K nearest neighbors cagra::search<float, uint32_t>( res, search_params, index, search, indices.view(), distances.view()); ``` ## Step 5: Evaluate neighborhood quality In step 3 we built a flat index and queried for exact neighbors while in step 4 we built an ANN index and queried for approximate neighbors. How do you quickly figure out the quality of our approximate neighbors and whether it's in an acceptable range based on your needs? Just compute the `neighborhood_recall` which gives a single value in the range [0, 1]. The closer the value is to 1, the higher the quality of the approximation.
```c++ #include <raft/stats/neighborhood_recall.cuh> raft::device_resources res; // Assuming matrices as type raft::device_matrix_view and variables as // indices : approximate neighbor indices // reference_indices : exact neighbor indices // distances : approximate neighbor distances // reference_distances : exact neighbor distances // We want our `neighborhood_recall` value in host memory float const recall_scalar = 0.0; auto recall_value = raft::make_host_scalar(recall_scalar); raft::stats::neighborhood_recall(res, raft::make_const_mdspan(indices.view()), raft::make_const_mdspan(reference_indices.view()), recall_value.view(), raft::make_const_mdspan(distances), raft::make_const_mdspan(reference_distances)); res.sync_stream(); ``` Notice we can invoke the functions for index build and search for both algorithms, one right after the other, because we don't need to access any outputs from the algorithms in host memory. We will need to synchronize the stream on the `raft::device_resources` instance before we can read the result of the `neighborhood_recall` computation, though. Similar to a Numpy array, when we use a `host_scalar`, we are really using a multi-dimensional structure that contains only a single dimension, and further a single element. We can use element indexing to access the resulting element directly. ```c++ std::cout << recall_value(0) << std::endl; ``` While it may seem like unnecessary additional work to wrap the result in a `host_scalar` mdspan, this API choice is made intentionally to support the possibility of also receiving the result as a `device_scalar` so that it can be used directly on the device for follow-on computations without having to incur the synchronization or transfer cost of bringing the result to host. This pattern becomes even more important when the result is being computed in a loop, such as an iterative solver, and the cost of synchronization and device-to-host (d2h) transfer becomes very expensive.
## Advanced features The following sections present some advanced features that we have found can be useful for squeezing more utilization out of GPU hardware. As you've seen in this tutorial, RAFT provides several very useful tools and building blocks for developing accelerated applications beyond vector search capabilities. ### Stream pools Within each CPU thread, CUDA uses `streams` to submit asynchronous work. You can think of a stream as a queue. Each stream can submit work to the GPU independently of other streams but work submitted within each stream is queued and executed in the order in which it was submitted. Similar to how we can use thread pools to bound the parallelism of CPU threads, we can use CUDA stream pools to bound the amount of concurrent asynchronous work that can be scheduled on a GPU. Each instance of `device_resources` has a main stream, but can also create a stream pool. For a single CPU thread, multiple different instances of `device_resources` can be created with different main streams and used to invoke a series of RAFT functions concurrently on the same or different GPU devices, so long as the target devices have available resources to perform the work. Once a device is saturated, queued work on streams will be scheduled and wait for a chance to do more work. During this time the streams are waiting, the CPU thread will still continue its own execution asynchronously unless `sync_stream_pool()` is called, causing the thread to block and wait for the thread pools to complete. Also, beware that before splitting GPU work onto multiple different concurrent streams, it can often be important to wait for the main stream in the `device_resources`. This can be done with `wait_stream_pool_on_stream()`. 
To summarize, if wanting to execute multiple different streams in parallel, we would often use a stream pool like this: ```c++ #include <raft/core/device_resources.hpp> #include <rmm/cuda_stream_pool.hpp> #include <rmm/cuda_stream.hpp> int n_streams = 5; rmm::cuda_stream stream; auto stream_pool = std::make_shared<rmm::cuda_stream_pool>(n_streams); raft::device_resources res(stream.view(), stream_pool); // Submit some work on the main stream... res.wait_stream_pool_on_stream(); for(int i = 0; i < n_streams; ++i) { rmm::cuda_stream_view stream_from_pool = res.get_next_usable_stream(); raft::device_resources pool_res(stream_from_pool); // Submit some work with pool_res... } res.sync_stream_pool(); ``` ### Device resources manager In multi-threaded applications, it is often useful to create a set of `raft::device_resources` objects on startup to avoid the overhead of re-initializing underlying resources every time a `raft::device_resources` object is needed. To help simplify this common initialization logic, RAFT provides a `raft::device_resources_manager` to handle this for downstream applications. On startup, the application can specify certain limits on the total resource consumption of the `raft::device_resources` objects that will be generated: ```c++ #include <raft/core/device_resources_manager.hpp> void initialize_application() { // Set the total number of CUDA streams to use on each GPU across all CPU // threads. If this method is not called, the default stream per thread // will be used. raft::device_resources_manager::set_streams_per_device(16); // Create a memory pool with given max size in bytes. Passing std::nullopt will allow // the pool to grow to the available memory of the device. raft::device_resources_manager::set_max_mem_pool_size(std::nullopt); // Set the initial size of the memory pool in bytes.
raft::device_resources_manager::set_init_mem_pool_size(16000000); // If neither of the above methods are called, no memory pool will be used } ``` While this example shows some commonly used settings, `raft::device_resources_manager` provides support for several other resource options and constraints, including options to initialize entire stream pools that can be used by an individual `raft::device_resources` object. After this initialization method is called, the following function could be called from any CPU thread: ```c++ void foo() { raft::device_resources const& res = raft::device_resources_manager::get_device_resources(); // Submit some work with res res.sync_stream(); } ``` If any `raft::device_resources_manager` setters are called _after_ the first call to `raft::device_resources_manager::get_device_resources()`, these new settings are ignored, and a warning will be logged. If a thread calls `raft::device_resources_manager::get_device_resources()` multiple times, it is guaranteed to access the same underlying `raft::device_resources` object every time. This can be useful for chaining work in different calls on the same thread without keeping a persistent reference to the resources object. ### Device memory resources The RAPIDS software ecosystem makes heavy use of the [RAPIDS Memory Manager](https://github.com/rapidsai/rmm) (RMM) to enable zero-copy sharing of device memory across various GPU-enabled libraries such as PyTorch, Jax, Tensorflow, and FAISS. A really powerful feature of RMM is the ability to set a memory resource, such as a pooled memory resource that allocates a block of memory up front to speed up subsequent smaller allocations, and have all the libraries in the GPU ecosystem recognize and use that same memory resource for all of their memory allocations. 
As an example, the following code snippet creates a `pool_memory_resource` and sets it as the default memory resource, which means all other libraries that use RMM will now allocate their device memory from this same pool: ```c++ #include <rmm/mr/device/pool_memory_resource.hpp> rmm::mr::cuda_memory_resource cuda_mr; // Construct a resource that uses a coalescing best-fit pool allocator rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> pool_mr{&cuda_mr}; rmm::mr::set_current_device_resource(&pool_mr); // Updates the current device resource pointer to `pool_mr` ``` The `raft::device_resources` object will now also use the `rmm::current_device_resource`. This isn't limited to C++, however. Often a user will be interacting with PyTorch, RAPIDS, or Tensorflow through Python and so they can set and use RMM's `current_device_resource` [right in Python](https://github.com/rapidsai/rmm#using-rmm-in-python-code). ### Workspace memory resource As mentioned above, `raft::device_resources` will use `rmm::current_device_resource` by default for all memory allocations. However, there are times when a particular algorithm might benefit from using a different memory resource such as a `managed_memory_resource`, which creates a unified memory space between device and host memory, paging memory in and out of device as needed. Most of RAFT's algorithms allocate temporary memory as needed to perform their computations and we can control the memory resource used for these temporary allocations through the `workspace_resource` in the `raft::device_resources` instance. For some applications, the `managed_memory_resource`, can enable a memory space that is larger than the GPU, thus allowing a natural spilling to host memory when needed. This isn't always the best way to use managed memory, though, as it can quickly lead to thrashing and severely impact performance. 
Still, when it can be used, it provides a very powerful tool that can also avoid out of memory errors when enough host memory is available. The following creates a managed memory allocator and sets it as the `workspace_resource` of the `raft::device_resources` instance: ```c++ #include <raft/core/device_resources.hpp> #include <rmm/mr/device/managed_memory_resource.hpp> std::shared_ptr<rmm::mr::managed_memory_resource> managed_resource; raft::device_resources res(managed_resource); ``` The `workspace_resource` uses an `rmm::mr::limiting_resource_adaptor`, which limits the total amount of allocation possible. This allows RAFT algorithms to work within the confines of the memory constraints imposed by the user so that things like batch sizes can be automatically set to reasonable values without exceeding the allotted memory. By default, this limit restricts the memory allocation space for temporary workspace buffers to the memory available on the device. The below example specifies the total number of bytes that RAFT can use for temporary workspace allocations to 3GB: ```c++ #include <raft/core/device_resources.hpp> #include <rmm/mr/device/managed_memory_resource.hpp> #include <optional> std::shared_ptr<rmm::mr::managed_memory_resource> managed_resource; raft::device_resources res(managed_resource, std::make_optional<std::size_t>(3ULL * 1024 * 1024 * 1024)); ```
0
rapidsai_public_repos/cuvs/docs
rapidsai_public_repos/cuvs/docs/source/ann_benchmarks_build.md
### Dependencies CUDA 11 and a GPU with Pascal architecture or later are required to run the benchmarks. Please refer to the [installation docs](https://docs.rapids.ai/api/raft/stable/build.html#cuda-gpu-requirements) for the base requirements to build RAFT. In addition to the base requirements for building RAFT, additional dependencies needed to build the ANN benchmarks include: 1. FAISS GPU >= 1.7.1 2. Google Logging (GLog) 3. H5Py 4. HNSWLib 5. nlohmann_json 6. GGNN [rapids-cmake](https://github.com/rapidsai/rapids-cmake) is used to build the ANN benchmarks so the code for dependencies not already supplied in the CUDA toolkit will be downloaded and built automatically. The easiest (and most reproducible) way to install the dependencies needed to build the ANN benchmarks is to use the conda environment file located in the `conda/environments` directory of the RAFT repository. The following command will use `mamba` (which is preferred over `conda`) to build and activate a new environment for compiling the benchmarks: ```bash mamba env create --name raft_ann_benchmarks -f conda/environments/bench_ann_cuda-118_arch-x86_64.yaml conda activate raft_ann_benchmarks ``` The above conda environment will also reduce the compile times as dependencies like FAISS will already be installed and not need to be compiled with `rapids-cmake`. ### Compiling the Benchmarks After the needed dependencies are satisfied, the easiest way to compile ANN benchmarks is through the `build.sh` script in the root of the RAFT source code repository. 
The following will build the executables for all the supported algorithms: ```bash ./build.sh bench-ann ``` You can limit the algorithms that are built by providing a semicolon-delimited list of executable names (each algorithm is suffixed with `_ANN_BENCH`): ```bash ./build.sh bench-ann -n --limit-bench-ann="HNSWLIB_ANN_BENCH;RAFT_IVF_PQ_ANN_BENCH" ``` Available targets to use with `--limit-bench-ann` are: - FAISS_IVF_FLAT_ANN_BENCH - FAISS_IVF_PQ_ANN_BENCH - FAISS_BFKNN_ANN_BENCH - GGNN_ANN_BENCH - HNSWLIB_ANN_BENCH - RAFT_CAGRA_ANN_BENCH - RAFT_IVF_PQ_ANN_BENCH - RAFT_IVF_FLAT_ANN_BENCH By default, the `*_ANN_BENCH` executables infer the dataset's datatype from the filename's extension. For example, an extension of `fbin` uses a `float` datatype, `f16bin` uses a `float16` datatype, extension of `i8bin` uses `int8_t` datatype, and `u8bin` uses `uint8_t` type. Currently, only `float`, `float16`, `int8_t`, and `uint8_t` are supported.
0
rapidsai_public_repos/cuvs/docs
rapidsai_public_repos/cuvs/docs/source/wiki_all_dataset.md
# Wiki-all Dataset The `wiki-all` dataset was created to stress vector search algorithms at scale with both a large number of vectors and dimensions. The entire dataset contains 88M vectors with 768 dimensions and is meant for testing the types of vectors one would typically encounter in retrieval augmented generation (RAG) workloads. The full dataset is ~251GB in size, which is intentionally larger than the typical memory of GPUs. The massive scale is intended to promote the use of compression and efficient out-of-core methods for both indexing and search. The dataset is composed of all the available languages of in the [Cohere Wikipedia dataset](https://huggingface.co/datasets/Cohere/wikipedia-22-12). An [English version]( https://www.kaggle.com/datasets/jjinho/wikipedia-20230701) is also available. The dataset is composed of English wiki texts from [Kaggle](https://www.kaggle.com/datasets/jjinho/wikipedia-20230701) and multi-lingual wiki texts from [Cohere Wikipedia](https://huggingface.co/datasets/Cohere/wikipedia-22-12). Cohere's English Texts are older (2022) and smaller than the Kaggle English Wiki texts (2023) so the English texts have been removed from Cohere completely. The final Wiki texts include English Wiki from Kaggle and the other languages from Cohere. The English texts constitute 50% of the total text size. To form the final dataset, the Wiki texts were chunked into 85 million 128-token pieces. For reference, Cohere chunks Wiki texts into 104-token pieces. Finally, the embeddings of each chunk were computed using the [paraphrase-multilingual-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-mpnet-base-v2) embedding model. The resulting dataset is an embedding matrix of size 88 million by 768. Also included with the dataset is a query file containing 10k query vectors and a groundtruth file to evaluate nearest neighbors algorithms. 
## Getting the dataset ### Full dataset A version of the dataset is made available in the binary format that can be used directly by the [raft-ann-bench](https://docs.rapids.ai/api/raft/nightly/raft_ann_benchmarks/) tool. The full 88M dataset is ~251GB and the download link below contains tarballs that have been split into multiple parts. The following will download all 10 parts and untar them to a `wiki_all_88M` directory: ```bash curl -s https://data.rapids.ai/raft/datasets/wiki_all/wiki_all.tar.{00..9} | tar -xf - -C wiki_all_88M/ ``` The above has the unfortunate drawback that if the command should fail for any reason, all the parts need to be re-downloaded. The files can also be downloaded individually and then untarred to the directory. Each file is ~27GB and there are 10 of them. ```bash curl -sO https://data.rapids.ai/raft/datasets/wiki_all/wiki_all.tar.00 ... curl -sO https://data.rapids.ai/raft/datasets/wiki_all/wiki_all.tar.09 cat wiki_all.tar.* | tar -xf - -C wiki_all_88M/ ``` ### 1M and 10M subsets Also available are 1M and 10M subsets of the full dataset which are 2.9GB and 29GB, respectively. These subsets also include query sets of 10k vectors and corresponding groundtruth files. ```bash curl -sO https://data.rapids.ai/raft/datasets/wiki_all_1M/wiki_all_1M.tar curl -sO https://data.rapids.ai/raft/datasets/wiki_all_10M/wiki_all_10M.tar ``` ## Using the dataset After the dataset is downloaded and extracted to the `wiki_all_88M` directory (or `wiki_all_1M`/`wiki_all_10M` depending on whether the subsets are used), the files can be used in the benchmarking tool. The dataset name is `wiki_all` (or `wiki_all_1M`/`wiki_all_10M`), and the benchmarking tool can be used by specifying the appropriate name `--dataset wiki_all_88M` in the scripts.
0
rapidsai_public_repos/cuvs/docs
rapidsai_public_repos/cuvs/docs/source/ann_benchmarks_dataset.md
# ANN Benchmarks Datasets A dataset usually has 4 binary files containing database vectors, query vectors, ground truth neighbors and their corresponding distances. For example, Glove-100 dataset has files `base.fbin` (database vectors), `query.fbin` (query vectors), `groundtruth.neighbors.ibin` (ground truth neighbors), and `groundtruth.distances.fbin` (ground truth distances). The first two files are for index building and searching, while the other two are associated with a particular distance and are used for evaluation. The file suffixes `.fbin`, `.f16bin`, `.ibin`, `.u8bin`, and `.i8bin` denote that the data type of vectors stored in the file are `float32`, `float16`(a.k.a `half`), `int`, `uint8`, and `int8`, respectively. These binary files are little-endian and the format is: the first 8 bytes are `num_vectors` (`uint32_t`) and `num_dimensions` (`uint32_t`), and the following `num_vectors * num_dimensions * sizeof(type)` bytes are vectors stored in row-major order. Some implementation can take `float16` database and query vectors as inputs and will have better performance. Use `script/fbin_to_f16bin.py` to transform dataset from `float32` to `float16` type. Commonly used datasets can be downloaded from two websites: 1. Million-scale datasets can be found at the [Data sets](https://github.com/erikbern/ann-benchmarks#data-sets) section of [`ann-benchmarks`](https://github.com/erikbern/ann-benchmarks). However, these datasets are in HDF5 format. Use `cpp/bench/ann/scripts/hdf5_to_fbin.py` to transform the format. A few Python packages are required to run it: ```bash pip3 install numpy h5py ``` The usage of this script is: ```bash $ cpp/bench/ann/scripts/hdf5_to_fbin.py usage: scripts/hdf5_to_fbin.py [-n] <input>.hdf5 -n: normalize base/query set outputs: <input>.base.fbin <input>.query.fbin <input>.groundtruth.neighbors.ibin <input>.groundtruth.distances.fbin ``` So for an input `.hdf5` file, four output binary files will be produced. 
See previous section for an example of preprocessing the GloVe dataset. Most datasets provided by `ann-benchmarks` use `Angular` or `Euclidean` distance. `Angular` denotes cosine distance. However, computing cosine distance reduces to computing inner product by normalizing vectors beforehand. In practice, we can always do the normalization to decrease computation cost, so it's better to measure the performance of inner product rather than cosine distance. The `-n` option of `hdf5_to_fbin.py` can be used to normalize the dataset. 2. <a id='billion-scale'></a>Billion-scale datasets can be found at [`big-ann-benchmarks`](http://big-ann-benchmarks.com). The ground truth file contains both neighbors and distances, thus should be split. A script is provided for this: ```bash $ cpp/bench/ann/scripts/split_groundtruth.pl usage: script/split_groundtruth.pl input output_prefix ``` Take Deep-1B dataset as an example: ```bash pushd cd cpp/bench/ann mkdir -p data/deep-1B && cd data/deep-1B # download manually "Ground Truth" file of "Yandex DEEP" # suppose the file name is deep_new_groundtruth.public.10K.bin ../../scripts/split_groundtruth.pl deep_new_groundtruth.public.10K.bin groundtruth # two files 'groundtruth.neighbors.ibin' and 'groundtruth.distances.fbin' should be produced popd ``` Besides ground truth files for the whole billion-scale datasets, this site also provides ground truth files for the first 10M or 100M vectors of the base sets. This means we can use these billion-scale datasets as million-scale datasets. To facilitate this, an optional parameter `subset_size` for dataset can be used. See the next step for further explanation. ## Generate ground truth If you have a dataset, but no corresponding ground truth file, then you can generate ground truth using the `generate_groundtruth` utility.
Example usage: ```bash # With existing query file python -m raft-ann-bench.generate_groundtruth --dataset /dataset/base.fbin --output=groundtruth_dir --queries=/dataset/query.public.10K.fbin # With randomly generated queries python -m raft-ann-bench.generate_groundtruth --dataset /dataset/base.fbin --output=groundtruth_dir --queries=random --n_queries=10000 # Using only a subset of the dataset. Define queries by randomly # selecting vectors from the (subset of the) dataset. python -m raft-ann-bench.generate_groundtruth --dataset /dataset/base.fbin --nrows=2000000 --output=groundtruth_dir --queries=random-choice --n_queries=10000 ```
0
rapidsai_public_repos/cuvs/docs
rapidsai_public_repos/cuvs/docs/source/contributing.md
# Contributing If you are interested in contributing to CUVS, your contributions will fall into three categories: 1. You want to report a bug, feature request, or documentation issue - File an [issue](https://github.com/rapidsai/CUVS/issues/new/choose) describing what you encountered or what you want to see changed. - The RAPIDS team will evaluate the issues and triage them, scheduling them for a release. If you believe the issue needs priority attention comment on the issue to notify the team. 2. You want to propose a new Feature and implement it - Post about your intended feature, and we shall discuss the design and implementation. - Once we agree that the plan looks good, go ahead and implement it, using the [code contributions](#code-contributions) guide below. 3. You want to implement a feature or bug-fix for an outstanding issue - Follow the [code contributions](#code-contributions) guide below. - If you need more context on a particular issue, please ask and we shall provide. ## Code contributions ### Your first issue 1. Read the project's [README.md](https://github.com/rapidsai/cuvs) to learn how to setup the development environment 2. Find an issue to work on. The best way is to look for the [good first issue](https://github.com/rapidsai/CUVS/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) or [help wanted](https://github.com/rapidsai/CUVS/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) labels 3. Comment on the issue saying you are going to work on it 4. Code! Make sure to update unit tests! 5. When done, [create your pull request](https://github.com/rapidsai/CUVS/compare) 6. Verify that CI passes all [status checks](https://help.github.com/articles/about-status-checks/). Fix if needed 7. Wait for other developers to review your code and update code as needed 8. 
Once reviewed and approved, a RAPIDS developer will merge your pull request Remember, if you are unsure about anything, don't hesitate to comment on issues and ask for clarifications! ### Python / Pre-commit hooks CUVS uses [pre-commit](https://pre-commit.com/) to execute code linters and formatters such as [Black](https://black.readthedocs.io/en/stable/), [isort](https://pycqa.github.io/isort/), and [flake8](https://flake8.pycqa.org/en/latest/). These tools ensure a consistent code format throughout the project. Using pre-commit ensures that linter versions and options are aligned for all developers. Additionally, there is a CI check in place to enforce that committed code follows our standards. To use `pre-commit`, install via `conda` or `pip`: ```bash conda install -c conda-forge pre-commit ``` ```bash pip install pre-commit ``` Then run pre-commit hooks before committing code: ```bash pre-commit run ``` Optionally, you may set up the pre-commit hooks to run automatically when you make a git commit. This can be done by running: ```bash pre-commit install ``` Now code linters and formatters will be run each time you commit changes. You can skip these checks with `git commit --no-verify` or with the short version `git commit -n`. ### Seasoned developers Once you have gotten your feet wet and are more comfortable with the code, you can look at the prioritized issues of our next release in our [project boards](https://github.com/rapidsai/CUVS/projects). > **Pro Tip:** Always look at the release board with the highest number for issues to work on. This is where RAPIDS developers also focus their efforts. Look at the unassigned issues, and find an issue you are comfortable with contributing to. Start with _Step 3_ from above, commenting on the issue to let others know you are working on it. If you have any questions related to the implementation of the issue, ask them in the issue instead of the PR. 
## Attribution Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md
0
rapidsai_public_repos/cuvs/docs
rapidsai_public_repos/cuvs/docs/source/ann_benchmarks_param_tuning.md
# ANN Benchmarks Parameter Tuning Guide This guide outlines the various parameter settings that can be specified in [RAFT ANN Benchmark](raft_ann_benchmarks.md) json configuration files and explains the impact they have on corresponding algorithms to help inform their settings for benchmarking across desired levels of recall. ## RAFT Indexes ### `raft_brute_force` Use RAFT brute-force index for exact search. Brute-force has no further build or search parameters. ### `raft_ivf_flat` IVF-flat uses an inverted-file index, which partitions the vectors into a series of clusters, or lists, storing them in an interleaved format which is optimized for fast distance computation. The searching of an IVF-flat index reduces the total vectors in the index to those within some user-specified nearest clusters called probes. IVF-flat is a simple algorithm which won't save any space, but it provides competitive search times even at higher levels of recall. | Parameter | Type | Required | Data Type | Default | Description | |----------------------|------------------|----------|----------------------------|----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nlist` | `build_param` | Y | Positive Integer >0 | | Number of clusters to partition the vectors into. Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `niter` | `build_param` | N | Positive Integer >0 | 20 | Number of k-means iterations to use when training the clusters. | | `ratio` | `build_param` | N | Positive Integer >0 | 2 | `1/ratio` is the number of training points which should be used to train the clusters.
| | `dataset_memory_type` | `build_param` | N | ["device", "host", "mmap"] | "device" | What memory type should the dataset reside? | | `query_memory_type` | `search_params` | N | ["device", "host", "mmap"] | "device | What memory type should the queries reside? | | `nprobe` | `search_params` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. | ### `raft_ivf_pq` IVF-pq is an inverted-file index, which partitions the vectors into a series of clusters, or lists, in a similar way to IVF-flat above. The difference is that IVF-PQ uses product quantization to also compress the vectors, giving the index a smaller memory footprint. Unfortunately, higher levels of compression can also shrink recall, which a refinement step can improve when the original vectors are still available. | Parameter | Type | Required | Data Type | Default | Description | |------------------------|----------------|---|----------------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nlist` | `build_param` | Y | Positive Integer >0 | | Number of clusters to partition the vectors into. Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `niter` | `build_param` | N | Positive Integer >0 | 20 | Number of k-means iterations to use when training the clusters. | | `ratio` | `build_param` | N | Positive Integer >0 | 2 | `1/ratio` is the number of training points which should be used to train the clusters. | | `pq_dim` | `build_param` | N | Positive Integer. Multiple of 8. | 0 | Dimensionality of the vector after product quantization. When 0, a heuristic is used to select this value. `pq_dim` * `pq_bits` must be a multiple of 8. 
| | `pq_bits` | `build_param` | N | Positive Integer. [4-8] | 8 | Bit length of the vector element after quantization. | | `codebook_kind` | `build_param` | N | ["cluster", "subspace"] | "subspace" | Type of codebook. See the [API docs](https://docs.rapids.ai/api/raft/nightly/cpp_api/neighbors_ivf_pq/#_CPPv412codebook_gen) for more detail | | `dataset_memory_type` | `build_param` | N | ["device", "host", "mmap"] | "device" | What memory type should the dataset reside? | | `query_memory_type` | `search_params` | N | ["device", "host", "mmap"] | "device | What memory type should the queries reside? | | `nprobe` | `search_params` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. | | `internalDistanceDtype` | `search_params` | N | [`float`, `half`] | `half` | The precision to use for the distance computations. Lower precision can increase performance at the cost of accuracy. | | `smemLutDtype` | `search_params` | N | [`float`, `half`, `fp8`] | `half` | The precision to use for the lookup table in shared memory. Lower precision can increase performance at the cost of accuracy. | | `refine_ratio` | `search_params` | N| Positive Number >=0 | 0 | `refine_ratio * k` nearest neighbors are queried from the index initially and an additional refinement step improves recall by selecting only the best `k` neighbors. | ### `raft_cagra` <a id='raft-cagra'></a>CAGRA uses a graph-based index, which creates an intermediate, approximate kNN graph using IVF-PQ and then further refining and optimizing to create a final kNN graph. This kNN graph is used by CAGRA as an index for search. 
| Parameter | Type | Required | Data Type | Default | Description | |-----------------------------|----------------|----------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `graph_degree` | `build_param` | N | Positive Integer >0 | 64 | Degree of the final kNN graph index. | | `intermediate_graph_degree` | `build_param` | N | Positive Integer >0 | 128 | Degree of the intermediate kNN graph. | | `graph_build_algo` | `build_param` | N | ["IVF_PQ", "NN_DESCENT"] | "IVF_PQ" | Algorithm to use for building the kNN graph | | `dataset_memory_type` | `build_param` | N | ["device", "host", "mmap"] | "device" | What memory type should the dataset reside while constructing the index? | | `query_memory_type` | `search_params` | N | ["device", "host", "mmap"] | "device" | What memory type should the queries reside? | | `itopk` | `search_param` | N | Positive Integer >0 | 64 | Number of intermediate search results retained during the search. Higher values improve search accuracy at the cost of speed. | | `search_width` | `search_param` | N | Positive Integer >0 | 1 | Number of graph nodes to select as the starting point for the search in each iteration. | | `max_iterations` | `search_param` | N | Integer >=0 | 0 | Upper limit of search iterations. Auto select when 0. | | `algo` | `search_param` | N | string | "auto" | Algorithm to use for search. Possible values: {"auto", "single_cta", "multi_cta", "multi_kernel"} | | `graph_memory_type` | `search_param` | N | string | "device" | Memory type to store graph. Must be one of {"device", "host_pinned", "host_huge_page"}. | | `internal_dataset_memory_type` | `search_param` | N | string | "device" | Memory type to store dataset in the index. Must be one of {"device", "host_pinned", "host_huge_page"}. 
| The `graph_memory_type` or `internal_dataset_memory_type` options can be useful for large datasets that do not fit the device memory. Setting `internal_dataset_memory_type` other than `device` has negative impact on search speed. Using `host_huge_page` option is only supported on systems with Heterogeneous Memory Management or on platforms that natively support GPU access to system allocated memory, for example Grace Hopper. To fine tune CAGRA index building we can customize IVF-PQ index builder options using the following settings. These take effect only if `graph_build_algo == "IVF_PQ"`. It is recommended to experiment using a separate IVF-PQ index to find the config that gives the largest QPS for large batch. Recall does not need to be very high, since CAGRA further optimizes the kNN neighbor graph. Some of the default values are derived from the dataset size which is assumed to be [n_vecs, dim]. | Parameter | Type | Required | Data Type | Default | Description | |------------------------|----------------|---|----------------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `ivf_pq_build_nlist` | `build_param` | N | Positive Integer >0 | n_vecs / 2500 | Number of clusters to partition the vectors into. Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `ivf_pq_build_niter` | `build_param` | N | Positive Integer >0 | 25 | Number of k-means iterations to use when training the clusters. | | `ivf_pq_build_ratio` | `build_param` | N | Positive Integer >0 | 10 | `1/ratio` is the number of training points which should be used to train the clusters. | | `ivf_pq_build_pq_dim` | `build_param` | N | Positive Integer. Multiple of 8. | dim/2 rounded up to 8 | Dimensionality of the vector after product quantization. 
When 0, a heuristic is used to select this value. `pq_dim` * `pq_bits` must be a multiple of 8. | | `ivf_pq_build_pq_bits` | `build_param` | N | Positive Integer. [4-8] | 8 | Bit length of the vector element after quantization. | | `ivf_pq_build_codebook_kind` | `build_param` | N | ["cluster", "subspace"] | "subspace" | Type of codebook. See the [API docs](https://docs.rapids.ai/api/raft/nightly/cpp_api/neighbors_ivf_pq/#_CPPv412codebook_gen) for more detail | | `ivf_pq_search_nprobe` | `build_params` | N | Positive Integer >0 | min(2*dim, nlist) | The closest number of clusters to search for each query vector. | | `ivf_pq_search_internalDistanceDtype` | `build_params` | N | [`float`, `half`] | `fp8` | The precision to use for the distance computations. Lower precision can increase performance at the cost of accuracy. | | `ivf_pq_search_smemLutDtype` | `build_params` | N | [`float`, `half`, `fp8`] | `half` | The precision to use for the lookup table in shared memory. Lower precision can increase performance at the cost of accuracy. | | `ivf_pq_search_refine_ratio` | `build_params` | N| Positive Number >=0 | 2 | `refine_ratio * k` nearest neighbors are queried from the index initially and an additional refinement step improves recall by selecting only the best `k` neighbors. | Alternatively, if `graph_build_algo == "NN_DESCENT"`, then we can customize the following parameters | Parameter | Type | Required | Data Type | Default | Description | |-----------------------------|----------------|----------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nn_descent_niter` | `build_param` | N | Positive Integer>0 | 20 | Number of NN Descent iterations. 
| | `nn_descent_intermediate_graph_degree` | `build_param` | N | Positive Integer>0 | `intermediate_graph_degree` * 1.5 | Intermediate graph degree during NN descent iterations | | `nn_descent_max_iterations` | `build_param` | N | Positive Integer>0 | 20 | Alias for `nn_descent_niter` | | `nn_descent_termination_threshold` | `build_param` | N | Positive float>0 | 0.0001 | Termination threshold for NN descent. | ### `raft_cagra_hnswlib` This is a benchmark that enables interoperability between an index built with `CAGRA` and search with `HNSW`. It uses the `CAGRA` built graph as the base layer of an `hnswlib` index to search queries only within the base layer (this is enabled with a simple patch to `hnswlib`). `build_param` : Same as `build_param` of [CAGRA](#raft-cagra) `search_param` : Same as `search_param` of [hnswlib](#hnswlib) ## FAISS Indexes ### `faiss_gpu_flat` Use FAISS flat index on the GPU, which performs an exact search using brute-force and doesn't have any further build or search parameters. ### `faiss_gpu_ivf_flat` IVF-flat uses an inverted-file index, which partitions the vectors into a series of clusters, or lists, storing them in an interleaved format which is optimized for fast distance computation. The searching of an IVF-flat index reduces the total vectors in the index to those within some user-specified nearest clusters called probes. IVF-flat is a simple algorithm which won't save any space, but it provides competitive search times even at higher levels of recall. | Parameter | Type | Required | Data Type | Default | Description | |-----------|----------------|----------|---------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nlists` | `build_param` | Y | Positive Integer >0 | | Number of clusters to partition the vectors into. 
Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `ratio` | `build_param` | N | Positive Integer >0 | 2 | `1/ratio` is the number of training points which should be used to train the clusters. | | `nprobe` | `search_params` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. | ### `faiss_gpu_ivf_pq` IVF-pq is an inverted-file index, which partitions the vectors into a series of clusters, or lists, in a similar way to IVF-flat above. The difference is that IVF-PQ uses product quantization to also compress the vectors, giving the index a smaller memory footprint. Unfortunately, higher levels of compression can also shrink recall, which a refinement step can improve when the original vectors are still available. | Parameter | Type | Required | Data Type | Default | Description | |------------------|----------------|----------|----------------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nlist` | `build_param` | Y | Positive Integer >0 | | Number of clusters to partition the vectors into. Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `ratio` | `build_param` | N | Positive Integer >0 | 2 | `1/ratio` is the number of training points which should be used to train the clusters. | | `M_ratio` | `build_param` | Y | Positive Integer Power of 2 [8-64] | | Ratio of number of chunks or subquantizers for each vector. Computed by `dims` / `M_ratio` | | `usePrecomputed` | `build_param` | N | Boolean. Default=`false` | `false` | Use pre-computed lookup tables to speed up search at the cost of increased memory usage. 
| | `useFloat16` | `build_param` | N | Boolean. Default=`false` | `false` | Use half-precision floats for clustering step. | | `nprobe` | `search_params` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. | | `refine_ratio` | `search_params` | N| Positive Number >=0 | 0 | `refine_ratio * k` nearest neighbors are queried from the index initially and an additional refinement step improves recall by selecting only the best `k` neighbors. | ### `faiss_cpu_flat` Use FAISS flat index on the CPU, which performs an exact search using brute-force and doesn't have any further build or search parameters. | Parameter | Type | Required | Data Type | Default | Description | |-----------|----------------|----------|---------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `numThreads` | `search_params` | N | Positive Integer >0 | 1 | Number of threads to use for queries. | ### `faiss_cpu_ivf_flat` Use FAISS IVF-Flat index on CPU | Parameter | Type | Required | Data Type | Default | Description | |----------|----------------|----------|---------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nlist` | `build_param` | Y | Positive Integer >0 | | Number of clusters to partition the vectors into. Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `ratio` | `build_param` | N | Positive Integer >0 | 2 | `1/ratio` is the number of training points which should be used to train the clusters. 
| | `nprobe` | `search_params` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. | | `numThreads` | `search_params` | N | Positive Integer >0 | 1 | Number of threads to use for queries. | ### `faiss_cpu_ivf_pq` Use FAISS IVF-PQ index on CPU | Parameter | Type | Required | Data Type | Default | Description | |------------------|----------------|----------|------------------------------------|---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `nlist` | `build_param` | Y | Positive Integer >0 | | Number of clusters to partition the vectors into. Larger values will put less points into each cluster but this will impact index build time as more clusters need to be trained. | | `ratio` | `build_param` | N | Positive Integer >0 | 2 | `1/ratio` is the number of training points which should be used to train the clusters. | | `M` | `build_param` | Y | Positive Integer Power of 2 [8-64] | | Number of chunks or subquantizers for each vector. | | `usePrecomputed` | `build_param` | N | Boolean. Default=`false` | `false` | Use pre-computed lookup tables to speed up search at the cost of increased memory usage. | | `bitsPerCode` | `build_param` | N | Positive Integer [4-8] | 8 | Number of bits to use for each code. | | `nprobe` | `search_params` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. | | `refine_ratio` | `search_params` | N| Positive Number >=0 | 0 | `refine_ratio * k` nearest neighbors are queried from the index initially and an additional refinement step improves recall by selecting only the best `k` neighbors. 
| | `numThreads` | `search_params` | N | Positive Integer >0 | 1 | Number of threads to use for queries. | ## HNSW <a id='hnswlib'></a> ### `hnswlib` | Parameter | Type | Required | Data Type | Default | Description | |------------------|-----------------|----------|--------------------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `efConstruction` | `build_param` | Y | Positive Integer >0 | | Controls index time and accuracy. Bigger values increase the index quality. At some point, increasing this will no longer improve the quality. | | `M` | `build_param` | Y | Positive Integer often between 2-100 | | Number of bi-directional links create for every new element during construction. Higher values work for higher intrinsic dimensionality and/or high recall, low values can work for datasets with low intrinsic dimensionality and/or low recalls. Also affects the algorithm's memory consumption. | | `numThreads` | `build_param` | N | Positive Integer >0 | 1 | Number of threads to use to build the index. | | `ef` | `search_param` | Y | Positive Integer >0 | | Size of the dynamic list for the nearest neighbors used for search. Higher value leads to more accurate but slower search. Cannot be lower than `k`. | | `numThreads` | `search_params` | N | Positive Integer >0 | 1 | Number of threads to use for queries. | Please refer to [HNSW algorithm parameters guide](https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md) from `hnswlib` to learn more about these arguments.
0
rapidsai_public_repos/cuvs/docs
rapidsai_public_repos/cuvs/docs/source/ann_benchmarks_low_level.md
### Low-level Scripts and Executables #### End-to-end Example An end-to-end example (run from the RAFT source code root directory): ```bash # (0) get raft sources git clone https://github.com/rapidsai/raft.git cd raft # (1) prepare a dataset export PYTHONPATH=python/raft-ann-bench/src:$PYTHONPATH python -m raft-ann-bench.get_dataset --dataset glove-100-angular --normalize # option --normalize is used here to normalize vectors so cosine distance is converted # to inner product; don't use -n for l2 distance # (2) build index $CONDA_PREFIX/bin/ann/RAFT_IVF_FLAT_ANN_BENCH \ --data_prefix=datasets \ --build \ --benchmark_filter="raft_ivf_flat\..*" \ python/raft-ann-bench/src/raft-ann-bench/run/conf/glove-100-inner.json # (3) search $CONDA_PREFIX/bin/ann/RAFT_IVF_FLAT_ANN_BENCH\ --data_prefix=datasets \ --benchmark_min_time=2s \ --benchmark_out=ivf_flat_search.csv \ --benchmark_out_format=csv \ --benchmark_counters_tabular \ --search \ --benchmark_filter="raft_ivf_flat\..*" \ python/raft-ann-bench/src/raft-ann-bench/run/conf/glove-100-inner.json # optional step: plot QPS-Recall figure using data in ivf_flat_search.csv with your favorite tool ``` ##### Step 1: Prepare Dataset Note: the preferred way to download and process smaller (million scale) datasets is to use the `get_dataset` script as demonstrated in the example above. A dataset usually has 4 binary files containing database vectors, query vectors, ground truth neighbors and their corresponding distances. For example, Glove-100 dataset has files `base.fbin` (database vectors), `query.fbin` (query vectors), `groundtruth.neighbors.ibin` (ground truth neighbors), and `groundtruth.distances.fbin` (ground truth distances). The first two files are for index building and searching, while the other two are associated with a particular distance and are used for evaluation. 
The file suffixes `.fbin`, `.f16bin`, `.ibin`, `.u8bin`, and `.i8bin` denote that the data type of vectors stored in the file are `float32`, `float16`(a.k.a `half`), `int`, `uint8`, and `int8`, respectively. These binary files are little-endian and the format is: the first 8 bytes are `num_vectors` (`uint32_t`) and `num_dimensions` (`uint32_t`), and the following `num_vectors * num_dimensions * sizeof(type)` bytes are vectors stored in row-major order. Some implementations can take `float16` database and query vectors as inputs and will have better performance. Use `python/raft-ann-bench/src/raft-ann-bench/get_dataset/fbin_to_f16bin.py` to transform a dataset from `float32` to `float16` type. Commonly used datasets can be downloaded from two websites: 1. Million-scale datasets can be found at the [Data sets](https://github.com/erikbern/ann-benchmarks#data-sets) section of [`ann-benchmarks`](https://github.com/erikbern/ann-benchmarks). However, these datasets are in HDF5 format. Use `python/raft-ann-bench/src/raft-ann-bench/get_dataset/hdf5_to_fbin.py` to transform the format. A few Python packages are required to run it: ```bash pip3 install numpy h5py ``` The usage of this script is: ```bash $ cpp/bench/ann/scripts/hdf5_to_fbin.py usage: scripts/hdf5_to_fbin.py [-n] <input>.hdf5 -n: normalize base/query set outputs: <input>.base.fbin <input>.query.fbin <input>.groundtruth.neighbors.ibin <input>.groundtruth.distances.fbin ``` So for an input `.hdf5` file, four output binary files will be produced. See previous section for an example of preprocessing the GloVe dataset. Most datasets provided by `ann-benchmarks` use `Angular` or `Euclidean` distance. `Angular` denotes cosine distance. However, computing cosine distance reduces to computing inner product by normalizing vectors beforehand. In practice, we can always do the normalization to decrease computation cost, so it's better to measure the performance of inner product rather than cosine distance. 
The `-n` option of `hdf5_to_fbin.py` can be used to normalize the dataset. 2. Billion-scale datasets can be found at [`big-ann-benchmarks`](http://big-ann-benchmarks.com). The ground truth file contains both neighbors and distances, thus should be split. A script is provided for this: ```bash $ python/raft-ann-bench/src/raft-ann-bench/split_groundtruth/split_groundtruth.pl usage: split_groundtruth.pl input output_prefix ``` Take Deep-1B dataset as an example: ```bash pushd cd cpp/bench/ann mkdir -p data/deep-1B && cd data/deep-1B # download manually "Ground Truth" file of "Yandex DEEP" # suppose the file name is deep_new_groundtruth.public.10K.bin /path/to/raft/python/raft-ann-bench/src/raft-ann-bench/split_groundtruth/split_groundtruth.pl deep_new_groundtruth.public.10K.bin groundtruth # two files 'groundtruth.neighbors.ibin' and 'groundtruth.distances.fbin' should be produced popd ``` Besides ground truth files for the whole billion-scale datasets, this site also provides ground truth files for the first 10M or 100M vectors of the base sets. This means we can use these billion-scale datasets as million-scale datasets. To facilitate this, an optional parameter `subset_size` for dataset can be used. See the next step for further explanation. ##### Step 2: Build Index An index is a data structure to facilitate searching. Different algorithms may use different data structures for their index. We can use `RAFT_IVF_FLAT_ANN_BENCH --build` to build an index and save it to disk. To run a benchmark executable, like `RAFT_IVF_FLAT_ANN_BENCH`, a JSON configuration file is required. Refer to [`cpp/bench/ann/conf/glove-100-inner.json`](../../cpp/bench/ann/conf/glove-100-inner.json) as an example. The configuration file has 3 sections: * `dataset` section specifies the name and files of a dataset, and also the distance in use. 
Since the `*_ANN_BENCH` programs are for index building and searching, only `base_file` for database vectors and `query_file` for query vectors are needed. Ground truth files are for evaluation thus not needed. - To use only a subset of the base dataset, an optional parameter `subset_size` can be specified. It means using only the first `subset_size` vectors of `base_file` as the base dataset. * `search_basic_param` section specifies basic parameters for searching: - `k` is the "k" in "k-nn", that is, the number of neighbors (or results) we want from the searching. * `index` section specifies an array of configurations for index building and searching: - `build_param` and `search_params` are parameters for building and searching, respectively. `search_params` is an array since we will search with different parameters to get different recall values. - `file` is the file name of index. Building will save built index to this file, while searching will load this file. - if `refine_ratio` is specified, refinement, as a post-processing step of search, will be done. It's for algorithms that compress vectors. For example, if `"refine_ratio" : 2` is set, 2`k` results are first computed, then exact distances of them are computed using original uncompressed vectors, and finally top `k` results among them are kept. 
The usage of `*_ANN_BENCH` can be found by running `*_ANN_BENCH --help` on one of the executables: ```bash $ ./cpp/build/*_ANN_BENCH --help benchmark [--benchmark_list_tests={true|false}] [--benchmark_filter=<regex>] [--benchmark_min_time=`<integer>x` OR `<float>s` ] [--benchmark_min_warmup_time=<min_warmup_time>] [--benchmark_repetitions=<num_repetitions>] [--benchmark_enable_random_interleaving={true|false}] [--benchmark_report_aggregates_only={true|false}] [--benchmark_display_aggregates_only={true|false}] [--benchmark_format=<console|json|csv>] [--benchmark_out=<filename>] [--benchmark_out_format=<json|console|csv>] [--benchmark_color={auto|true|false}] [--benchmark_counters_tabular={true|false}] [--benchmark_context=<key>=<value>,...] [--benchmark_time_unit={ns|us|ms|s}] [--v=<verbosity>] [--build|--search] [--overwrite] [--data_prefix=<prefix>] <conf>.json Note the non-standard benchmark parameters: --build: build mode, will build index --search: search mode, will search using the built index one and only one of --build and --search should be specified --overwrite: force overwriting existing index files --data_prefix=<prefix>: prepend <prefix> to dataset file paths specified in the <conf>.json. --override_kv=<key:value1:value2:...:valueN>: override a build/search key one or more times multiplying the number of configurations; you can use this parameter multiple times to get the Cartesian product of benchmark configs. ``` * `--build`: build index. * `--search`: do the searching with built index. * `--overwrite`: by default, the building mode skips building an index if it finds out that it already exists. This is useful when adding more configurations to the config; only new indices are built without the need to specify an elaborate filtering regex. By supplying the `overwrite` flag, you disable this behavior; all indices are built regardless of whether they are already stored on disk. * `--data_prefix`: prepend an arbitrary path to the data file paths. 
By default, it is equal to `data`. Note, this does not apply to index file paths. * `--override_kv`: override a build/search key one or more times multiplying the number of configurations. In addition to these ANN-specific flags, you can use all of the standard google benchmark flags. Some of the useful flags: * `--benchmark_filter`: specify subset of benchmarks to run * `--benchmark_out`, `--benchmark_out_format`: store the output to a file * `--benchmark_list_tests`: check the available configurations * `--benchmark_min_time`: specify the minimum duration or number of iterations per case to improve accuracy of the benchmarks. Refer to the google benchmark [user guide](https://github.com/google/benchmark/blob/main/docs/user_guide.md#command-line) for more information about the command-line usage. ##### Step 3: Searching Use the `--search` flag on any of the `*_ANN_BENCH` executables. Other options are the same as in step 2. ## Adding a new ANN algorithm Implementation of a new algorithm should be a class that inherits `class ANN` (defined in `cpp/bench/ann/src/ann.h`) and implements all the pure virtual functions. In addition, it should define two `struct`s for building and searching parameters. The searching parameter class should inherit `struct ANN<T>::AnnSearchParam`. Take `class HnswLib` as an example, its definition is: ```c++ template<typename T> class HnswLib : public ANN<T> { public: struct BuildParam { int M; int ef_construction; int num_threads; }; using typename ANN<T>::AnnSearchParam; struct SearchParam : public AnnSearchParam { int ef; int num_threads; }; // ... }; ``` The benchmark program uses a JSON configuration file. To add the new algorithm to the benchmark, one needs to be able to specify `build_param`, whose value is a JSON object, and `search_params`, whose value is an array of JSON objects, for this algorithm in the configuration file. 
Still take the configuration for `HnswLib` as an example: ```json { "name" : "...", "algo" : "hnswlib", "build_param": {"M":12, "efConstruction":500, "numThreads":32}, "file" : "/path/to/file", "search_params" : [ {"ef":10, "numThreads":1}, {"ef":20, "numThreads":1}, {"ef":40, "numThreads":1} ] }, ``` How to interpret these JSON objects is totally left to the implementation and should be specified in `cpp/bench/ann/src/factory.cuh`: 1. First, add two functions for parsing JSON object to `struct BuildParam` and `struct SearchParam`, respectively: ```c++ template<typename T> void parse_build_param(const nlohmann::json& conf, typename cuann::HnswLib<T>::BuildParam& param) { param.ef_construction = conf.at("efConstruction"); param.M = conf.at("M"); if (conf.contains("numThreads")) { param.num_threads = conf.at("numThreads"); } } template<typename T> void parse_search_param(const nlohmann::json& conf, typename cuann::HnswLib<T>::SearchParam& param) { param.ef = conf.at("ef"); if (conf.contains("numThreads")) { param.num_threads = conf.at("numThreads"); } } ``` 2. Next, add corresponding `if` case to functions `create_algo()` and `create_search_param()` by calling parsing functions. The string literal in `if` condition statement must be the same as the value of `algo` in configuration file. For example, ```c++ // JSON configuration file contains a line like: "algo" : "hnswlib" if (algo == "hnswlib") { // ... } ```
0
rapidsai_public_repos/cuvs/docs
rapidsai_public_repos/cuvs/docs/source/quick_start.md
# Quick Start This guide is meant to provide a quick-start tutorial for interacting with RAFT's C++ & Python APIs. ## RAPIDS Memory Manager (RMM) RAFT relies heavily on the [RMM](https://github.com/rapidsai/rmm) library which eases the burden of configuring different allocation strategies globally across the libraries that use it. ## Multi-dimensional Spans and Arrays Most of the APIs in RAFT accept [mdspan](https://arxiv.org/abs/2010.06474) multi-dimensional array view for representing data in higher dimensions similar to the `ndarray` in the Numpy Python library. RAFT also contains the corresponding owning `mdarray` structure, which simplifies the allocation and management of multi-dimensional data in both host and device (GPU) memory. The `mdarray` is an owning object that forms a convenience layer over RMM and can be constructed in RAFT using a number of different helper functions: ```c++ #include <raft/core/device_mdarray.hpp> int n_rows = 10; int n_cols = 10; auto scalar = raft::make_device_scalar<float>(handle, 1.0); auto vector = raft::make_device_vector<float>(handle, n_cols); auto matrix = raft::make_device_matrix<float>(handle, n_rows, n_cols); ``` The `mdspan` is a lightweight non-owning view that can wrap around any pointer, maintaining shape, layout, and indexing information for accessing elements. We can construct `mdspan` instances directly from the above `mdarray` instances: ```c++ // Scalar mdspan on device auto scalar_view = scalar.view(); // Vector mdspan on device auto vector_view = vector.view(); // Matrix mdspan on device auto matrix_view = matrix.view(); ``` Since the `mdspan` is just a lightweight wrapper, we can also construct it from the underlying data handles in the `mdarray` instances above. We use the extent to get information about the `mdarray` or `mdspan`'s shape. 
```c++ #include <raft/core/device_mdspan.hpp> auto scalar_view = raft::make_device_scalar_view(scalar.data_handle()); auto vector_view = raft::make_device_vector_view(vector.data_handle(), vector.extent(0)); auto matrix_view = raft::make_device_matrix_view(matrix.data_handle(), matrix.extent(0), matrix.extent(1)); ``` Of course, RAFT's `mdspan`/`mdarray` APIs aren't just limited to the `device`. You can also create `host` variants: ```c++ #include <raft/core/host_mdarray.hpp> #include <raft/core/host_mdspan.hpp> int n_rows = 10; int n_cols = 10; auto scalar = raft::make_host_scalar<float>(handle, 1.0); auto vector = raft::make_host_vector<float>(handle, n_cols); auto matrix = raft::make_host_matrix<float>(handle, n_rows, n_cols); auto scalar_view = raft::make_host_scalar_view(scalar.data_handle()); auto vector_view = raft::make_host_vector_view(vector.data_handle(), vector.extent(0)); auto matrix_view = raft::make_host_matrix_view(matrix.data_handle(), matrix.extent(0), matrix.extent(1)); ``` And `managed` variants: ```c++ #include <raft/core/device_mdspan.hpp> int n_rows = 10; int n_cols = 10; auto matrix = raft::make_managed_mdspan(managed_ptr, raft::make_matrix_extents(n_rows, n_cols)); ``` You can also create strided mdspans: ```c++ #include <raft/core/device_mdspan.hpp> int n_elements = 10; int stride = 10; auto vector = raft::make_device_vector_view(vector_ptr, raft::make_vector_strided_layout(n_elements, stride)); ``` ## C++ Example Most of the primitives in RAFT accept a `raft::handle_t` object for the management of resources which are expensive to create, such CUDA streams, stream pools, and handles to other CUDA libraries like `cublas` and `cusolver`. 
The example below demonstrates creating a RAFT handle and using it with `device_matrix` and `device_vector` to allocate memory, generating random clusters, and computing pairwise Euclidean distances: ```c++ #include <raft/core/handle.hpp> #include <raft/core/device_mdarray.hpp> #include <raft/random/make_blobs.cuh> #include <raft/distance/distance.cuh> raft::handle_t handle; int n_samples = 5000; int n_features = 50; auto input = raft::make_device_matrix<float>(handle, n_samples, n_features); auto labels = raft::make_device_vector<int>(handle, n_samples); auto output = raft::make_device_matrix<float>(handle, n_samples, n_samples); raft::random::make_blobs(handle, input.view(), labels.view()); auto metric = raft::distance::DistanceType::L2SqrtExpanded; raft::distance::pairwise_distance(handle, input.view(), input.view(), output.view(), metric); ``` ## Python Example The `pylibraft` package contains a Python API for RAFT algorithms and primitives. `pylibraft` integrates nicely into other libraries by being very lightweight with minimal dependencies and accepting any object that supports the `__cuda_array_interface__`, such as [CuPy's ndarray](https://docs.cupy.dev/en/stable/user_guide/interoperability.html#rmm). The number of RAFT algorithms exposed in this package is continuing to grow from release to release. The example below demonstrates computing the pairwise Euclidean distances between CuPy arrays. Note that CuPy is not a required dependency for `pylibraft`. 
```python import cupy as cp from pylibraft.distance import pairwise_distance n_samples = 5000 n_features = 50 in1 = cp.random.random_sample((n_samples, n_features), dtype=cp.float32) in2 = cp.random.random_sample((n_samples, n_features), dtype=cp.float32) output = pairwise_distance(in1, in2, metric="euclidean") ``` The `output` array in the above example is of type `raft.common.device_ndarray`, which supports [__cuda_array_interface__](https://numba.pydata.org/numba-doc/dev/cuda/cuda_array_interface.html#cuda-array-interface-version-2) making it interoperable with other libraries like CuPy, Numba, and PyTorch that also support it. CuPy supports DLPack, which also enables zero-copy conversion from `raft.common.device_ndarray` to JAX and Tensorflow. Below is an example of converting the output `pylibraft.common.device_ndarray` to a CuPy array: ```python cupy_array = cp.asarray(output) ``` And converting to a PyTorch tensor: ```python import torch torch_tensor = torch.as_tensor(output, device='cuda') ``` When the corresponding library has been installed and available in your environment, this conversion can also be done automatically by all RAFT compute APIs by setting a global configuration option: ```python import pylibraft.config pylibraft.config.set_output_as("cupy") # All compute APIs will return cupy arrays pylibraft.config.set_output_as("torch") # All compute APIs will return torch tensors ``` You can also specify a `callable` that accepts a `pylibraft.common.device_ndarray` and performs a custom conversion. 
The following example converts all output to `numpy` arrays: ```python pylibraft.config.set_output_as(lambda device_ndarray: device_ndarray.copy_to_host()) ``` `pylibraft` also supports writing to a pre-allocated output array so any `__cuda_array_interface__` supported array can be written to in-place: ```python import cupy as cp from pylibraft.distance import pairwise_distance n_samples = 5000 n_features = 50 in1 = cp.random.random_sample((n_samples, n_features), dtype=cp.float32) in2 = cp.random.random_sample((n_samples, n_features), dtype=cp.float32) output = cp.empty((n_samples, n_samples), dtype=cp.float32) pairwise_distance(in1, in2, out=output, metric="euclidean") ```
0
rapidsai_public_repos/cuvs/docs
rapidsai_public_repos/cuvs/docs/source/raft_ann_benchmarks.md
# RAFT ANN Benchmarks This project provides a benchmark program for various ANN search implementations. It's especially suitable for comparing GPU implementations as well as comparing GPU against CPU. ## Table of Contents - [Installing the benchmarks](#installing-the-benchmarks) - [Conda](#conda) - [Docker](#docker) - [How to run the benchmarks](#how-to-run-the-benchmarks) - [Step 1: prepare dataset](#step-1-prepare-dataset) - [Step 2: build and search index](#step-2-build-and-search-index) - [Step 3: data export](#step-3-data-export) - [Step 4: plot results](#step-4-plot-results) - [Running the benchmarks](#running-the-benchmarks) - [End to end: small-scale (<1M to 10M)](#end-to-end-small-scale-benchmarks-1m-to-10m) - [End to end: large-scale (>10M)](#end-to-end-large-scale-benchmarks-10m-vectors) - [Running with Docker containers](#running-with-docker-containers) - [Evaluating the results](#evaluating-the-results) - [Creating and customizing dataset configurations](#creating-and-customizing-dataset-configurations) - [Adding a new ANN algorithm](#adding-a-new-ann-algorithm) - [Parameter tuning guide](https://docs.rapids.ai/api/raft/nightly/ann_benchmarks_param_tuning/) - [Wiki-all RAG/LLM Dataset](https://docs.rapids.ai/api/raft/nightly/wiki_all_dataset/) ## Installing the benchmarks There are two main ways pre-compiled benchmarks are distributed: - [Conda](#Conda): For users not using containers but want an easy to install and use Python package. Pip wheels are planned to be added as an alternative for users that cannot use conda and prefer to not use containers. - [Docker](#Docker): Only needs docker and [NVIDIA docker](https://github.com/NVIDIA/nvidia-docker) to use. Provides a single docker run command for basic dataset benchmarking, as well as all the functionality of the conda solution inside the containers. ## Conda If containers are not an option or not preferred, the easiest way to install the ANN benchmarks is through conda. 
We provide packages for GPU enabled systems, as well as for systems without a GPU. We suggest using mamba as it generally leads to a faster install time: ```bash mamba create --name raft_ann_benchmarks conda activate raft_ann_benchmarks # to install GPU package: mamba install -c rapidsai -c conda-forge -c nvidia raft-ann-bench cuda-version=11.8* # to install CPU package for usage in CPU-only systems: mamba install -c rapidsai -c conda-forge raft-ann-bench-cpu ``` The channel `rapidsai` can easily be substituted with `rapidsai-nightly` if nightly benchmarks are desired. The CPU package currently allows running the HNSW benchmarks. Please see the [build instructions](ann_benchmarks_build.md) to build the benchmarks from source. ## Docker We provide images for GPU enabled systems, as well as systems without a GPU. The following images are available: - `raft-ann-bench`: Contains GPU and CPU benchmarks, can run all algorithms supported. Will download million-scale datasets as required. Best suited for users that prefer a smaller container size for GPU based systems. Requires the NVIDIA Container Toolkit to run GPU algorithms, can run CPU algorithms without it. - `raft-ann-bench-datasets`: Contains the GPU and CPU benchmarks with million-scale datasets already included in the container. Best suited for users that want to run multiple million scale datasets already included in the image. - `raft-ann-bench-cpu`: Contains only CPU benchmarks with minimal size. Best suited for users that want the smallest containers to reproduce benchmarks on systems without a GPU. Nightly images are located in [dockerhub](https://hub.docker.com/r/rapidsai/raft-ann-bench/tags), meanwhile release (stable) versions are located in [NGC](https://hub.docker.com/r/rapidsai/raft-ann-bench), starting with release 23.12.
- The following command pulls the nightly container for Python version 3.10, CUDA version 12.0, and RAFT version 24.02a: ```bash docker pull rapidsai/raft-ann-bench:24.02a-cuda12.0-py3.10 #substitute raft-ann-bench for the exact desired container. ``` The CUDA and python versions can be changed to the supported values: Supported CUDA versions: 11.2 and 12.0 Supported Python versions: 3.9 and 3.10. You can see the exact versions as well in the dockerhub site: - [RAFT ANN Benchmark images](https://hub.docker.com/r/rapidsai/raft-ann-bench/tags) - [RAFT ANN Benchmark with datasets preloaded images](https://hub.docker.com/r/rapidsai/raft-ann-bench-datasets/tags) - [RAFT ANN Benchmark CPU only images](https://hub.docker.com/r/rapidsai/raft-ann-bench-cpu/tags) **Note:** GPU containers use the CUDA toolkit from inside the container, the only requirement is a driver installed on the host machine that supports that version. So, for example, CUDA 11.8 containers can run in systems with a CUDA 12.x capable driver. Please also note that the Nvidia-Docker runtime from the [Nvidia Container Toolkit](https://github.com/NVIDIA/nvidia-docker) is required to use GPUs inside docker containers. [//]: # (- The following command &#40;only available after RAPIDS 23.10 release&#41; pulls the container:) [//]: # () [//]: # (```bash) [//]: # (docker pull nvcr.io/nvidia/rapidsai/raft-ann-bench:24.02-cuda11.8-py3.10 #substitute raft-ann-bench for the exact desired container.) [//]: # (```) ## How to run the benchmarks We provide a collection of lightweight Python scripts to run the benchmarks. There are 4 general steps to running the benchmarks and visualizing the results. 1. Prepare Dataset 2. Build Index and Search Index 3. Data Export 4. Plot Results ### Step 1: Prepare Dataset The script `raft-ann-bench.get_dataset` will download and unpack the dataset in a directory that the user provides. As of now, only million-scale datasets are supported by this script.
For more information, see [datasets and formats](ann_benchmarks_dataset.md). The usage of this script is: ```bash usage: get_dataset.py [-h] [--dataset DATASET] [--dataset-path DATASET_PATH] [--normalize] options: -h, --help show this help message and exit --dataset DATASET dataset to download (default: glove-100-angular) --dataset-path DATASET_PATH path to download dataset (default: ${RAPIDS_DATASET_ROOT_DIR}) --normalize normalize cosine distance to inner product (default: False) ``` When option `normalize` is provided to the script, any dataset that has cosine distances will be normalized to inner product. So, for example, the dataset `glove-100-angular` will be written at location `datasets/glove-100-inner/`. ### Step 2: Build and Search Index The script `raft-ann-bench.run` will build and search indices for a given dataset and its specified configuration. The usage of the script `raft-ann-bench.run` is: ```bash usage: __main__.py [-h] [--subset-size SUBSET_SIZE] [-k COUNT] [-bs BATCH_SIZE] [--dataset-configuration DATASET_CONFIGURATION] [--configuration CONFIGURATION] [--dataset DATASET] [--dataset-path DATASET_PATH] [--build] [--search] [--algorithms ALGORITHMS] [--groups GROUPS] [--algo-groups ALGO_GROUPS] [-f] [-m SEARCH_MODE] options: -h, --help show this help message and exit --subset-size SUBSET_SIZE the number of subset rows of the dataset to build the index (default: None) -k COUNT, --count COUNT the number of nearest neighbors to search for (default: 10) -bs BATCH_SIZE, --batch-size BATCH_SIZE number of query vectors to use in each query trial (default: 10000) --dataset-configuration DATASET_CONFIGURATION path to YAML configuration file for datasets (default: None) --configuration CONFIGURATION path to YAML configuration file or directory for algorithms Any run groups found in the specified file/directory will automatically override groups of the same name present in the default configurations, including `base` (default: None) --dataset DATASET name of
dataset (default: glove-100-inner) --dataset-path DATASET_PATH path to dataset folder, by default will look in RAPIDS_DATASET_ROOT_DIR if defined, otherwise a datasets subdirectory from the calling directory (default: os.getcwd()/datasets/) --build --search --algorithms ALGORITHMS run only comma separated list of named algorithms. If parameters `groups` and `algo-groups are both undefined, then group `base` is run by default (default: None) --groups GROUPS run only comma separated groups of parameters (default: base) --algo-groups ALGO_GROUPS add comma separated <algorithm>.<group> to run. Example usage: "--algo-groups=raft_cagra.large,hnswlib.large" (default: None) -f, --force re-run algorithms even if their results already exist (default: False) -m SEARCH_MODE, --search-mode SEARCH_MODE run search in 'latency' (measure individual batches) or 'throughput' (pipeline batches and measure end-to-end) mode (default: throughput) -t SEARCH_THREADS, --search-threads SEARCH_THREADS specify the number threads to use for throughput benchmark. Single value or a pair of min and max separated by ':'. Example --search-threads=1:4. Power of 2 values between 'min' and 'max' will be used. If only 'min' is specified, then a single test is run with 'min' threads. By default min=1, max=<num hyper threads>. (default: None) -r, --dry-run dry-run mode will convert the yaml config for the specified algorithms and datasets to the json format that's consumed by the lower-level c++ binaries and then print the command to run execute the benchmarks but will not actually execute the command. (default: False) ``` `dataset`: name of the dataset to be searched in [datasets.yaml](#yaml-dataset-config) `dataset-configuration`: optional filepath to custom dataset YAML config which has an entry for arg `dataset` `configuration`: optional filepath to YAML configuration for an algorithm or to directory that contains YAML configurations for several algorithms. 
[Here's how to configure an algorithm.](#yaml-algo-config) `algorithms`: runs all algorithms that it can find in YAML configs found by `configuration`. By default, only `base` group will be run. `groups`: run only specific groups of parameter configurations for an algorithm. Groups are defined in YAML configs (see `configuration`), and by default run `base` group `algo-groups`: this parameter is helpful to append any specific algorithm+group combination to run the benchmark for in addition to all the arguments from `algorithms` and `groups`. It is of the format `<algorithm>.<group>`, or for example, `raft_cagra.large` For every algorithm run by this script, it outputs an index build statistics JSON file in `<dataset-path>/<dataset>/result/build/<algo_{group}-{k}-{batch_size}.json>` and an index search statistics JSON file in `<dataset-path>/<dataset>/result/search/<algo_{group}-{k}-{batch_size}.json>`. NOTE: The filenames will not have "_{group}" if `group = "base"`. `dataset-path` : 1. data is read from `<dataset-path>/<dataset>` 2. indices are built in `<dataset-path>/<dataset>/index` 3. build/search results are stored in `<dataset-path>/<dataset>/result` `build` and `search` : if both parameters are not supplied to the script then it is assumed both are `True`. `indices` and `algorithms` : these parameters ensure that the algorithm specified for an index is available in `algos.yaml` and not disabled, as well as having an associated executable. ### Step 3: Data Export The script `raft-ann-bench.data_export` will convert the intermediate JSON outputs produced by `raft-ann-bench.run` to more easily readable CSV files, which are needed to build charts made by `raft-ann-bench.plot`.
```bash usage: data_export.py [-h] [--dataset DATASET] [--dataset-path DATASET_PATH] options: -h, --help show this help message and exit --dataset DATASET dataset to download (default: glove-100-inner) --dataset-path DATASET_PATH path to dataset folder (default: ${RAPIDS_DATASET_ROOT_DIR}) ``` Build statistics CSV file is stored in `<dataset-path/<dataset>/result/build/<algo_group.csv>` and index search statistics CSV file in `<dataset-path/<dataset>/result/search/<algo_group-k{k}-batch_size{batch_size}_{suffix}.csv>`, where suffix has three values: 1. `raw`: All search results are exported 2. `throughput`: Pareto frontier of throughput results is exported 3. `latency`: Pareto frontier of latency results is exported ### Step 4: Plot Results The script `raft-ann-bench.plot` will plot results for all algorithms found in index search statistics CSV files `<dataset-path/<dataset>/result/search/*.csv`. The usage of this script is: ```bash usage: [-h] [--dataset DATASET] [--dataset-path DATASET_PATH] [--output-filepath OUTPUT_FILEPATH] [--algorithms ALGORITHMS] [--groups GROUPS] [--algo-groups ALGO_GROUPS] [-k COUNT] [-bs BATCH_SIZE] [--build] [--search] [--x-scale X_SCALE] [--y-scale {linear,log,symlog,logit}] [--mode {throughput,latency}] [--time-unit {s,ms,us}] [--raw] options: -h, --help show this help message and exit --dataset DATASET dataset to plot (default: glove-100-inner) --dataset-path DATASET_PATH path to dataset folder (default: /home/coder/raft/datasets/) --output-filepath OUTPUT_FILEPATH directory for PNG to be saved (default: /home/coder/raft) --algorithms ALGORITHMS plot only comma separated list of named algorithms. If parameters `groups` and `algo-groups are both undefined, then group `base` is plot by default (default: None) --groups GROUPS plot only comma separated groups of parameters (default: base) --algo-groups ALGO_GROUPS, --algo-groups ALGO_GROUPS add comma separated <algorithm>.<group> to plot. 
Example usage: "--algo-groups=raft_cagra.large,hnswlib.large" (default: None) -k COUNT, --count COUNT the number of nearest neighbors to search for (default: 10) -bs BATCH_SIZE, --batch-size BATCH_SIZE number of query vectors to use in each query trial (default: 10000) --build --search --x-scale X_SCALE Scale to use when drawing the X-axis. Typically linear, logit or a2 (default: linear) --y-scale {linear,log,symlog,logit} Scale to use when drawing the Y-axis (default: linear) --mode {throughput,latency} search mode whose Pareto frontier is used on the y-axis (default: throughput) --time-unit {s,ms,us} time unit to plot when mode is latency (default: ms) --raw Show raw results (not just Pareto frontier) of mode arg (default: False) ``` `mode`: plots pareto frontier of `throughput` or `latency` results exported in the previous step `algorithms`: plots all algorithms that it can find results for the specified `dataset`. By default, only `base` group will be plotted. `groups`: plot only specific groups of parameters configurations for an algorithm. Groups are defined in YAML configs (see `configuration`), and by default run `base` group `algo-groups`: this parameter is helpful to append any specific algorithm+group combination to plot results for in addition to all the arguments from `algorithms` and `groups`. It is of the format `<algorithm>.<group>`, or for example, `raft_cagra.large` The figure below is the resulting plot of running our benchmarks as of August 2023 for a batch size of 10, on an NVIDIA H100 GPU and an Intel Xeon Platinum 8480CL CPU. It presents the throughput (in Queries-Per-Second) performance for every level of recall. 
![Throughput vs recall plot comparing popular ANN algorithms with RAFT's at batch size 10](../../img/raft-vector-search-batch-10.png) ## Running the benchmarks ### End to end: small-scale benchmarks (<1M to 10M) The steps below demonstrate how to download, install, and run benchmarks on a subset of 10M vectors from the Yandex Deep-1B dataset. By default the datasets will be stored and used from the folder indicated by the `RAPIDS_DATASET_ROOT_DIR` environment variable if defined, otherwise a datasets sub-folder from where the script is being called: ```bash # (1) prepare dataset. python -m raft-ann-bench.get_dataset --dataset deep-image-96-angular --normalize # (2) build and search index python -m raft-ann-bench.run --dataset deep-image-96-inner # (3) export data python -m raft-ann-bench.data_export --dataset deep-image-96-inner # (4) plot results python -m raft-ann-bench.plot --dataset deep-image-96-inner ``` Configuration files already exist for the following list of the million-scale datasets. Please refer to [ann-benchmarks datasets](https://github.com/erikbern/ann-benchmarks/#data-sets) for more information, including actual train and test sizes. These all work out-of-the-box with the `--dataset` argument. Other million-scale datasets from `ann-benchmarks.com` will work, but will require a json configuration file to be created in `$CONDA_PREFIX/lib/python3.xx/site-packages/raft-ann-bench/run/conf`, or you can specify the `--configuration` option to use a specific file.
| Dataset Name | Train Rows | Columns | Test Rows | Distance | |-----|------------|----|----------------|------------| | `deep-image-96-angular` | 10M | 96 | 10K | Angular | | `fashion-mnist-784-euclidean` | 60K | 784 | 10K | Euclidean | | `glove-50-angular` | 1.1M | 50 | 10K | Angular | | `glove-100-angular` | 1.1M | 100 | 10K | Angular | | `mnist-784-euclidean` | 60K | 784 | 10K | Euclidean | | `nytimes-256-angular` | 290K | 256 | 10K | Angular | | `sift-128-euclidean` | 1M | 128 | 10K | Euclidean | All of the datasets above contain ground truth datasets with 100 neighbors. Thus `k` for these datasets must be less than or equal to 100. ### End to end: large-scale benchmarks (>10M vectors) `raft-ann-bench.get_dataset` cannot be used to download the [billion-scale datasets](ann_benchmarks_dataset.md#billion-scale) due to their size. You should instead use our billion-scale datasets guide to download and prepare them. All other python commands mentioned below work as intended once the billion-scale dataset has been downloaded. To download billion-scale datasets, visit [big-ann-benchmarks](http://big-ann-benchmarks.com/neurips21.html) We also provide a new dataset called `wiki-all` containing 88 million 768-dimensional vectors. This dataset is meant for benchmarking a realistic retrieval-augmented generation (RAG)/LLM embedding size at scale. It also contains 1M and 10M vector subsets for smaller-scale experiments. See our [Wiki-all Dataset Guide](https://docs.rapids.ai/api/raft/nightly/wiki_all_dataset/) for more information and to download the dataset. The steps below demonstrate how to download, install, and run benchmarks on a subset of 100M vectors from the Yandex Deep-1B dataset. Please note that datasets of this scale are recommended for GPUs with larger amounts of memory, such as the A100 or H100.
```bash mkdir -p datasets/deep-1B # (1) prepare dataset # download manually "Ground Truth" file of "Yandex DEEP" # suppose the file name is deep_new_groundtruth.public.10K.bin python -m raft-ann-bench.split_groundtruth --groundtruth datasets/deep-1B/deep_new_groundtruth.public.10K.bin # two files 'groundtruth.neighbors.ibin' and 'groundtruth.distances.fbin' should be produced # (2) build and search index python -m raft-ann-bench.run --dataset deep-1B # (3) export data python -m raft-ann-bench.data_export --dataset deep-1B # (4) plot results python -m raft-ann-bench.plot --dataset deep-1B ``` The usage of `python -m raft-ann-bench.split_groundtruth` is: ```bash usage: split_groundtruth.py [-h] --groundtruth GROUNDTRUTH options: -h, --help show this help message and exit --groundtruth GROUNDTRUTH Path to billion-scale dataset groundtruth file (default: None) ``` ### Running with Docker containers Two methods are provided for running the benchmarks with the Docker containers. #### End-to-end run on GPU When no other entrypoint is provided, an end-to-end script will run through all the steps in [Running the benchmarks](#running-the-benchmarks) above. 
For GPU-enabled systems, the `DATA_FOLDER` variable should be a local folder where you want datasets stored in `$DATA_FOLDER/datasets` and results in `$DATA_FOLDER/result` (we highly recommend `$DATA_FOLDER` to be a dedicated folder for the datasets and results of the containers): ```bash export DATA_FOLDER=path/to/store/datasets/and/results docker run --gpus all --rm -it -u $(id -u) \ -v $DATA_FOLDER:/data/benchmarks \ rapidsai/raft-ann-bench:24.02a-cuda11.8-py3.10 \ "--dataset deep-image-96-angular" \ "--normalize" \ "--algorithms raft_cagra,raft_ivf_pq --batch-size 10 -k 10" \ "" ``` Usage of the above command is as follows: | Argument | Description | |-----------------------------------------------------------|----------------------------------------------------------------------------------------------------| | `rapidsai/raft-ann-bench:24.02a-cuda11.8-py3.10` | Image to use. Can be either `raft-ann-bench` or `raft-ann-bench-datasets` | | `"--dataset deep-image-96-angular"` | Dataset name | | `"--normalize"` | Whether to normalize the dataset | | `"--algorithms raft_cagra,hnswlib --batch-size 10 -k 10"` | Arguments passed to the `run` script, such as the algorithms to benchmark, the batch size, and `k` | | `""` | Additional (optional) arguments that will be passed to the `plot` script. | ***Note about user and file permissions:*** The flag `-u $(id -u)` allows the user inside the container to match the `uid` of the user outside the container, allowing the container to read and write to the mounted volume indicated by the `$DATA_FOLDER` variable. #### End-to-end run on CPU The container arguments in the above section also be used for the CPU-only container, which can be used on systems that don't have a GPU installed. 
***Note:*** the image changes to `raft-ann-bench-cpu` container and the `--gpus all` argument is no longer used: ```bash export DATA_FOLDER=path/to/store/datasets/and/results docker run --rm -it -u $(id -u) \ -v $DATA_FOLDER:/data/benchmarks \ rapidsai/raft-ann-bench-cpu:24.02a-py3.10 \ "--dataset deep-image-96-angular" \ "--normalize" \ "--algorithms hnswlib --batch-size 10 -k 10" \ "" ``` #### Manually run the scripts inside the container All of the `raft-ann-bench` images contain the Conda packages, so they can be used directly by logging into the container itself: ```bash export DATA_FOLDER=path/to/store/datasets/and/results docker run --gpus all --rm -it -u $(id -u) \ --entrypoint /bin/bash \ --workdir /data/benchmarks \ -v $DATA_FOLDER:/data/benchmarks \ rapidsai/raft-ann-bench:24.02a-cuda11.8-py3.10 ``` This will drop you into a command line in the container, with the `raft-ann-bench` python package ready to use, as described in the [Running the benchmarks](#running-the-benchmarks) section above: ``` (base) root@00b068fbb862:/data/benchmarks# python -m raft-ann-bench.get_dataset --dataset deep-image-96-angular --normalize ``` Additionally, the containers can be run in detached mode without any issue. ### Evaluating the results The benchmarks capture several different measurements. The table below describes each of the measurements for index build benchmarks: | Name | Description | |------------|--------------------------------------------------------| | Benchmark | A name that uniquely identifies the benchmark instance | | Time | Wall-time spent training the index | | CPU | CPU time spent training the index | | Iterations | Number of iterations (this is usually 1) | | GPU | GPU time spent building | | index_size | Number of vectors used to train index | The table below describes each of the measurements for the index search benchmarks. The most important measurements are `Latency`, `items_per_second`, and `end_to_end`.
| Name | Description | |------------|-------------------------------------------------------------------------------------------------------------------------------------------------------| | Benchmark | A name that uniquely identifies the benchmark instance | | Time | The wall-clock time of a single iteration (batch) divided by the number of threads. | | CPU | The average CPU time (user + sys time). This does not include idle time (which can also happen while waiting for GPU sync). | | Iterations | Total number of batches. This is going to be `total_queries` / `n_queries`. | | GPU | GPU latency of a single batch (seconds). In throughput mode this is averaged over multiple threads. | | Latency | Latency of a single batch (seconds), calculated from wall-clock time. In throughput mode this is averaged over multiple threads. | | Recall | Proportion of correct neighbors to ground truth neighbors. Note this column is only present if groundtruth file is specified in dataset configuration.| | items_per_second | Total throughput, a.k.a Queries per second (QPS). This is approximately `total_queries` / `end_to_end`. | | k | Number of neighbors being queried in each iteration | | end_to_end | Total time taken to run all batches for all iterations | | n_queries | Total number of query vectors in each batch | | total_queries | Total number of vectors queries across all iterations ( = `iterations` * `n_queries`) | Note the following: - A slightly different method is used to measure `Time` and `end_to_end`. That is why `end_to_end` = `Time` * `Iterations` holds only approximately. - The actual table displayed on the screen may differ slightly as the hyper-parameters will also be displayed for each different combination being benchmarked. - Recall calculation: the number of queries processed per test depends on the number of iterations. Because of this, recall can show slight fluctuations if less neighbors are processed then it is available for the benchmark. 
## Creating and customizing dataset configurations A single configuration will often define a set of algorithms, with associated index and search parameters, that can be generalized across datasets. We use YAML to define dataset specific and algorithm specific configurations. <a id='yaml-dataset-config'></a>A default `datasets.yaml` is provided by RAFT in `${RAFT_HOME}/python/raft-ann-bench/src/raft-ann-bench/run/conf` with configurations available for several datasets. Here's a simple example entry for the `sift-128-euclidean` dataset: ```yaml - name: sift-128-euclidean base_file: sift-128-euclidean/base.fbin query_file: sift-128-euclidean/query.fbin groundtruth_neighbors_file: sift-128-euclidean/groundtruth.neighbors.ibin dims: 128 distance: euclidean ``` <a id='yaml-algo-config'></a>Configuration files for ANN algorithms supported by `raft-ann-bench` are provided in `${RAFT_HOME}/python/raft-ann-bench/src/raft-ann-bench/run/conf`. `raft_cagra` algorithm configuration looks like: ```yaml name: raft_cagra groups: base: build: graph_degree: [32, 64] intermediate_graph_degree: [64, 96] search: itopk: [32, 64, 128] large: build: graph_degree: [32, 64] search: itopk: [32, 64, 128] ``` The default parameters for which the benchmarks are run can be overridden by creating a custom YAML file for algorithms with a `base` group. The config above has 2 fields: 1. `name` - defines the name of the algorithm for which the parameters are being specified. 2. `groups` - defines a run group which has a particular set of parameters. Each group helps create a cross-product of all hyper-parameter fields for `build` and `search`. The table below contains all algorithms supported by RAFT. Each unique algorithm will have its own set of `build` and `search` settings. The [ANN Algorithm Parameter Tuning Guide](ann_benchmarks_param_tuning.md) contains detailed instructions on choosing build and search parameters for each supported algorithm.
| Library | Algorithms | |-----------|------------------------------------------------------------------| | FAISS GPU | `faiss_gpu_flat`, `faiss_gpu_ivf_flat`, `faiss_gpu_ivf_pq` | | FAISS CPU | `faiss_cpu_flat`, `faiss_cpu_ivf_flat`, `faiss_cpu_ivf_pq` | | GGNN | `ggnn` | | HNSWlib | `hnswlib` | | RAFT | `raft_brute_force`, `raft_cagra`, `raft_ivf_flat`, `raft_ivf_pq` | ## Adding a new ANN algorithm ### Implementation and Configuration Implementation of a new algorithm should be a C++ class that inherits `class ANN` (defined in `cpp/bench/ann/src/ann.h`) and implements all the pure virtual functions. In addition, it should define two `struct`s for building and searching parameters. The searching parameter class should inherit `struct ANN<T>::AnnSearchParam`. Take `class HnswLib` as an example, its definition is: ```c++ template<typename T> class HnswLib : public ANN<T> { public: struct BuildParam { int M; int ef_construction; int num_threads; }; using typename ANN<T>::AnnSearchParam; struct SearchParam : public AnnSearchParam { int ef; int num_threads; }; // ... }; ``` <a id='json-index-config'></a>The benchmark program uses JSON format in a configuration file to specify indexes to build, along with the build and search parameters. To add the new algorithm to the benchmark, you need to be able to specify `build_param`, whose value is a JSON object, and `search_params`, whose value is an array of JSON objects, for this algorithm in the configuration file. The `build_param` and `search_param` arguments will vary depending on the algorithm.
Take the configuration for `HnswLib` as an example:
```json
{
  "name" : "hnswlib.M12.ef500.th32",
  "algo" : "hnswlib",
  "build_param": {"M":12, "efConstruction":500, "numThreads":32},
  "file" : "/path/to/file",
  "search_params" : [
    {"ef":10, "numThreads":1},
    {"ef":20, "numThreads":1},
    {"ef":40, "numThreads":1}
  ],
  "search_result_file" : "/path/to/file"
},
```
How to interpret these JSON objects is totally left to the implementation and should be specified in `cpp/bench/ann/src/factory.cuh`:

1. First, add two functions for parsing JSON object to `struct BuildParam` and `struct SearchParam`, respectively:
    ```c++
    template<typename T>
    void parse_build_param(const nlohmann::json& conf,
                           typename cuann::HnswLib<T>::BuildParam& param) {
      param.ef_construction = conf.at("efConstruction");
      param.M = conf.at("M");
      if (conf.contains("numThreads")) {
        param.num_threads = conf.at("numThreads");
      }
    }

    template<typename T>
    void parse_search_param(const nlohmann::json& conf,
                            typename cuann::HnswLib<T>::SearchParam& param) {
      param.ef = conf.at("ef");
      if (conf.contains("numThreads")) {
        param.num_threads = conf.at("numThreads");
      }
    }
    ```

2. Next, add a corresponding `if` case to functions `create_algo()` (in `cpp/bench/ann/`) and `create_search_param()` by calling parsing functions. The string literal in the `if` condition statement must be the same as the value of `algo` in the configuration file. For example,
    ```c++
      // JSON configuration file contains a line like:  "algo" : "hnswlib"
      if (algo == "hnswlib") {
        // ...
      }
    ```

### Adding a CMake Target
In `raft/cpp/bench/ann/CMakeLists.txt`, we provide a `CMake` function to configure a new Benchmark target with the following signature:
```
ConfigureAnnBench(
  NAME <algo_name>
  PATH </path/to/algo/benchmark/source/file>
  INCLUDES <additional_include_directories>
  CXXFLAGS <additional_cxx_flags>
  LINKS <additional_link_library_targets>
)
```

To add a target for `HNSWLIB`, we would call the function as:
```
ConfigureAnnBench(
  NAME HNSWLIB PATH bench/ann/src/hnswlib/hnswlib_benchmark.cpp INCLUDES
  ${CMAKE_CURRENT_BINARY_DIR}/_deps/hnswlib-src/hnswlib CXXFLAGS "${HNSW_CXX_FLAGS}"
)
```

This will create an executable called `HNSWLIB_ANN_BENCH`, which can then be used to run `HNSWLIB` benchmarks.

Add a new entry to `algos.yaml` to map the name of the algorithm to its binary executable and specify whether the algorithm requires GPU support.
```yaml
raft_ivf_pq:
  executable: RAFT_IVF_PQ_ANN_BENCH
  requires_gpu: true
```

`executable` : specifies the name of the binary that will build/search the index. It is assumed to be available in `raft/cpp/build/`.
`requires_gpu` : denotes whether an algorithm requires GPU to run.
0