repo_id
stringlengths
21
96
file_path
stringlengths
31
155
content
stringlengths
1
92.9M
__index_level_0__
int64
0
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/core/mdarray.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <raft/core/resource/thrust_policy.hpp> #include <gtest/gtest.h> #include <raft/core/device_container_policy.hpp> #include <raft/core/device_mdarray.hpp> #include <raft/core/host_container_policy.hpp> #include <raft/core/host_mdarray.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/cuda_stream.hpp> #include <rmm/device_uvector.hpp> #include <rmm/device_vector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/sequence.h> namespace { namespace stdex = std::experimental; void check_status(int32_t* d_status, rmm::cuda_stream_view stream) { stream.synchronize(); int32_t h_status{1}; raft::update_host(&h_status, d_status, 1, stream); ASSERT_EQ(h_status, 0); } // just simple integration test, main tests are in mdspan ref implementation. 
void test_mdspan() { auto stream = rmm::cuda_stream_default; rmm::device_uvector<float> a{16ul, stream}; thrust::sequence(rmm::exec_policy(stream), a.begin(), a.end()); stdex::mdspan<float, stdex::extents<int, raft::dynamic_extent, raft::dynamic_extent>> span{ a.data(), 4, 4}; thrust::device_vector<int32_t> status(1, 0); auto p_status = status.data().get(); thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator(0ul), 4, [=] __device__(size_t i) { auto v = span(0, i); if (v != i) { raft::myAtomicAdd(p_status, 1); } auto k = stdex::submdspan(span, 0, stdex::full_extent); if (k(i) != i) { raft::myAtomicAdd(p_status, 1); } }); check_status(p_status, stream); } } // namespace TEST(MDSpan, Basic) { test_mdspan(); } namespace raft { void test_uvector_policy() { auto s = rmm::cuda_stream{}; device_uvector<float> dvec(10, s); auto a = dvec[2]; a = 3; float c = a; ASSERT_EQ(c, 3); } TEST(MDArray, Policy) { test_uvector_policy(); } void test_mdarray_basic() { using matrix_extent = stdex::extents<int, dynamic_extent, dynamic_extent>; raft::resources handle; auto s = resource::get_cuda_stream(handle); { /** * device policy */ layout_c_contiguous::mapping<matrix_extent> layout{matrix_extent{4, 4}}; using mdarray_t = device_mdarray<float, matrix_extent, layout_c_contiguous>; auto policy = mdarray_t::container_policy_type{}; static_assert( std::is_same_v<typename decltype(policy)::accessor_type, device_uvector_policy<float>>); device_mdarray<float, matrix_extent, layout_c_contiguous> array{handle, layout, policy}; array(0, 3) = 1; ASSERT_EQ(array(0, 3), 1); // non-const access auto d_view = array.view(); static_assert(!decltype(d_view)::accessor_type::is_host_type::value); thrust::device_vector<int32_t> status(1, 0); auto p_status = status.data().get(); thrust::for_each_n(rmm::exec_policy(s), thrust::make_counting_iterator(0ul), 1, [d_view, p_status] __device__(auto i) { if (d_view(0, 3) != 1) { myAtomicAdd(p_status, 1); } d_view(0, 2) = 3; if (d_view(0, 2) 
!= 3) { myAtomicAdd(p_status, 1); } }); check_status(p_status, s); // const ref access auto const& arr = array; ASSERT_EQ(arr(0, 3), 1); auto const_d_view = arr.view(); thrust::for_each_n(rmm::exec_policy(s), thrust::make_counting_iterator(0ul), 1, [const_d_view, p_status] __device__(auto i) { if (const_d_view(0, 3) != 1) { myAtomicAdd(p_status, 1); } }); check_status(p_status, s); // utilities static_assert(array.rank_dynamic() == 2); static_assert(array.rank() == 2); static_assert(array.is_unique()); static_assert(array.is_exhaustive()); static_assert(array.is_strided()); static_assert(!std::is_nothrow_default_constructible<mdarray_t>::value); // cuda stream static_assert(std::is_nothrow_move_constructible<mdarray_t>::value); static_assert(std::is_nothrow_move_assignable<mdarray_t>::value); } { /** * host policy */ using mdarray_t = host_mdarray<float, matrix_extent, layout_c_contiguous>; mdarray_t::container_policy_type policy; static_assert( std::is_same_v<typename decltype(policy)::accessor_type, host_vector_policy<float>>); layout_c_contiguous::mapping<matrix_extent> layout{matrix_extent{4, 4}}; host_mdarray<float, matrix_extent, layout_c_contiguous> array{handle, layout, policy}; array(0, 3) = 1; ASSERT_EQ(array(0, 3), 1); auto h_view = array.view(); static_assert(decltype(h_view)::accessor_type::is_host_type::value); thrust::for_each_n(thrust::host, thrust::make_counting_iterator(0ul), 1, [h_view](auto i) { ASSERT_EQ(h_view(0, 3), 1); }); // static_assert(std::is_nothrow_default_constructible<mdarray_t>::value); static_assert(std::is_nothrow_move_constructible<mdarray_t>::value); static_assert(std::is_nothrow_move_assignable<mdarray_t>::value); } { /** * static extent */ using static_extent = stdex::extents<int, 16, 16>; layout_c_contiguous::mapping<static_extent> layout{static_extent{}}; using mdarray_t = device_mdarray<float, static_extent, layout_c_contiguous>; mdarray_t::container_policy_type policy{}; device_mdarray<float, static_extent, 
layout_c_contiguous> array{handle, layout, policy}; static_assert(array.rank_dynamic() == 0); static_assert(array.rank() == 2); static_assert(array.is_unique()); static_assert(array.is_exhaustive()); static_assert(array.is_strided()); array(0, 3) = 1; ASSERT_EQ(array(0, 3), 1); auto const& ref = array; ASSERT_EQ(ref(0, 3), 1); } } TEST(MDArray, Basic) { test_mdarray_basic(); } template <typename BasicMDarray, typename PolicyFn, typename ThrustPolicy> void test_mdarray_copy_move(ThrustPolicy exec, PolicyFn make_policy) { raft::resources handle; using matrix_extent = stdex::extents<size_t, dynamic_extent, dynamic_extent>; layout_c_contiguous::mapping<matrix_extent> layout{matrix_extent{4, 4}}; using mdarray_t = BasicMDarray; using policy_t = typename mdarray_t::container_policy_type; auto policy = make_policy(); mdarray_t arr_origin{handle, layout, policy}; thrust::sequence(exec, arr_origin.data_handle(), arr_origin.data_handle() + arr_origin.size()); auto check_eq = [](auto const& l, auto const& r) { ASSERT_EQ(l.extents(), r.extents()); for (size_t i = 0; i < l.view().extent(0); ++i) { for (size_t j = 0; j < l.view().extent(1); ++j) { ASSERT_EQ(l(i, j), r(i, j)); } } }; { // copy ctor auto policy = make_policy(); mdarray_t arr{handle, layout, policy}; thrust::sequence(exec, arr.data_handle(), arr.data_handle() + arr.size()); mdarray_t arr_copy_construct{arr}; check_eq(arr, arr_copy_construct); auto const& ref = arr; mdarray_t arr_copy_construct_1{ref}; check_eq(ref, arr_copy_construct_1); } { // copy assign auto policy = make_policy(); mdarray_t arr{handle, layout, policy}; thrust::sequence(exec, arr.data_handle(), arr.data_handle() + arr.size()); mdarray_t arr_copy_assign{handle, layout, policy}; arr_copy_assign = arr; check_eq(arr, arr_copy_assign); auto const& ref = arr; mdarray_t arr_copy_assign_1{handle, layout, policy}; arr_copy_assign_1 = ref; check_eq(ref, arr_copy_assign_1); } { // move ctor auto policy = make_policy(); mdarray_t arr{handle, layout, 
policy}; thrust::sequence(exec, arr.data_handle(), arr.data_handle() + arr.size()); mdarray_t arr_move_construct{std::move(arr)}; ASSERT_EQ(arr.data_handle(), nullptr); check_eq(arr_origin, arr_move_construct); } { // move assign auto policy = make_policy(); mdarray_t arr{handle, layout, policy}; thrust::sequence(exec, arr.data_handle(), arr.data_handle() + arr.size()); mdarray_t arr_move_assign{handle, layout, policy}; arr_move_assign = std::move(arr); ASSERT_EQ(arr.data_handle(), nullptr); check_eq(arr_origin, arr_move_assign); } } TEST(MDArray, CopyMove) { using matrix_extent = stdex::extents<size_t, dynamic_extent, dynamic_extent>; using d_matrix_t = device_mdarray<float, matrix_extent>; using policy_t = typename d_matrix_t::container_policy_type; raft::resources handle; auto s = resource::get_cuda_stream(handle); test_mdarray_copy_move<d_matrix_t>(rmm::exec_policy(s), []() { return policy_t{}; }); using h_matrix_t = host_mdarray<float, matrix_extent>; test_mdarray_copy_move<h_matrix_t>(thrust::host, []() { return host_vector_policy<float>{}; }); { d_matrix_t arr{handle}; policy_t policy; matrix_extent extents{3, 3}; d_matrix_t::layout_type::mapping<matrix_extent> layout{extents}; d_matrix_t non_dft{handle, layout, policy}; arr = non_dft; ASSERT_NE(arr.data_handle(), non_dft.data_handle()); ASSERT_EQ(arr.extent(0), non_dft.extent(0)); } { h_matrix_t arr(handle); using h_policy_t = typename h_matrix_t::container_policy_type; h_policy_t policy; matrix_extent extents{3, 3}; h_matrix_t::layout_type::mapping<matrix_extent> layout{extents}; h_matrix_t non_dft{handle, layout, policy}; arr = non_dft; ASSERT_NE(arr.data_handle(), non_dft.data_handle()); ASSERT_EQ(arr.extent(0), non_dft.extent(0)); } } namespace { void test_factory_methods() { size_t n{100}; rmm::device_uvector<float> d_vec(n, rmm::cuda_stream_default); { auto d_matrix = make_device_matrix_view(d_vec.data(), static_cast<int>(d_vec.size() / 2), 2); ASSERT_EQ(d_matrix.extent(0), n / 2); 
ASSERT_EQ(d_matrix.extent(1), 2); ASSERT_EQ(d_matrix.data_handle(), d_vec.data()); } { auto const& vec_ref = d_vec; auto d_matrix = make_device_matrix_view(vec_ref.data(), static_cast<int>(d_vec.size() / 2), 2); ASSERT_EQ(d_matrix.extent(0), n / 2); ASSERT_EQ(d_matrix.extent(1), 2); ASSERT_EQ(d_matrix.data_handle(), d_vec.data()); } std::vector<float> h_vec(n); { auto h_matrix = make_host_matrix_view(h_vec.data(), static_cast<int>(h_vec.size() / 2), 2); ASSERT_EQ(h_matrix.extent(0), n / 2); ASSERT_EQ(h_matrix.extent(1), 2); ASSERT_EQ(h_matrix.data_handle(), h_vec.data()); h_matrix(0, 0) = 13; ASSERT_EQ(h_matrix(0, 0), 13); } { auto const& vec_ref = h_vec; auto h_matrix = make_host_matrix_view(vec_ref.data(), static_cast<int>(d_vec.size() / 2), 2); ASSERT_EQ(h_matrix.extent(0), n / 2); ASSERT_EQ(h_matrix.extent(1), 2); ASSERT_EQ(h_matrix.data_handle(), h_vec.data()); // const, cannot assign // h_matrix(0, 0) = 13; ASSERT_EQ(h_matrix(0, 0), 13); } { // host mdarray auto h_matrix = make_host_matrix<float>(n, n); ASSERT_EQ(h_matrix.extent(0), n); ASSERT_EQ(h_matrix.extent(1), n); static_assert(h_matrix.rank() == 2); auto h_vec = make_host_vector<float>(n); static_assert(h_vec.rank() == 1); ASSERT_EQ(h_vec.extent(0), n); } { raft::resources handle; // device mdarray auto d_matrix = make_device_matrix<float>(handle, n, n); ASSERT_EQ(d_matrix.extent(0), n); ASSERT_EQ(d_matrix.extent(1), n); static_assert(d_matrix.rank() == 2); auto d_vec = make_device_vector<float>(handle, n); static_assert(d_vec.rank() == 1); ASSERT_EQ(d_vec.extent(0), n); } { raft::resources handle; // device scalar auto d_scalar = make_device_scalar<double>(handle, 17.0); static_assert(d_scalar.rank() == 1); static_assert(d_scalar.rank_dynamic() == 0); ASSERT_EQ(d_scalar(0), 17.0); auto view = d_scalar.view(); thrust::device_vector<int32_t> status(1, 0); auto p_status = status.data().get(); thrust::for_each_n(rmm::exec_policy(resource::get_cuda_stream(handle)), thrust::make_counting_iterator(0), 1, [=] 
__device__(auto i) { if (view(i) != 17.0) { myAtomicAdd(p_status, 1); } }); check_status(p_status, resource::get_cuda_stream(handle)); } { // host scalar raft::resources handle; auto h_scalar = make_host_scalar<double>(handle, 17.0); static_assert(h_scalar.rank() == 1); static_assert(h_scalar.rank_dynamic() == 0); ASSERT_EQ(h_scalar(0), 17.0); ASSERT_EQ(h_scalar.view()(0), 17.0); auto view = make_host_scalar_view(h_scalar.data_handle()); ASSERT_EQ(view(0), 17.0); } // managed { raft::resources handle; auto mda = make_device_vector<int>(handle, 10); auto mdv = make_managed_mdspan(mda.data_handle(), raft::vector_extent<int>{10}); static_assert(decltype(mdv)::accessor_type::is_managed_accessible, "Not managed mdspan"); ASSERT_EQ(mdv.size(), 10); } } } // anonymous namespace TEST(MDArray, Factory) { test_factory_methods(); } namespace { template <typename T, typename Index, typename LayoutPolicy> void check_matrix_layout(device_matrix_view<T, Index, LayoutPolicy> in) { static_assert(in.rank() == 2); static_assert(in.is_exhaustive()); bool constexpr kIsCContiguous = std::is_same_v<LayoutPolicy, layout_c_contiguous>; bool constexpr kIsFContiguous = std::is_same_v<LayoutPolicy, layout_f_contiguous>; // only 1 of them is true static_assert(kIsCContiguous || kIsFContiguous); static_assert(!(kIsCContiguous && kIsFContiguous)); } } // anonymous namespace TEST(MDArray, FuncArg) { raft::resources handle; { auto d_matrix = make_device_matrix<float>(handle, 10, 10); check_matrix_layout(d_matrix.view()); } { auto d_matrix = make_device_matrix<float, int, layout_f_contiguous>(handle, 10, 10); check_matrix_layout(d_matrix.view()); auto slice = stdex::submdspan(d_matrix.view(), std::make_tuple(2ul, 4ul), std::make_tuple(2ul, 5ul)); static_assert(slice.is_strided()); ASSERT_EQ(slice.extent(0), 2); ASSERT_EQ(slice.extent(1), 3); // is using device_accessor mixin. 
static_assert( std::is_same_v<decltype(slice)::accessor_type, device_matrix_view<float>::accessor_type>); } } void test_mdspan_layout_right_padded() { { // 5x2 example, constexpr int n_rows = 2; constexpr int n_cols = 5; constexpr int alignment = 8; constexpr int alignment_bytes = sizeof(int) * alignment; int data_row_major[] = { 1, 2, 3, 4, 5, /* X X X */ 6, 7, 8, 9, 10 /* X X X */ }; // manually aligning the above, using -1 as filler static constexpr int X = -1; int data_padded[] = {1, 2, 3, 4, 5, X, X, X, 6, 7, 8, 9, 10, X, X, X}; using extents_type = stdex::extents<size_t, stdex::dynamic_extent, stdex::dynamic_extent>; using padded_layout_row_major = stdex::layout_right_padded<detail::padding<int, alignment_bytes>::value>; using padded_mdspan = stdex::mdspan<int, extents_type, padded_layout_row_major>; using row_major_mdspan = stdex::mdspan<int, extents_type, stdex::layout_right>; padded_layout_row_major::mapping<extents_type> layout{extents_type{n_rows, n_cols}}; auto padded = padded_mdspan(data_padded, layout); auto row_major = row_major_mdspan(data_row_major, n_rows, n_cols); int failures = 0; for (int irow = 0; irow < n_rows; ++irow) { for (int icol = 0; icol < n_cols; ++icol) { if (padded(irow, icol) != row_major(irow, icol)) { ++failures; } } } ASSERT_EQ(failures, 0); } } TEST(MDSpan, LayoutRightPadded) { test_mdspan_layout_right_padded(); } void test_mdarray_padding() { using extents_type = stdex::extents<size_t, dynamic_extent, dynamic_extent>; raft::resources handle; auto s = resource::get_cuda_stream(handle); { constexpr int rows = 6; constexpr int cols = 7; constexpr int alignment = 5; constexpr int alignment_bytes = sizeof(int) * alignment; /** * padded device array */ using padded_layout_row_major = stdex::layout_right_padded<detail::padding<float, alignment_bytes>::value>; using padded_mdarray_type = device_mdarray<float, extents_type, padded_layout_row_major>; padded_layout_row_major::mapping<extents_type> layout(extents_type(rows, cols)); auto 
device_policy = padded_mdarray_type::container_policy_type{}; static_assert(std::is_same_v<typename decltype(device_policy)::accessor_type, device_uvector_policy<float>>); padded_mdarray_type padded_device_array{handle, layout, device_policy}; // direct access mdarray padded_device_array(0, 3) = 1; ASSERT_EQ(padded_device_array(0, 3), 1); // non-const access via mdspan auto d_view = padded_device_array.view(); static_assert(!decltype(d_view)::accessor_type::is_host_type::value); thrust::device_vector<int32_t> status(1, 0); auto p_status = status.data().get(); thrust::for_each_n(rmm::exec_policy(), thrust::make_counting_iterator(0ul), 1, [d_view, p_status] __device__(size_t i) { if (d_view(0, 3) != 1) { myAtomicAdd(p_status, 1); } d_view(0, 2) = 3; if (d_view(0, 2) != 3) { myAtomicAdd(p_status, 1); } }); check_status(p_status, s); // const ref access via mdspan auto const& arr = padded_device_array; ASSERT_EQ(arr(0, 3), 1); auto const_d_view = arr.view(); thrust::for_each_n(rmm::exec_policy(s), thrust::make_counting_iterator(0ul), 1, [const_d_view, p_status] __device__(size_t i) { if (const_d_view(0, 3) != 1) { myAtomicAdd(p_status, 1); } }); check_status(p_status, s); // initialize with sequence thrust::for_each_n( rmm::exec_policy(s), thrust::make_counting_iterator(0ul), rows * cols, [d_view, rows, cols] __device__(size_t i) { d_view(i / cols, i % cols) = i; }); // manually create span with layout { auto data_padded = padded_device_array.data_handle(); using padded_mdspan_type = device_mdspan<float, extents_type, padded_layout_row_major>; auto padded_span = padded_mdspan_type(data_padded, layout); thrust::for_each_n(rmm::exec_policy(s), thrust::make_counting_iterator(0ul), rows * cols, [padded_span, rows, cols, p_status] __device__(size_t i) { if (padded_span(i / cols, i % cols) != i) myAtomicAdd(p_status, 1); }); check_status(p_status, s); } // utilities static_assert(padded_device_array.rank_dynamic() == 2); static_assert(padded_device_array.rank() == 2); 
static_assert(padded_device_array.is_unique()); static_assert(padded_device_array.is_strided()); static_assert( !std::is_nothrow_default_constructible<padded_mdarray_type>::value); // cuda stream static_assert(std::is_nothrow_move_constructible<padded_mdarray_type>::value); static_assert(std::is_nothrow_move_assignable<padded_mdarray_type>::value); } } TEST(MDArray, Padding) { test_mdarray_padding(); } // Test deactivated as submdspan support requires upstream changes /*void test_submdspan_padding() { using extents_type = stdex::extents<dynamic_extent, dynamic_extent>; auto s = rmm::cuda_stream_default; { constexpr int rows = 6; constexpr int cols = 7; constexpr int alignment = 5; constexpr int alignment_bytes = sizeof(int) * alignment; using layout_padded_general = stdex::layout_padded_general<float, stdex::StorageOrderType::row_major_t, alignment_bytes>; using padded_mdarray_type = device_mdarray<float, extents_type, layout_padded_general>; using padded_mdspan_type = device_mdspan<float, extents_type, layout_padded_general>; layout_padded_general::mapping<extents_type> layout{extents_type{rows, cols}}; auto device_policy = padded_mdarray_type::container_policy_type{s}; static_assert(std::is_same_v<typename decltype(device_policy)::accessor_type, detail::device_uvector_policy<float>>); padded_mdarray_type padded_device_array{handle, layout, device_policy}; // test status thrust::device_vector<int32_t> status(1, 0); auto p_status = status.data().get(); // initialize with sequence { auto d_view = padded_device_array.view(); static_assert(std::is_same_v<typename decltype(d_view)::layout_type, layout_padded_general>); thrust::for_each_n( rmm::exec_policy(s), thrust::make_counting_iterator(0ul), rows * cols, [d_view, rows, cols] __device__(size_t i) { d_view(i / cols, i % cols) = i; }); } // get mdspan manually from raw data { auto data_padded = padded_device_array.data(); auto padded_span = padded_mdspan_type(data_padded, layout); 
thrust::for_each_n(rmm::exec_policy(s), thrust::make_counting_iterator(0ul), rows * cols, [padded_span, rows, cols, p_status] __device__(size_t i) { if (padded_span(i / cols, i % cols) != i) myAtomicAdd(p_status, 1); }); check_status(p_status, s); } // full subspan { auto padded_span = padded_device_array.view(); auto subspan_full = stdex::submdspan(padded_span, stdex::full_extent, stdex::full_extent); thrust::for_each_n(rmm::exec_policy(s), thrust::make_counting_iterator(0ul), cols * rows, [subspan_full, padded_span, rows, cols, p_status] __device__(size_t i) { if (subspan_full(i / cols, i % cols) != padded_span(i / cols, i % cols)) myAtomicAdd(p_status, 1); }); check_status(p_status, s); // resulting submdspan should still be padded static_assert( std::is_same_v<typename decltype(subspan_full)::layout_type, layout_padded_general>); } // slicing a row { auto padded_span = padded_device_array.view(); auto row3 = stdex::submdspan(padded_span, 3, stdex::full_extent); thrust::for_each_n(rmm::exec_policy(s), thrust::make_counting_iterator(0ul), cols, [row3, padded_span, p_status] __device__(size_t i) { if (row3(i) != padded_span(3, i)) myAtomicAdd(p_status, 1); }); check_status(p_status, s); // resulting submdspan should still be padded static_assert(std::is_same_v<typename decltype(row3)::layout_type, layout_padded_general>); } // slicing a column { auto padded_span = padded_device_array.view(); auto col1 = stdex::submdspan(padded_span, stdex::full_extent, 1); thrust::for_each_n(rmm::exec_policy(s), thrust::make_counting_iterator(0ul), rows, [col1, padded_span, p_status] __device__(size_t i) { if (col1(i) != padded_span(i, 1)) myAtomicAdd(p_status, 1); }); check_status(p_status, s); // resulting submdspan is *NOT* padded anymore static_assert(std::is_same_v<typename decltype(col1)::layout_type, stdex::layout_stride>); } // sub-rectangle of 6x7 { auto padded_span = padded_device_array.view(); auto subspan = stdex::submdspan(padded_span, std::make_tuple(1ul, 4ul), 
std::make_tuple(2ul, 5ul)); thrust::for_each_n(rmm::exec_policy(s), thrust::make_counting_iterator(0ul), (rows - 1) * (cols - 2), [subspan, rows, cols, padded_span, p_status] __device__(size_t i) { size_t idx = i / (cols - 2); size_t idy = i % (cols - 2); // elements > subspan range can be accessed as well if (subspan(idx, idy) != padded_span(idx + 1, idy + 2)) myAtomicAdd(p_status, 1); }); check_status(p_status, s); // resulting submdspan is *NOT* padded anymore static_assert(std::is_same_v<typename decltype(subspan)::layout_type, stdex::layout_stride>); } // sub-rectangle retaining padded layout { auto padded_span = padded_device_array.view(); auto subspan = stdex::submdspan(padded_span, std::make_tuple(1ul, 4ul), std::make_tuple(2ul, 5ul)); thrust::for_each_n(rmm::exec_policy(s), thrust::make_counting_iterator(0ul), (rows - 1) * (cols - 2), [subspan, rows, cols, padded_span, p_status] __device__(size_t i) { size_t idx = i / (cols - 2); size_t idy = i % (cols - 2); // elements > subspan range can be accessed as well if (subspan(idx, idy) != padded_span(idx + 1, idy + 2)) myAtomicAdd(p_status, 1); }); check_status(p_status, s); // resulting submdspan is *NOT* padded anymore static_assert(std::is_same_v<typename decltype(subspan)::layout_type, stdex::layout_stride>); } } } TEST(MDSpan, SubmdspanPadding) { test_submdspan_padding(); }*/ struct TestElement1 { int a, b; }; void test_mdspan_padding_by_type() { using extents_type = stdex::extents<size_t, dynamic_extent, dynamic_extent>; raft::resources handle; auto s = rmm::cuda_stream_default; { constexpr int rows = 6; constexpr int cols = 7; constexpr int alignment_bytes = 16; thrust::device_vector<int32_t> status(1, 0); auto p_status = status.data().get(); // manually check strides for row major (c style) padding { using padded_layout_row_major = stdex::layout_right_padded< detail::padding<std::remove_cv_t<std::remove_reference_t<TestElement1>>, alignment_bytes>::value>; using padded_mdarray_type = 
device_mdarray<TestElement1, extents_type, padded_layout_row_major>; auto device_policy = padded_mdarray_type::container_policy_type{}; padded_layout_row_major::mapping<extents_type> layout{extents_type{rows, cols}}; padded_mdarray_type padded_device_array{handle, layout, device_policy}; int alignment_elements = detail::padding<TestElement1, alignment_bytes>::value; auto padded_span = padded_device_array.view(); thrust::for_each_n( rmm::exec_policy(s), thrust::make_counting_iterator(0ul), rows * cols, [rows, cols, padded_span, alignment_elements, p_status] __device__(size_t i) { size_t idx = i / cols; size_t idy = i % cols; if ((&(padded_span(idx, idy)) - &(padded_span(0, idy))) % alignment_elements != 0) myAtomicAdd(p_status, 1); if ((&(padded_span(idx, idy)) - &(padded_span(idx, 0))) != idy) myAtomicAdd(p_status, 1); }); check_status(p_status, s); } // manually check strides for col major (f style) padding { using padded_layout_col_major = stdex::layout_left_padded< detail::padding<std::remove_cv_t<std::remove_reference_t<TestElement1>>, alignment_bytes>::value>; using padded_mdarray_type = device_mdarray<TestElement1, extents_type, padded_layout_col_major>; auto device_policy = padded_mdarray_type::container_policy_type{}; padded_layout_col_major::mapping<extents_type> layout{extents_type{rows, cols}}; padded_mdarray_type padded_device_array{handle, layout, device_policy}; int alignment_elements = detail::padding<TestElement1, alignment_bytes>::value; auto padded_span = padded_device_array.view(); thrust::for_each_n( rmm::exec_policy(s), thrust::make_counting_iterator(0ul), rows * cols, [rows, cols, padded_span, alignment_elements, p_status] __device__(size_t i) { size_t idx = i / cols; size_t idy = i % cols; if ((&(padded_span(idx, idy)) - &(padded_span(idx, 0))) % alignment_elements != 0) myAtomicAdd(p_status, 1); if ((&(padded_span(idx, idy)) - &(padded_span(0, idy))) != idx) myAtomicAdd(p_status, 1); }); check_status(p_status, s); } } } TEST(MDSpan, 
MDSpanPaddingType) { test_mdspan_padding_by_type(); } void test_mdspan_aligned_matrix() { using extents_type = stdex::extents<size_t, dynamic_extent, dynamic_extent>; raft::resources handle; constexpr int rows = 2; constexpr int cols = 10; // manually aligning the above, using -1 as filler static constexpr int X = -1; long data_padded[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, X, X, X, X, X, X, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, X, X, X, X, X, X}; auto my_aligned_host_span = make_host_aligned_matrix_view<long, int, layout_right_padded<long>>(data_padded, rows, cols); int failures = 0; for (int irow = 0; irow < rows; ++irow) { for (int icol = 0; icol < cols; ++icol) { if (my_aligned_host_span(irow, icol) != irow * cols + icol) { ++failures; } } } ASSERT_EQ(failures, 0); // now work with device memory // use simple 1D array to allocate some space auto s = rmm::cuda_stream_default; using extent_1d = stdex::extents<size_t, dynamic_extent>; layout_c_contiguous::mapping<extent_1d> layout_1d{extent_1d{rows * 32}}; using mdarray_t = device_mdarray<long, extent_1d, layout_c_contiguous>; auto device_policy = mdarray_t::container_policy_type{}; mdarray_t device_array_1d{handle, layout_1d, device_policy}; // direct access mdarray -- initialize with above data for (int i = 0; i < 32; ++i) { device_array_1d(i) = data_padded[i]; } auto my_aligned_device_span = make_device_aligned_matrix_view<long, int, layout_right_padded<long>>( device_array_1d.data_handle(), rows, cols); thrust::device_vector<int32_t> status(1, 0); auto p_status = status.data().get(); thrust::for_each_n(rmm::exec_policy(s), thrust::make_counting_iterator(0ul), rows * cols, [rows, cols, my_aligned_device_span, p_status] __device__(size_t i) { size_t idx = i / cols; size_t idy = i % cols; if (my_aligned_device_span(idx, idy) != i) myAtomicAdd(p_status, 1); }); check_status(p_status, s); } TEST(MDSpan, MDSpanAlignedMatrix) { test_mdspan_aligned_matrix(); } namespace { void test_mdarray_unravel() { { uint32_t v{0}; 
ASSERT_EQ(detail::native_popc(v), 0); ASSERT_EQ(detail::popc(v), 0); v = 1; ASSERT_EQ(detail::native_popc(v), 1); ASSERT_EQ(detail::popc(v), 1); v = 0xffffffff; ASSERT_EQ(detail::native_popc(v), 32); ASSERT_EQ(detail::popc(v), 32); } { uint64_t v{0}; ASSERT_EQ(detail::native_popc(v), 0); ASSERT_EQ(detail::popc(v), 0); v = 1; ASSERT_EQ(detail::native_popc(v), 1); ASSERT_EQ(detail::popc(v), 1); v = 0xffffffff; ASSERT_EQ(detail::native_popc(v), 32); ASSERT_EQ(detail::popc(v), 32); v = 0xffffffffffffffff; ASSERT_EQ(detail::native_popc(v), 64); ASSERT_EQ(detail::popc(v), 64); } // examples from numpy unravel_index { auto coord = unravel_index(22, matrix_extent<int>{7, 6}, stdex::layout_right{}); static_assert(std::tuple_size<decltype(coord)>::value == 2); ASSERT_EQ(std::get<0>(coord), 3); ASSERT_EQ(std::get<1>(coord), 4); } { auto coord = unravel_index(41, matrix_extent<int>{7, 6}, stdex::layout_right{}); static_assert(std::tuple_size<decltype(coord)>::value == 2); ASSERT_EQ(std::get<0>(coord), 6); ASSERT_EQ(std::get<1>(coord), 5); } { auto coord = unravel_index(37, matrix_extent<int>{7, 6}, stdex::layout_right{}); static_assert(std::tuple_size<decltype(coord)>::value == 2); ASSERT_EQ(std::get<0>(coord), 6); ASSERT_EQ(std::get<1>(coord), 1); } // assignment { auto m = make_host_matrix<float, size_t>(7, 6); auto m_v = m.view(); for (size_t i = 0; i < m.size(); ++i) { auto coord = unravel_index(i, m.extents(), typename decltype(m)::layout_type{}); std::apply(m_v, coord) = i; } for (size_t i = 0; i < m.size(); ++i) { auto coord = unravel_index(i, m.extents(), typename decltype(m)::layout_type{}); ASSERT_EQ(std::apply(m_v, coord), i); } } { raft::resources handle; auto m = make_device_matrix<float, size_t>(handle, 7, 6); auto m_v = m.view(); thrust::for_each_n(resource::get_thrust_policy(handle), thrust::make_counting_iterator(0ul), m_v.size(), [=] HD(size_t i) { auto coord = unravel_index(i, m_v.extents(), typename decltype(m_v)::layout_type{}); std::apply(m_v, coord) = 
static_cast<float>(i); }); thrust::device_vector<int32_t> status(1, 0); auto p_status = status.data().get(); thrust::for_each_n(resource::get_thrust_policy(handle), thrust::make_counting_iterator(0ul), m_v.size(), [=] __device__(size_t i) { auto coord = unravel_index(i, m_v.extents(), typename decltype(m_v)::layout_type{}); auto v = std::apply(m_v, coord); if (v != static_cast<float>(i)) { raft::myAtomicAdd(p_status, 1); } }); check_status(p_status, resource::get_cuda_stream(handle)); } } } // anonymous namespace TEST(MDArray, Unravel) { test_mdarray_unravel(); } } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/core/operators_host.cpp
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <cmath> #include <gtest/gtest.h> #include "../test_utils.h" #include <raft/core/kvp.hpp> #include <raft/core/operators.hpp> TEST(OperatorsHost, IdentityOp) { raft::identity_op op; ASSERT_TRUE(raft::match(12.34f, op(12.34f, 0), raft::Compare<float>())); } TEST(OperatorsHost, CastOp) { raft::cast_op<float> op; ASSERT_TRUE(raft::match(1234.0f, op(1234, 0), raft::CompareApprox<float>(0.00001f))); } TEST(OperatorsHost, KeyOp) { raft::key_op op; raft::KeyValuePair<int, float> kvp(12, 3.4f); ASSERT_TRUE(raft::match(12, op(kvp, 0), raft::Compare<int>())); } TEST(OperatorsHost, ValueOp) { raft::value_op op; raft::KeyValuePair<int, float> kvp(12, 3.4f); ASSERT_TRUE(raft::match(3.4f, op(kvp, 0), raft::CompareApprox<float>(0.00001f))); } TEST(OperatorsHost, SqrtOpF) { raft::sqrt_op op; ASSERT_TRUE(raft::match(std::sqrt(12.34f), op(12.34f, 0), raft::CompareApprox<float>(0.0001f))); ASSERT_TRUE(raft::match(std::sqrt(12.34), op(12.34, 0), raft::CompareApprox<double>(0.000001))); } TEST(OperatorsHost, NZOp) { raft::nz_op op; ASSERT_TRUE(raft::match(0.0f, op(0.0f, 0), raft::CompareApprox<float>(0.00001f))); ASSERT_TRUE(raft::match(1.0f, op(12.34f, 0), raft::CompareApprox<float>(0.00001f))); } TEST(OperatorsHost, AbsOp) { raft::abs_op op; ASSERT_TRUE(raft::match(12.34f, op(-12.34f, 0), raft::CompareApprox<float>(0.00001f))); ASSERT_TRUE(raft::match(12.34, op(-12.34, 0), 
raft::CompareApprox<double>(0.000001))); ASSERT_TRUE(raft::match(1234, op(-1234, 0), raft::Compare<int>())); } TEST(OperatorsHost, SqOp) { raft::sq_op op; ASSERT_TRUE(raft::match(152.2756f, op(12.34f, 0), raft::CompareApprox<float>(0.00001f))); ASSERT_TRUE(raft::match(289, op(-17, 0), raft::Compare<int>())); } TEST(OperatorsHost, AddOp) { raft::add_op op; ASSERT_TRUE(raft::match(12.34f, op(12.0f, 0.34f), raft::CompareApprox<float>(0.00001f))); ASSERT_TRUE(raft::match(1234, op(1200, 34), raft::Compare<int>())); } TEST(OperatorsHost, SubOp) { raft::sub_op op; ASSERT_TRUE(raft::match(12.34f, op(13.0f, 0.66f), raft::CompareApprox<float>(0.00001f))); ASSERT_TRUE(raft::match(1234, op(1300, 66), raft::Compare<int>())); } TEST(OperatorsHost, MulOp) { raft::mul_op op; ASSERT_TRUE(raft::match(12.34f, op(2.0f, 6.17f), raft::CompareApprox<float>(0.00001f))); } TEST(OperatorsHost, DivOp) { raft::div_op op; ASSERT_TRUE(raft::match(12.34f, op(37.02f, 3.0f), raft::CompareApprox<float>(0.00001f))); } TEST(OperatorsHost, DivCheckZeroOp) { raft::div_checkzero_op op; ASSERT_TRUE(raft::match(12.34f, op(37.02f, 3.0f), raft::CompareApprox<float>(0.00001f))); ASSERT_TRUE(raft::match(0.0f, op(37.02f, 0.0f), raft::CompareApprox<float>(0.00001f))); } TEST(OperatorsHost, PowOp) { raft::pow_op op; ASSERT_TRUE(raft::match(1000.0f, op(10.0f, 3.0f), raft::CompareApprox<float>(0.0001f))); ASSERT_TRUE(raft::match(1000.0, op(10.0, 3.0), raft::CompareApprox<double>(0.000001))); } TEST(OperatorsHost, MinOp) { raft::min_op op; ASSERT_TRUE(raft::match(3.0f, op(3.0f, 5.0f), raft::CompareApprox<float>(0.0001f))); ASSERT_TRUE(raft::match(3.0, op(5.0, 3.0), raft::CompareApprox<double>(0.000001))); ASSERT_TRUE(raft::match(3, op(3, 5), raft::Compare<int>())); } TEST(OperatorsHost, MaxOp) { raft::max_op op; ASSERT_TRUE(raft::match(5.0f, op(3.0f, 5.0f), raft::CompareApprox<float>(0.0001f))); ASSERT_TRUE(raft::match(5.0, op(5.0, 3.0), raft::CompareApprox<double>(0.000001))); ASSERT_TRUE(raft::match(5, op(3, 5), 
raft::Compare<int>())); } TEST(OperatorsHost, SqDiffOp) { raft::sqdiff_op op; ASSERT_TRUE(raft::match(4.0f, op(3.0f, 5.0f), raft::CompareApprox<float>(0.0001f))); ASSERT_TRUE(raft::match(4.0, op(5.0, 3.0), raft::CompareApprox<double>(0.000001))); } TEST(OperatorsHost, ArgminOp) { raft::argmin_op op; raft::KeyValuePair<int, float> kvp_a(0, 1.2f); raft::KeyValuePair<int, float> kvp_b(0, 3.4f); raft::KeyValuePair<int, float> kvp_c(1, 1.2f); ASSERT_TRUE( raft::match(kvp_a, op(kvp_a, kvp_b), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE( raft::match(kvp_a, op(kvp_b, kvp_a), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE( raft::match(kvp_a, op(kvp_a, kvp_c), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE( raft::match(kvp_a, op(kvp_c, kvp_a), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE( raft::match(kvp_c, op(kvp_b, kvp_c), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE( raft::match(kvp_c, op(kvp_c, kvp_b), raft::Compare<raft::KeyValuePair<int, float>>())); } TEST(OperatorsHost, ArgmaxOp) { raft::argmax_op op; raft::KeyValuePair<int, float> kvp_a(0, 1.2f); raft::KeyValuePair<int, float> kvp_b(0, 3.4f); raft::KeyValuePair<int, float> kvp_c(1, 1.2f); ASSERT_TRUE( raft::match(kvp_b, op(kvp_a, kvp_b), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE( raft::match(kvp_b, op(kvp_b, kvp_a), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE( raft::match(kvp_a, op(kvp_a, kvp_c), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE( raft::match(kvp_a, op(kvp_c, kvp_a), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE( raft::match(kvp_b, op(kvp_b, kvp_c), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE( raft::match(kvp_b, op(kvp_c, kvp_b), raft::Compare<raft::KeyValuePair<int, float>>())); } TEST(OperatorsHost, ConstOp) { raft::const_op op(12.34f); ASSERT_TRUE(raft::match(12.34f, op(), raft::Compare<float>())); 
ASSERT_TRUE(raft::match(12.34f, op(42), raft::Compare<float>())); ASSERT_TRUE(raft::match(12.34f, op(13, 37.0f), raft::Compare<float>())); } template <typename T> struct trinary_add { const T c; constexpr explicit trinary_add(const T& c_) : c{c_} {} constexpr RAFT_INLINE_FUNCTION auto operator()(T a, T b) const { return a + b + c; } }; TEST(OperatorsHost, PlugConstOp) { // First, wrap around a default-constructible op { raft::plug_const_op<float, raft::add_op> op(0.34f); ASSERT_TRUE(raft::match(12.34f, op(12.0f), raft::CompareApprox<float>(0.0001f))); } // Second, wrap around a non-default-constructible op { auto op = raft::plug_const_op(10.0f, trinary_add<float>(2.0f)); ASSERT_TRUE(raft::match(12.34f, op(0.34f), raft::CompareApprox<float>(0.0001f))); } } TEST(OperatorsHost, AddConstOp) { raft::add_const_op<float> op(0.34f); ASSERT_TRUE(raft::match(12.34f, op(12.0f), raft::CompareApprox<float>(0.0001f))); } TEST(OperatorsHost, SubConstOp) { raft::sub_const_op<float> op(0.66f); ASSERT_TRUE(raft::match(12.34f, op(13.0f), raft::CompareApprox<float>(0.0001f))); } TEST(OperatorsHost, MulConstOp) { raft::mul_const_op<float> op(2.0f); ASSERT_TRUE(raft::match(12.34f, op(6.17f), raft::CompareApprox<float>(0.0001f))); } TEST(OperatorsHost, DivConstOp) { raft::div_const_op<float> op(3.0f); ASSERT_TRUE(raft::match(12.34f, op(37.02f), raft::CompareApprox<float>(0.0001f))); } TEST(OperatorsHost, DivCheckZeroConstOp) { // Non-zero denominator { raft::div_checkzero_const_op<float> op(3.0f); ASSERT_TRUE(raft::match(12.34f, op(37.02f), raft::CompareApprox<float>(0.0001f))); } // Zero denominator { raft::div_checkzero_const_op<float> op(0.0f); ASSERT_TRUE(raft::match(0.0f, op(37.02f), raft::CompareApprox<float>(0.0001f))); } } TEST(OperatorsHost, PowConstOp) { raft::pow_const_op<float> op(3.0f); ASSERT_TRUE(raft::match(1000.0f, op(10.0f), raft::CompareApprox<float>(0.0001f))); } TEST(OperatorsHost, ComposeOp) { // All ops are default-constructible { raft::compose_op<raft::sqrt_op, 
raft::abs_op, raft::cast_op<float>> op; ASSERT_TRUE(raft::match(std::sqrt(42.0f), op(-42, 0), raft::CompareApprox<float>(0.0001f))); } // Some ops are not default-constructible { auto op = raft::compose_op( raft::sqrt_op(), raft::abs_op(), raft::add_const_op<float>(8.0f), raft::cast_op<float>()); ASSERT_TRUE(raft::match(std::sqrt(42.0f), op(-50, 0), raft::CompareApprox<float>(0.0001f))); } } TEST(OperatorsHost, MapArgsOp) { // All ops are default-constructible { raft::map_args_op<raft::add_op, raft::sq_op, raft::abs_op> op; ASSERT_TRUE(raft::match(42.0f, op(5.0f, -17.0f), raft::CompareApprox<float>(0.0001f))); } // Some ops are not default-constructible { auto op = raft::map_args_op( raft::add_op(), raft::pow_const_op<float>(2.0f), raft::mul_const_op<float>(-1.0f)); ASSERT_TRUE(raft::match(42.0f, op(5.0f, -17.0f), raft::CompareApprox<float>(0.0001f))); } }
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/core/operators_device.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <cmath> #include <gtest/gtest.h> #include "../test_utils.cuh" #include <raft/core/kvp.hpp> #include <raft/core/operators.hpp> #include <rmm/cuda_stream.hpp> #include <rmm/device_scalar.hpp> template <typename OutT, typename OpT, typename... Args> RAFT_KERNEL eval_op_on_device_kernel(OutT* out, OpT op, Args... args) { out[0] = op(std::forward<Args>(args)...); } template <typename OpT, typename... Args> auto eval_op_on_device(OpT op, Args&&... 
args) { typedef decltype(op(args...)) OutT; auto stream = rmm::cuda_stream_default; rmm::device_scalar<OutT> result(stream); eval_op_on_device_kernel<<<1, 1, 0, stream>>>(result.data(), op, std::forward<Args>(args)...); return result.value(stream); } TEST(OperatorsDevice, IdentityOp) { raft::identity_op op; ASSERT_TRUE(raft::match(12.34f, eval_op_on_device(op, 12.34f, 0), raft::Compare<float>())); } TEST(OperatorsDevice, CastOp) { raft::cast_op<float> op; ASSERT_TRUE( raft::match(1234.0f, eval_op_on_device(op, 1234, 0), raft::CompareApprox<float>(0.00001f))); } TEST(OperatorsDevice, KeyOp) { raft::key_op op; raft::KeyValuePair<int, float> kvp(12, 3.4f); ASSERT_TRUE(raft::match(12, eval_op_on_device(op, kvp, 0), raft::Compare<int>())); } TEST(OperatorsDevice, ValueOp) { raft::value_op op; raft::KeyValuePair<int, float> kvp(12, 3.4f); ASSERT_TRUE( raft::match(3.4f, eval_op_on_device(op, kvp, 0), raft::CompareApprox<float>(0.00001f))); } TEST(OperatorsDevice, SqrtOpF) { raft::sqrt_op op; ASSERT_TRUE(raft::match( std::sqrt(12.34f), eval_op_on_device(op, 12.34f, 0), raft::CompareApprox<float>(0.0001f))); ASSERT_TRUE(raft::match( std::sqrt(12.34), eval_op_on_device(op, 12.34, 0), raft::CompareApprox<double>(0.000001))); } TEST(OperatorsDevice, NZOp) { raft::nz_op op; ASSERT_TRUE( raft::match(0.0f, eval_op_on_device(op, 0.0f, 0), raft::CompareApprox<float>(0.00001f))); ASSERT_TRUE( raft::match(1.0f, eval_op_on_device(op, 12.34f, 0), raft::CompareApprox<float>(0.00001f))); } TEST(OperatorsDevice, AbsOp) { raft::abs_op op; ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, -12.34f, 0), raft::CompareApprox<float>(0.00001f))); ASSERT_TRUE( raft::match(12.34, eval_op_on_device(op, -12.34, 0), raft::CompareApprox<double>(0.000001))); ASSERT_TRUE(raft::match(1234, eval_op_on_device(op, -1234, 0), raft::Compare<int>())); } TEST(OperatorsDevice, SqOp) { raft::sq_op op; ASSERT_TRUE( raft::match(152.2756f, eval_op_on_device(op, 12.34f, 0), raft::CompareApprox<float>(0.00001f))); 
ASSERT_TRUE(raft::match(289, eval_op_on_device(op, -17, 0), raft::Compare<int>())); } TEST(OperatorsDevice, AddOp) { raft::add_op op; ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, 12.0f, 0.34f), raft::CompareApprox<float>(0.00001f))); ASSERT_TRUE(raft::match(1234, eval_op_on_device(op, 1200, 34), raft::Compare<int>())); } TEST(OperatorsDevice, SubOp) { raft::sub_op op; ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, 13.0f, 0.66f), raft::CompareApprox<float>(0.00001f))); ASSERT_TRUE(raft::match(1234, eval_op_on_device(op, 1300, 66), raft::Compare<int>())); } TEST(OperatorsDevice, MulOp) { raft::mul_op op; ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, 2.0f, 6.17f), raft::CompareApprox<float>(0.00001f))); } TEST(OperatorsDevice, DivOp) { raft::div_op op; ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, 37.02f, 3.0f), raft::CompareApprox<float>(0.00001f))); } TEST(OperatorsDevice, DivCheckZeroOp) { raft::div_checkzero_op op; ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, 37.02f, 3.0f), raft::CompareApprox<float>(0.00001f))); ASSERT_TRUE( raft::match(0.0f, eval_op_on_device(op, 37.02f, 0.0f), raft::CompareApprox<float>(0.00001f))); } TEST(OperatorsDevice, PowOp) { raft::pow_op op; ASSERT_TRUE( raft::match(1000.0f, eval_op_on_device(op, 10.0f, 3.0f), raft::CompareApprox<float>(0.0001f))); ASSERT_TRUE( raft::match(1000.0, eval_op_on_device(op, 10.0, 3.0), raft::CompareApprox<double>(0.000001))); } TEST(OperatorsDevice, MinOp) { raft::min_op op; ASSERT_TRUE( raft::match(3.0f, eval_op_on_device(op, 3.0f, 5.0f), raft::CompareApprox<float>(0.0001f))); ASSERT_TRUE( raft::match(3.0, eval_op_on_device(op, 5.0, 3.0), raft::CompareApprox<double>(0.000001))); ASSERT_TRUE(raft::match(3, eval_op_on_device(op, 3, 5), raft::Compare<int>())); } TEST(OperatorsDevice, MaxOp) { raft::max_op op; ASSERT_TRUE( raft::match(5.0f, eval_op_on_device(op, 3.0f, 5.0f), raft::CompareApprox<float>(0.0001f))); ASSERT_TRUE( raft::match(5.0, eval_op_on_device(op, 
5.0, 3.0), raft::CompareApprox<double>(0.000001))); ASSERT_TRUE(raft::match(5, eval_op_on_device(op, 3, 5), raft::Compare<int>())); } TEST(OperatorsDevice, SqDiffOp) { raft::sqdiff_op op; ASSERT_TRUE( raft::match(4.0f, eval_op_on_device(op, 3.0f, 5.0f), raft::CompareApprox<float>(0.0001f))); ASSERT_TRUE( raft::match(4.0, eval_op_on_device(op, 5.0, 3.0), raft::CompareApprox<double>(0.000001))); } TEST(OperatorsDevice, ArgminOp) { raft::argmin_op op; raft::KeyValuePair<int, float> kvp_a(0, 1.2f); raft::KeyValuePair<int, float> kvp_b(0, 3.4f); raft::KeyValuePair<int, float> kvp_c(1, 1.2f); ASSERT_TRUE(raft::match( kvp_a, eval_op_on_device(op, kvp_a, kvp_b), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE(raft::match( kvp_a, eval_op_on_device(op, kvp_b, kvp_a), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE(raft::match( kvp_a, eval_op_on_device(op, kvp_a, kvp_c), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE(raft::match( kvp_a, eval_op_on_device(op, kvp_c, kvp_a), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE(raft::match( kvp_c, eval_op_on_device(op, kvp_b, kvp_c), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE(raft::match( kvp_c, eval_op_on_device(op, kvp_c, kvp_b), raft::Compare<raft::KeyValuePair<int, float>>())); } TEST(OperatorsDevice, ArgmaxOp) { raft::argmax_op op; raft::KeyValuePair<int, float> kvp_a(0, 1.2f); raft::KeyValuePair<int, float> kvp_b(0, 3.4f); raft::KeyValuePair<int, float> kvp_c(1, 1.2f); ASSERT_TRUE(raft::match( kvp_b, eval_op_on_device(op, kvp_a, kvp_b), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE(raft::match( kvp_b, eval_op_on_device(op, kvp_b, kvp_a), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE(raft::match( kvp_a, eval_op_on_device(op, kvp_a, kvp_c), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE(raft::match( kvp_a, eval_op_on_device(op, kvp_c, kvp_a), raft::Compare<raft::KeyValuePair<int, float>>())); 
ASSERT_TRUE(raft::match( kvp_b, eval_op_on_device(op, kvp_b, kvp_c), raft::Compare<raft::KeyValuePair<int, float>>())); ASSERT_TRUE(raft::match( kvp_b, eval_op_on_device(op, kvp_c, kvp_b), raft::Compare<raft::KeyValuePair<int, float>>())); } TEST(OperatorsDevice, ConstOp) { raft::const_op op(12.34f); ASSERT_TRUE(raft::match(12.34f, eval_op_on_device(op), raft::Compare<float>())); ASSERT_TRUE(raft::match(12.34f, eval_op_on_device(op, 42), raft::Compare<float>())); ASSERT_TRUE(raft::match(12.34f, eval_op_on_device(op, 13, 37.0f), raft::Compare<float>())); } template <typename T> struct trinary_add { const T c; constexpr explicit trinary_add(const T& c_) : c{c_} {} constexpr RAFT_INLINE_FUNCTION auto operator()(T a, T b) const { return a + b + c; } }; TEST(OperatorsDevice, PlugConstOp) { // First, wrap around a default-constructible op { raft::plug_const_op<float, raft::add_op> op(0.34f); ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, 12.0f), raft::CompareApprox<float>(0.0001f))); } // Second, wrap around a non-default-constructible op { auto op = raft::plug_const_op(10.0f, trinary_add<float>(2.0f)); ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, 0.34f), raft::CompareApprox<float>(0.0001f))); } } TEST(OperatorsDevice, AddConstOp) { raft::add_const_op<float> op(0.34f); ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, 12.0f), raft::CompareApprox<float>(0.0001f))); } TEST(OperatorsDevice, SubConstOp) { raft::sub_const_op<float> op(0.66f); ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, 13.0f), raft::CompareApprox<float>(0.0001f))); } TEST(OperatorsDevice, MulConstOp) { raft::mul_const_op<float> op(2.0f); ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, 6.17f), raft::CompareApprox<float>(0.0001f))); } TEST(OperatorsDevice, DivConstOp) { raft::div_const_op<float> op(3.0f); ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, 37.02f), raft::CompareApprox<float>(0.0001f))); } TEST(OperatorsDevice, DivCheckZeroConstOp) { // Non-zero 
denominator { raft::div_checkzero_const_op<float> op(3.0f); ASSERT_TRUE( raft::match(12.34f, eval_op_on_device(op, 37.02f), raft::CompareApprox<float>(0.0001f))); } // Zero denominator { raft::div_checkzero_const_op<float> op(0.0f); ASSERT_TRUE( raft::match(0.0f, eval_op_on_device(op, 37.02f), raft::CompareApprox<float>(0.0001f))); } } TEST(OperatorsDevice, PowConstOp) { raft::pow_const_op<float> op(3.0f); ASSERT_TRUE( raft::match(1000.0f, eval_op_on_device(op, 10.0f), raft::CompareApprox<float>(0.0001f))); } TEST(OperatorsDevice, ComposeOp) { // All ops are default-constructible { raft::compose_op<raft::sqrt_op, raft::abs_op, raft::cast_op<float>> op; ASSERT_TRUE(raft::match( std::sqrt(42.0f), eval_op_on_device(op, -42, 0), raft::CompareApprox<float>(0.0001f))); } // Some ops are not default-constructible { auto op = raft::compose_op( raft::sqrt_op(), raft::abs_op(), raft::add_const_op<float>(8.0f), raft::cast_op<float>()); ASSERT_TRUE(raft::match( std::sqrt(42.0f), eval_op_on_device(op, -50, 0), raft::CompareApprox<float>(0.0001f))); } } TEST(OperatorsDevice, MapArgsOp) { // All ops are default-constructible { raft::map_args_op<raft::add_op, raft::sq_op, raft::abs_op> op; ASSERT_TRUE( raft::match(42.0f, eval_op_on_device(op, 5.0f, -17.0f), raft::CompareApprox<float>(0.0001f))); } // Some ops are not default-constructible { auto op = raft::map_args_op( raft::add_op(), raft::pow_const_op<float>(2.0f), raft::mul_const_op<float>(-1.0f)); ASSERT_TRUE( raft::match(42.0f, eval_op_on_device(op, 5.0f, -17.0f), raft::CompareApprox<float>(0.0001f))); } }
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/core/test_span.hpp
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/span.hpp> namespace raft { template <typename Iter> __host__ __device__ void initialize_range(Iter _begin, Iter _end) { float j = 0; for (Iter i = _begin; i != _end; ++i, ++j) { *i = j; } } #define SPAN_ASSERT_TRUE(cond, status) \ if (!(cond)) { *(status) = -1; } #define SPAN_ASSERT_FALSE(cond, status) \ if ((cond)) { *(status) = -1; } struct test_test_status_t { int* status; explicit test_test_status_t(int* _status) : status(_status) {} __host__ __device__ void operator()() { this->operator()(0); } __host__ __device__ void operator()(int _idx) { SPAN_ASSERT_TRUE(false, status); } }; template <bool is_device> struct test_assignment_t { int* status; explicit test_assignment_t(int* _status) : status(_status) {} __host__ __device__ void operator()() { this->operator()(0); } __host__ __device__ void operator()(int _idx) { span<float, is_device> s1; float arr[] = {3, 4, 5}; span<const float, is_device> s2 = arr; SPAN_ASSERT_TRUE(s2.size() == 3, status); SPAN_ASSERT_TRUE(s2.data() == &arr[0], status); s2 = s1; SPAN_ASSERT_TRUE(s2.empty(), status); } }; template <bool is_device> struct test_beginend_t { int* status; explicit test_beginend_t(int* _status) : status(_status) {} __host__ __device__ void operator()() { this->operator()(0); } __host__ __device__ void operator()(int _idx) { float arr[16]; initialize_range(arr, arr + 16); span<float, is_device> 
s(arr); typename span<float, is_device>::iterator beg{s.begin()}; typename span<float, is_device>::iterator end{s.end()}; SPAN_ASSERT_TRUE(end == beg + 16, status); SPAN_ASSERT_TRUE(*beg == arr[0], status); SPAN_ASSERT_TRUE(*(end - 1) == arr[15], status); } }; template <bool is_device> struct test_rbeginrend_t { int* status; explicit test_rbeginrend_t(int* _status) : status(_status) {} __host__ __device__ void operator()() { this->operator()(0); } __host__ __device__ void operator()(int _idx) { float arr[16]; initialize_range(arr, arr + 16); span<float, is_device> s(arr); s.rbegin(); typename span<float, is_device>::reverse_iterator rbeg{s.rbegin()}; typename span<float, is_device>::reverse_iterator rend{s.rend()}; SPAN_ASSERT_TRUE(rbeg + 16 == rend, status); SPAN_ASSERT_TRUE(*(rbeg) == arr[15], status); SPAN_ASSERT_TRUE(*(rend - 1) == arr[0], status); typename span<float, is_device>::const_reverse_iterator crbeg{s.crbegin()}; typename span<float, is_device>::const_reverse_iterator crend{s.crend()}; SPAN_ASSERT_TRUE(crbeg + 16 == crend, status); SPAN_ASSERT_TRUE(*(crbeg) == arr[15], status); SPAN_ASSERT_TRUE(*(crend - 1) == arr[0], status); } }; template <bool is_device> struct test_observers_t { int* status; explicit test_observers_t(int* _status) : status(_status) {} __host__ __device__ void operator()() { this->operator()(0); } __host__ __device__ void operator()(int _idx) { // empty { float* arr = nullptr; span<float, is_device> s(arr, static_cast<typename span<float, is_device>::size_type>(0)); SPAN_ASSERT_TRUE(s.empty(), status); } // size, size_types { float* arr = new float[16]; span<float, is_device> s(arr, 16); SPAN_ASSERT_TRUE(s.size() == 16, status); SPAN_ASSERT_TRUE(s.size_bytes() == 16 * sizeof(float), status); delete[] arr; } } }; template <bool is_device> struct test_compare_t { int* status; explicit test_compare_t(int* _status) : status(_status) {} __host__ __device__ void operator()() { this->operator()(0); } __host__ __device__ void 
operator()(int _idx) { float lhs_arr[16], rhs_arr[16]; initialize_range(lhs_arr, lhs_arr + 16); initialize_range(rhs_arr, rhs_arr + 16); span<float, is_device> lhs(lhs_arr); span<float, is_device> rhs(rhs_arr); SPAN_ASSERT_TRUE(lhs == rhs, status); SPAN_ASSERT_FALSE(lhs != rhs, status); SPAN_ASSERT_TRUE(lhs <= rhs, status); SPAN_ASSERT_TRUE(lhs >= rhs, status); lhs[2] -= 1; SPAN_ASSERT_FALSE(lhs == rhs, status); SPAN_ASSERT_TRUE(lhs < rhs, status); SPAN_ASSERT_FALSE(lhs > rhs, status); } }; template <bool is_device> struct test_as_bytes_t { int* status; explicit test_as_bytes_t(int* _status) : status(_status) {} __host__ __device__ void operator()() { this->operator()(0); } __host__ __device__ void operator()(int _idx) { float arr[16]; initialize_range(arr, arr + 16); { const span<const float, is_device> s{arr}; const span<const std::byte, is_device> bs = as_bytes(s); SPAN_ASSERT_TRUE(bs.size() == s.size_bytes(), status); SPAN_ASSERT_TRUE(static_cast<const void*>(bs.data()) == static_cast<const void*>(s.data()), status); } { span<float, is_device> s; const span<const std::byte, is_device> bs = as_bytes(s); SPAN_ASSERT_TRUE(bs.size() == s.size(), status); SPAN_ASSERT_TRUE(bs.size() == 0, status); SPAN_ASSERT_TRUE(bs.size_bytes() == 0, status); SPAN_ASSERT_TRUE(static_cast<const void*>(bs.data()) == static_cast<const void*>(s.data()), status); SPAN_ASSERT_TRUE(bs.data() == nullptr, status); } } }; template <bool is_device> struct test_as_writable_bytes_t { int* status; explicit test_as_writable_bytes_t(int* _status) : status(_status) {} __host__ __device__ void operator()() { this->operator()(0); } __host__ __device__ void operator()(int _idx) { float arr[16]; initialize_range(arr, arr + 16); { span<float, is_device> s; span<std::byte, is_device> byte_s = as_writable_bytes(s); SPAN_ASSERT_TRUE(byte_s.size() == s.size(), status); SPAN_ASSERT_TRUE(byte_s.size_bytes() == s.size_bytes(), status); SPAN_ASSERT_TRUE(byte_s.size() == 0, status); 
SPAN_ASSERT_TRUE(byte_s.size_bytes() == 0, status); SPAN_ASSERT_TRUE(byte_s.data() == nullptr, status); SPAN_ASSERT_TRUE(static_cast<void*>(byte_s.data()) == static_cast<void*>(s.data()), status); } { span<float, is_device> s{arr}; span<std::byte, is_device> bs{as_writable_bytes(s)}; SPAN_ASSERT_TRUE(s.size_bytes() == bs.size_bytes(), status); SPAN_ASSERT_TRUE(static_cast<void*>(bs.data()) == static_cast<void*>(s.data()), status); } } }; } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_matrix_detail_select_k.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/matrix/detail/select_k.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/00_generate.py
# Copyright (c) 2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. copyright_notice = """ /* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ """ ext_headers = [ "raft/neighbors/brute_force-ext.cuh", "raft/distance/distance-ext.cuh", "raft/distance/detail/pairwise_matrix/dispatch-ext.cuh", "raft/matrix/detail/select_k-ext.cuh", "raft/neighbors/ball_cover-ext.cuh", "raft/spatial/knn/detail/fused_l2_knn-ext.cuh", "raft/distance/fused_l2_nn-ext.cuh", "raft/neighbors/ivf_pq-ext.cuh", "raft/util/memory_pool-ext.hpp", "raft/neighbors/ivf_flat-ext.cuh", "raft/core/logger-ext.hpp", "raft/neighbors/refine-ext.cuh", "raft/neighbors/detail/ivf_flat_search-ext.cuh", "raft/neighbors/detail/selection_faiss-ext.cuh", "raft/linalg/detail/coalesced_reduction-ext.cuh", "raft/spatial/knn/detail/ball_cover/registers-ext.cuh", "raft/neighbors/detail/ivf_flat_interleaved_scan-ext.cuh", "raft/neighbors/detail/ivf_pq_compute_similarity-ext.cuh", ] for ext_header in ext_headers: header = ext_header.replace("-ext", "") path = ( header .replace("/", "_") .replace(".cuh", ".cu") .replace(".hpp", ".cpp") ) with open(path, "w") as f: f.write(copyright_notice) f.write(f"#include <{header}>\n") # For in CMakeLists.txt print(f"test/ext_headers/{path}")
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_neighbors_detail_selection_faiss.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/neighbors/detail/selection_faiss.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_neighbors_ivf_flat.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/neighbors/ivf_flat.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_neighbors_ivf_pq.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/neighbors/ivf_pq.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_neighbors_detail_ivf_pq_compute_similarity.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/neighbors/detail/ivf_pq_compute_similarity.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_neighbors_ball_cover.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/neighbors/ball_cover.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_spatial_knn_detail_ball_cover_registers.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/spatial/knn/detail/ball_cover/registers.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_spatial_knn_detail_fused_l2_knn.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/spatial/knn/detail/fused_l2_knn.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_neighbors_detail_ivf_flat_search.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/neighbors/detail/ivf_flat_search.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_neighbors_refine.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/neighbors/refine.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_neighbors_detail_ivf_flat_interleaved_scan.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/neighbors/detail/ivf_flat_interleaved_scan.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_core_logger.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/core/logger.hpp>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_util_memory_pool.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/util/memory_pool.hpp>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_distance_detail_pairwise_matrix_dispatch.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/distance/detail/pairwise_matrix/dispatch.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_neighbors_brute_force.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/neighbors/brute_force.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_distance_distance.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/distance/distance.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_linalg_detail_coalesced_reduction.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/linalg/detail/coalesced_reduction.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/ext_headers/raft_distance_fused_l2_nn.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by 00_generate.py * * Make changes there and run in this directory: * * > python 00_generate.py * */ #include <raft/distance/fused_l2_nn.cuh>
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/lap/lap.cu
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * Copyright 2020 KETAN DATE & RAKESH NAGI * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * CUDA Implementation of O(n^3) alternating tree Hungarian Algorithm * Authors: Ketan Date and Rakesh Nagi * * Article reference: * Date, Ketan, and Rakesh Nagi. "GPU-accelerated Hungarian algorithms * for the Linear Assignment Problem." Parallel Computing 57 (2016): 52-72. * */ #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <rmm/device_uvector.hpp> #include <iostream> #include <omp.h> #include <raft/solver/linear_assignment.cuh> #include <random> #define PROBLEMSIZE 1000 // Number of rows/columns #define BATCHSIZE 10 // Number of problems in the batch #define COSTRANGE 1000 #define PROBLEMCOUNT 1 #define REPETITIONS 1 #define SEED 01010001 std::default_random_engine generator(SEED); namespace raft { // Function for generating problem with uniformly distributed integer costs between [0, COSTRANGE]. 
template <typename weight_t> void generateProblem(weight_t* cost_matrix, int SP, int N, int costrange) { long N2 = SP * N * N; std::uniform_int_distribution<int> distribution(0, costrange); for (long i = 0; i < N2; i++) { int val = distribution(generator); cost_matrix[i] = (weight_t)val; } } template <typename vertex_t, typename weight_t> void hungarian_test(int problemsize, int costrange, int problemcount, int repetitions, int batchsize, weight_t epsilon, bool verbose = false) { raft::resources handle; weight_t* h_cost = new weight_t[batchsize * problemsize * problemsize]; for (int j = 0; j < problemcount; j++) { generateProblem(h_cost, batchsize, problemsize, costrange); rmm::device_uvector<weight_t> elements_v(batchsize * problemsize * problemsize, resource::get_cuda_stream(handle)); rmm::device_uvector<vertex_t> row_assignment_v(batchsize * problemsize, resource::get_cuda_stream(handle)); rmm::device_uvector<vertex_t> col_assignment_v(batchsize * problemsize, resource::get_cuda_stream(handle)); raft::update_device(elements_v.data(), h_cost, batchsize * problemsize * problemsize, resource::get_cuda_stream(handle)); for (int i = 0; i < repetitions; i++) { float start = omp_get_wtime(); // Create an instance of LinearAssignmentProblem using problem size, number of subproblems raft::solver::LinearAssignmentProblem<vertex_t, weight_t> lpx( handle, problemsize, batchsize, epsilon); // Solve LAP(s) for given cost matrix lpx.solve(elements_v.data(), row_assignment_v.data(), col_assignment_v.data()); float end = omp_get_wtime(); float total_time = (end - start); if (verbose) { // Use getPrimalObjectiveValue and getDualObjectiveValue APIs to get primal and dual // objectives. At optimality both values should match. 
for (int k = 0; k < batchsize; k++) { std::cout << j << ":" << i << ":" << k << ":" << lpx.getPrimalObjectiveValue(k) << ":" << lpx.getDualObjectiveValue(k) << ":" << total_time << std::endl; } } } } delete[] h_cost; } TEST(Raft, HungarianIntFloat) { hungarian_test<int, float>( PROBLEMSIZE, COSTRANGE, PROBLEMCOUNT, REPETITIONS, BATCHSIZE, float{1e-6}); } TEST(Raft, HungarianIntDouble) { hungarian_test<int, double>( PROBLEMSIZE, COSTRANGE, PROBLEMCOUNT, REPETITIONS, BATCHSIZE, double{1e-6}); } TEST(Raft, HungarianIntLong) { hungarian_test<int, long>(PROBLEMSIZE, COSTRANGE, PROBLEMCOUNT, REPETITIONS, BATCHSIZE, long{0}); } TEST(Raft, HungarianLongFloat) { hungarian_test<long, float>( PROBLEMSIZE, COSTRANGE, PROBLEMCOUNT, REPETITIONS, BATCHSIZE, float{1e-6}); } TEST(Raft, HungarianLongDouble) { hungarian_test<long, double>( PROBLEMSIZE, COSTRANGE, PROBLEMCOUNT, REPETITIONS, BATCHSIZE, double{1e-6}); } TEST(Raft, HungarianLongLong) { hungarian_test<long, long>(PROBLEMSIZE, COSTRANGE, PROBLEMCOUNT, REPETITIONS, BATCHSIZE, long{0}); } } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/convert_coo.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/sparse/convert/coo.cuh> #include <raft/sparse/csr.hpp> #include <raft/util/cudart_utils.hpp> #include "../test_utils.cuh" #include <iostream> #include <limits> namespace raft { namespace sparse { template <typename Index_> struct CSRtoCOOInputs { std::vector<Index_> ex_scan; std::vector<Index_> verify; }; template <typename Index_> class CSRtoCOOTest : public ::testing::TestWithParam<CSRtoCOOInputs<Index_>> { public: CSRtoCOOTest() : params(::testing::TestWithParam<CSRtoCOOInputs<Index_>>::GetParam()), stream(resource::get_cuda_stream(handle)), ex_scan(params.ex_scan.size(), stream), verify(params.verify.size(), stream), result(params.verify.size(), stream) { } protected: void SetUp() override {} void Run() { Index_ n_rows = params.ex_scan.size(); Index_ nnz = params.verify.size(); raft::update_device(ex_scan.data(), params.ex_scan.data(), n_rows, stream); raft::update_device(verify.data(), params.verify.data(), nnz, stream); convert::csr_to_coo<Index_>(ex_scan.data(), n_rows, result.data(), nnz, stream); ASSERT_TRUE( raft::devArrMatch<Index_>(verify.data(), result.data(), nnz, raft::Compare<float>(), stream)); } protected: raft::resources handle; cudaStream_t stream; CSRtoCOOInputs<Index_> params; rmm::device_uvector<Index_> ex_scan, verify, result; }; using 
CSRtoCOOTestI = CSRtoCOOTest<int>; TEST_P(CSRtoCOOTestI, Result) { Run(); } using CSRtoCOOTestL = CSRtoCOOTest<int64_t>; TEST_P(CSRtoCOOTestL, Result) { Run(); } const std::vector<CSRtoCOOInputs<int>> csrtocoo_inputs_32 = { {{0, 0, 2, 2}, {1, 1, 3}}, {{0, 4, 8, 9}, {0, 0, 0, 0, 1, 1, 1, 1, 2, 3}}, }; const std::vector<CSRtoCOOInputs<int64_t>> csrtocoo_inputs_64 = { {{0, 0, 2, 2}, {1, 1, 3}}, {{0, 4, 8, 9}, {0, 0, 0, 0, 1, 1, 1, 1, 2, 3}}, }; INSTANTIATE_TEST_CASE_P(SparseConvertCOOTest, CSRtoCOOTestI, ::testing::ValuesIn(csrtocoo_inputs_32)); INSTANTIATE_TEST_CASE_P(SparseConvertCOOTest, CSRtoCOOTestL, ::testing::ValuesIn(csrtocoo_inputs_64)); } // namespace sparse } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/distance.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <cusparse_v2.h> #include <raft/distance/distance_types.hpp> #include <raft/sparse/detail/cusparse_wrappers.h> #include <raft/util/cudart_utils.hpp> #include <raft/sparse/distance/distance.cuh> #include "../test_utils.cuh" namespace raft { namespace sparse { namespace distance { using namespace raft; using namespace raft::sparse; template <typename value_idx, typename value_t> struct SparseDistanceInputs { value_idx n_cols; std::vector<value_idx> indptr_h; std::vector<value_idx> indices_h; std::vector<value_t> data_h; std::vector<value_t> out_dists_ref_h; raft::distance::DistanceType metric; float metric_arg = 0.0; }; template <typename value_idx, typename value_t> ::std::ostream& operator<<(::std::ostream& os, const SparseDistanceInputs<value_idx, value_t>& dims) { return os; } template <typename value_idx, typename value_t> class SparseDistanceTest : public ::testing::TestWithParam<SparseDistanceInputs<value_idx, value_t>> { public: SparseDistanceTest() : params(::testing::TestWithParam<SparseDistanceInputs<value_idx, value_t>>::GetParam()), indptr(0, resource::get_cuda_stream(handle)), indices(0, resource::get_cuda_stream(handle)), data(0, resource::get_cuda_stream(handle)), out_dists(0, resource::get_cuda_stream(handle)), out_dists_ref(0, resource::get_cuda_stream(handle)) { } void SetUp() override { 
make_data(); int out_size = static_cast<value_idx>(params.indptr_h.size() - 1) * static_cast<value_idx>(params.indptr_h.size() - 1); out_dists.resize(out_size, resource::get_cuda_stream(handle)); auto out = raft::make_device_matrix_view<value_t, value_idx>( out_dists.data(), static_cast<value_idx>(params.indptr_h.size() - 1), static_cast<value_idx>(params.indptr_h.size() - 1)); auto x_structure = raft::make_device_compressed_structure_view<value_idx, value_idx, value_idx>( indptr.data(), indices.data(), static_cast<value_idx>(params.indptr_h.size() - 1), params.n_cols, static_cast<value_idx>(params.indices_h.size())); auto x = raft::make_device_csr_matrix_view<const value_t>(data.data(), x_structure); pairwise_distance(handle, x, x, out, params.metric, params.metric_arg); RAFT_CUDA_TRY(cudaStreamSynchronize(resource::get_cuda_stream(handle))); } void compare() { ASSERT_TRUE(devArrMatch(out_dists_ref.data(), out_dists.data(), params.out_dists_ref_h.size(), CompareApprox<value_t>(1e-3))); } protected: void make_data() { std::vector<value_idx> indptr_h = params.indptr_h; std::vector<value_idx> indices_h = params.indices_h; std::vector<value_t> data_h = params.data_h; auto stream = resource::get_cuda_stream(handle); indptr.resize(indptr_h.size(), stream); indices.resize(indices_h.size(), stream); data.resize(data_h.size(), stream); update_device(indptr.data(), indptr_h.data(), indptr_h.size(), stream); update_device(indices.data(), indices_h.data(), indices_h.size(), stream); update_device(data.data(), data_h.data(), data_h.size(), stream); std::vector<value_t> out_dists_ref_h = params.out_dists_ref_h; out_dists_ref.resize((indptr_h.size() - 1) * (indptr_h.size() - 1), stream); update_device(out_dists_ref.data(), out_dists_ref_h.data(), out_dists_ref_h.size(), resource::get_cuda_stream(handle)); } raft::resources handle; // input data rmm::device_uvector<value_idx> indptr, indices; rmm::device_uvector<value_t> data; // output data rmm::device_uvector<value_t> 
out_dists, out_dists_ref; SparseDistanceInputs<value_idx, value_t> params; }; const std::vector<SparseDistanceInputs<int, float>> inputs_i32_f = { {5, {0, 0, 1, 2}, {1, 2}, {0.5, 0.5}, {0, 1, 1, 1, 0, 1, 1, 1, 0}, raft::distance::DistanceType::CosineExpanded, 0.0}, {5, {0, 0, 1, 2}, {1, 2}, {1.0, 1.0}, {0, 1, 1, 1, 0, 1, 1, 1, 0}, raft::distance::DistanceType::JaccardExpanded, 0.0}, {2, {0, 2, 4, 6, 8}, {0, 1, 0, 1, 0, 1, 0, 1}, // indices {1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f}, { // dense output 0.0, 4.0, 3026.0, 226.0, 4.0, 0.0, 2930.0, 234.0, 3026.0, 2930.0, 0.0, 1832.0, 226.0, 234.0, 1832.0, 0.0, }, raft::distance::DistanceType::L2Expanded, 0.0}, {2, {0, 2, 4, 6, 8}, {0, 1, 0, 1, 0, 1, 0, 1}, {1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f}, {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, raft::distance::DistanceType::InnerProduct, 0.0}, {2, {0, 2, 4, 6, 8}, {0, 1, 0, 1, 0, 1, 0, 1}, // indices {1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f}, { // dense output 0.0, 4.0, 3026.0, 226.0, 4.0, 0.0, 2930.0, 234.0, 3026.0, 2930.0, 0.0, 1832.0, 226.0, 234.0, 1832.0, 0.0, }, raft::distance::DistanceType::L2Unexpanded, 0.0}, {10, {0, 5, 11, 15, 20, 27, 32, 36, 43, 47, 50}, {0, 1, 3, 6, 8, 0, 1, 2, 3, 5, 6, 1, 2, 4, 8, 0, 2, 3, 4, 7, 0, 1, 2, 3, 4, 6, 8, 0, 1, 2, 5, 7, 1, 5, 8, 9, 0, 1, 2, 5, 6, 8, 9, 2, 4, 5, 7, 0, 3, 9}, // indices {0.5438, 0.2695, 0.4377, 0.7174, 0.9251, 0.7648, 0.3322, 0.7279, 0.4131, 0.5167, 0.8655, 0.0730, 0.0291, 0.9036, 0.7988, 0.5019, 0.7663, 0.2190, 0.8206, 0.3625, 0.0411, 0.3995, 0.5688, 0.7028, 0.8706, 0.3199, 0.4431, 0.0535, 0.2225, 0.8853, 0.1932, 0.3761, 0.3379, 0.1771, 0.2107, 0.228, 0.5279, 0.4885, 0.3495, 0.5079, 0.2325, 0.2331, 0.3018, 0.6231, 0.2645, 0.8429, 0.6625, 0.0797, 0.2724, 0.4218}, {0., 0.39419924, 0.54823225, 0.79593037, 0.45658883, 0.93634219, 0.58146987, 0.44940102, 1., 0.76978799, 0.39419924, 0., 0.97577154, 0.48904013, 0.48300801, 0.45087445, 0.73323749, 0.21050481, 
0.54847744, 0.78021386, 0.54823225, 0.97577154, 0., 0.51413997, 0.31195441, 0.96546343, 0.67534399, 0.81665436, 0.8321819, 1., 0.79593037, 0.48904013, 0.51413997, 0., 0.28605559, 0.35772784, 1., 0.60889396, 0.43324829, 0.84923694, 0.45658883, 0.48300801, 0.31195441, 0.28605559, 0., 0.58623212, 0.6745457, 0.60287165, 0.67676228, 0.73155632, 0.93634219, 0.45087445, 0.96546343, 0.35772784, 0.58623212, 0., 0.77917274, 0.48390993, 0.24558392, 0.99166225, 0.58146987, 0.73323749, 0.67534399, 1., 0.6745457, 0.77917274, 0., 0.27605686, 0.76064776, 0.61547536, 0.44940102, 0.21050481, 0.81665436, 0.60889396, 0.60287165, 0.48390993, 0.27605686, 0., 0.51360432, 0.68185144, 1., 0.54847744, 0.8321819, 0.43324829, 0.67676228, 0.24558392, 0.76064776, 0.51360432, 0., 1., 0.76978799, 0.78021386, 1., 0.84923694, 0.73155632, 0.99166225, 0.61547536, 0.68185144, 1., 0.}, raft::distance::DistanceType::CosineExpanded, 0.0}, {10, {0, 5, 11, 15, 20, 27, 32, 36, 43, 47, 50}, {0, 1, 3, 6, 8, 0, 1, 2, 3, 5, 6, 1, 2, 4, 8, 0, 2, 3, 4, 7, 0, 1, 2, 3, 4, 6, 8, 0, 1, 2, 5, 7, 1, 5, 8, 9, 0, 1, 2, 5, 6, 8, 9, 2, 4, 5, 7, 0, 3, 9}, // indices {1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.}, {0.0, 0.42857142857142855, 0.7142857142857143, 0.75, 0.2857142857142857, 0.75, 0.7142857142857143, 0.5, 1.0, 0.6666666666666666, 0.42857142857142855, 0.0, 0.75, 0.625, 0.375, 0.42857142857142855, 0.75, 0.375, 0.75, 0.7142857142857143, 0.7142857142857143, 0.75, 0.0, 0.7142857142857143, 0.42857142857142855, 0.7142857142857143, 0.6666666666666666, 0.625, 0.6666666666666666, 1.0, 0.75, 0.625, 0.7142857142857143, 0.0, 0.5, 0.5714285714285714, 1.0, 0.8, 0.5, 0.6666666666666666, 0.2857142857142857, 0.375, 0.42857142857142855, 0.5, 0.0, 0.6666666666666666, 0.7777777777777778, 0.4444444444444444, 0.7777777777777778, 0.75, 0.75, 0.42857142857142855, 0.7142857142857143, 
0.5714285714285714, 0.6666666666666666, 0.0, 0.7142857142857143, 0.5, 0.5, 0.8571428571428571, 0.7142857142857143, 0.75, 0.6666666666666666, 1.0, 0.7777777777777778, 0.7142857142857143, 0.0, 0.42857142857142855, 0.8571428571428571, 0.8333333333333334, 0.5, 0.375, 0.625, 0.8, 0.4444444444444444, 0.5, 0.42857142857142855, 0.0, 0.7777777777777778, 0.75, 1.0, 0.75, 0.6666666666666666, 0.5, 0.7777777777777778, 0.5, 0.8571428571428571, 0.7777777777777778, 0.0, 1.0, 0.6666666666666666, 0.7142857142857143, 1.0, 0.6666666666666666, 0.75, 0.8571428571428571, 0.8333333333333334, 0.75, 1.0, 0.0}, raft::distance::DistanceType::JaccardExpanded, 0.0}, {10, {0, 5, 11, 15, 20, 27, 32, 36, 43, 47, 50}, {0, 1, 3, 6, 8, 0, 1, 2, 3, 5, 6, 1, 2, 4, 8, 0, 2, 3, 4, 7, 0, 1, 2, 3, 4, 6, 8, 0, 1, 2, 5, 7, 1, 5, 8, 9, 0, 1, 2, 5, 6, 8, 9, 2, 4, 5, 7, 0, 3, 9}, // indices {0.5438, 0.2695, 0.4377, 0.7174, 0.9251, 0.7648, 0.3322, 0.7279, 0.4131, 0.5167, 0.8655, 0.0730, 0.0291, 0.9036, 0.7988, 0.5019, 0.7663, 0.2190, 0.8206, 0.3625, 0.0411, 0.3995, 0.5688, 0.7028, 0.8706, 0.3199, 0.4431, 0.0535, 0.2225, 0.8853, 0.1932, 0.3761, 0.3379, 0.1771, 0.2107, 0.228, 0.5279, 0.4885, 0.3495, 0.5079, 0.2325, 0.2331, 0.3018, 0.6231, 0.2645, 0.8429, 0.6625, 0.0797, 0.2724, 0.4218}, {0.0, 3.3954660629919076, 5.6469232737388815, 6.373112846266441, 4.0212880272531715, 6.916281504639404, 5.741508386786526, 5.411470999663036, 9.0, 4.977014354725805, 3.3954660629919076, 0.0, 7.56256082439209, 5.540261147481582, 4.832322929216881, 4.62003193872216, 6.498056792320361, 4.309846252268695, 6.317531174829905, 6.016362684141827, 5.6469232737388815, 7.56256082439209, 0.0, 5.974878731322299, 4.898357301336036, 6.442097410320605, 5.227077347287883, 7.134101195584642, 5.457753923371659, 7.0, 6.373112846266441, 5.540261147481582, 5.974878731322299, 0.0, 5.5507273748583, 4.897749658726415, 9.0, 8.398776718824767, 3.908281400328807, 4.83431066343688, 4.0212880272531715, 4.832322929216881, 4.898357301336036, 5.5507273748583, 0.0, 
6.632989819428174, 7.438852294822894, 5.6631570310967465, 7.579428202635459, 6.760811985364303, 6.916281504639404, 4.62003193872216, 6.442097410320605, 4.897749658726415, 6.632989819428174, 0.0, 5.249404187382862, 6.072559523278559, 4.07661278488929, 6.19678948003145, 5.741508386786526, 6.498056792320361, 5.227077347287883, 9.0, 7.438852294822894, 5.249404187382862, 0.0, 3.854811639654704, 6.652724827169063, 5.298236851430971, 5.411470999663036, 4.309846252268695, 7.134101195584642, 8.398776718824767, 5.6631570310967465, 6.072559523278559, 3.854811639654704, 0.0, 7.529184598969917, 6.903282911791188, 9.0, 6.317531174829905, 5.457753923371659, 3.908281400328807, 7.579428202635459, 4.07661278488929, 6.652724827169063, 7.529184598969917, 0.0, 7.0, 4.977014354725805, 6.016362684141827, 7.0, 4.83431066343688, 6.760811985364303, 6.19678948003145, 5.298236851430971, 6.903282911791188, 7.0, 0.0}, raft::distance::DistanceType::Canberra, 0.0}, {10, {0, 5, 11, 15, 20, 27, 32, 36, 43, 47, 50}, {0, 1, 3, 6, 8, 0, 1, 2, 3, 5, 6, 1, 2, 4, 8, 0, 2, 3, 4, 7, 0, 1, 2, 3, 4, 6, 8, 0, 1, 2, 5, 7, 1, 5, 8, 9, 0, 1, 2, 5, 6, 8, 9, 2, 4, 5, 7, 0, 3, 9}, // indices {0.5438, 0.2695, 0.4377, 0.7174, 0.9251, 0.7648, 0.3322, 0.7279, 0.4131, 0.5167, 0.8655, 0.0730, 0.0291, 0.9036, 0.7988, 0.5019, 0.7663, 0.2190, 0.8206, 0.3625, 0.0411, 0.3995, 0.5688, 0.7028, 0.8706, 0.3199, 0.4431, 0.0535, 0.2225, 0.8853, 0.1932, 0.3761, 0.3379, 0.1771, 0.2107, 0.228, 0.5279, 0.4885, 0.3495, 0.5079, 0.2325, 0.2331, 0.3018, 0.6231, 0.2645, 0.8429, 0.6625, 0.0797, 0.2724, 0.4218}, {0.0, 1.31462855332296, 1.3690307816129905, 1.698603990921237, 1.3460470789553531, 1.6636670712582544, 1.2651744044972217, 1.1938329352055201, 1.8811409082590185, 1.3653115050624267, 1.31462855332296, 0.0, 1.9447722703291133, 1.42818777206562, 1.4685491458946494, 1.3071999866010466, 1.4988622861692171, 0.9698559287406783, 1.4972023224597841, 1.5243383567266802, 1.3690307816129905, 1.9447722703291133, 0.0, 1.2748400840107568, 
1.0599569946448246, 1.546591282841402, 1.147526531928459, 1.447002179128145, 1.5982242387673176, 1.3112533607072414, 1.698603990921237, 1.42818777206562, 1.2748400840107568, 0.0, 1.038121552545461, 1.011788365364402, 1.3907391109256988, 1.3128200942311496, 1.19595706584447, 1.3233328139624725, 1.3460470789553531, 1.4685491458946494, 1.0599569946448246, 1.038121552545461, 0.0, 1.3642741698145529, 1.3493868683808095, 1.394942694628328, 1.572881849642552, 1.380122665319464, 1.6636670712582544, 1.3071999866010466, 1.546591282841402, 1.011788365364402, 1.3642741698145529, 0.0, 1.018961640373018, 1.0114394258945634, 0.8338711034820684, 1.1247823842299223, 1.2651744044972217, 1.4988622861692171, 1.147526531928459, 1.3907391109256988, 1.3493868683808095, 1.018961640373018, 0.0, 0.7701238110357329, 1.245486437864406, 0.5551259549534626, 1.1938329352055201, 0.9698559287406783, 1.447002179128145, 1.3128200942311496, 1.394942694628328, 1.0114394258945634, 0.7701238110357329, 0.0, 1.1886800117391216, 1.0083692448135637, 1.8811409082590185, 1.4972023224597841, 1.5982242387673176, 1.19595706584447, 1.572881849642552, 0.8338711034820684, 1.245486437864406, 1.1886800117391216, 0.0, 1.3661374102525012, 1.3653115050624267, 1.5243383567266802, 1.3112533607072414, 1.3233328139624725, 1.380122665319464, 1.1247823842299223, 0.5551259549534626, 1.0083692448135637, 1.3661374102525012, 0.0}, raft::distance::DistanceType::LpUnexpanded, 2.0}, {10, {0, 5, 11, 15, 20, 27, 32, 36, 43, 47, 50}, {0, 1, 3, 6, 8, 0, 1, 2, 3, 5, 6, 1, 2, 4, 8, 0, 2, 3, 4, 7, 0, 1, 2, 3, 4, 6, 8, 0, 1, 2, 5, 7, 1, 5, 8, 9, 0, 1, 2, 5, 6, 8, 9, 2, 4, 5, 7, 0, 3, 9}, // indices {0.5438, 0.2695, 0.4377, 0.7174, 0.9251, 0.7648, 0.3322, 0.7279, 0.4131, 0.5167, 0.8655, 0.0730, 0.0291, 0.9036, 0.7988, 0.5019, 0.7663, 0.2190, 0.8206, 0.3625, 0.0411, 0.3995, 0.5688, 0.7028, 0.8706, 0.3199, 0.4431, 0.0535, 0.2225, 0.8853, 0.1932, 0.3761, 0.3379, 0.1771, 0.2107, 0.228, 0.5279, 0.4885, 0.3495, 0.5079, 0.2325, 0.2331, 0.3018, 
0.6231, 0.2645, 0.8429, 0.6625, 0.0797, 0.2724, 0.4218}, {0.0, 0.9251771844789913, 0.9036452083899731, 0.9251771844789913, 0.8706483735804971, 0.9251771844789913, 0.717493881903289, 0.6920214832303888, 0.9251771844789913, 0.9251771844789913, 0.9251771844789913, 0.0, 0.9036452083899731, 0.8655339692155823, 0.8706483735804971, 0.8655339692155823, 0.8655339692155823, 0.6329837991017668, 0.8655339692155823, 0.8655339692155823, 0.9036452083899731, 0.9036452083899731, 0.0, 0.7988276152181608, 0.7028075145996631, 0.9036452083899731, 0.9036452083899731, 0.9036452083899731, 0.8429599432532096, 0.9036452083899731, 0.9251771844789913, 0.8655339692155823, 0.7988276152181608, 0.0, 0.48376552205293305, 0.8206394616536681, 0.8206394616536681, 0.8206394616536681, 0.8429599432532096, 0.8206394616536681, 0.8706483735804971, 0.8706483735804971, 0.7028075145996631, 0.48376552205293305, 0.0, 0.8706483735804971, 0.8706483735804971, 0.8706483735804971, 0.8429599432532096, 0.8706483735804971, 0.9251771844789913, 0.8655339692155823, 0.9036452083899731, 0.8206394616536681, 0.8706483735804971, 0.0, 0.8853924473642432, 0.535821510936138, 0.6497196601457607, 0.8853924473642432, 0.717493881903289, 0.8655339692155823, 0.9036452083899731, 0.8206394616536681, 0.8706483735804971, 0.8853924473642432, 0.0, 0.5279604218147174, 0.6658348373853169, 0.33799874888632914, 0.6920214832303888, 0.6329837991017668, 0.9036452083899731, 0.8206394616536681, 0.8706483735804971, 0.535821510936138, 0.5279604218147174, 0.0, 0.662579808115858, 0.5079750812968089, 0.9251771844789913, 0.8655339692155823, 0.8429599432532096, 0.8429599432532096, 0.8429599432532096, 0.6497196601457607, 0.6658348373853169, 0.662579808115858, 0.0, 0.8429599432532096, 0.9251771844789913, 0.8655339692155823, 0.9036452083899731, 0.8206394616536681, 0.8706483735804971, 0.8853924473642432, 0.33799874888632914, 0.5079750812968089, 0.8429599432532096, 0.0}, raft::distance::DistanceType::Linf, 0.0}, {15, {0, 5, 8, 9, 15, 20, 26, 31, 34, 38, 45}, {0, 
1, 5, 6, 9, 1, 4, 14, 7, 3, 4, 7, 9, 11, 14, 0, 3, 7, 8, 12, 0, 2, 5, 7, 8, 14, 4, 9, 10, 11, 13, 4, 10, 14, 5, 6, 8, 9, 0, 2, 3, 4, 6, 10, 11}, {0.13537497, 0.51440163, 0.17231936, 0.02417618, 0.15372786, 0.17760507, 0.73789274, 0.08450219, 1., 0.20184723, 0.18036963, 0.12581403, 0.13867603, 0.24040536, 0.11288773, 0.00290246, 0.09120187, 0.31190555, 0.43245423, 0.16153588, 0.3233026, 0.05279589, 0.1387149, 0.05962761, 0.41751856, 0.00804045, 0.03262381, 0.27507131, 0.37245804, 0.16378881, 0.15605804, 0.3867739, 0.24908977, 0.36413632, 0.37643732, 0.28910679, 0.0198409, 0.31461499, 0.24412279, 0.08327667, 0.04444576, 0.05047969, 0.26190054, 0.2077349, 0.10803964}, {1.05367121e-08, 8.35309089e-01, 1.00000000e+00, 9.24116813e-01, 9.90039274e-01, 7.97613546e-01, 8.91271059e-01, 1.00000000e+00, 6.64669302e-01, 8.59439512e-01, 8.35309089e-01, 1.05367121e-08, 1.00000000e+00, 7.33151506e-01, 1.00000000e+00, 9.86880955e-01, 9.19154851e-01, 5.38849774e-01, 1.00000000e+00, 8.98332369e-01, 1.00000000e+00, 1.00000000e+00, 0.00000000e+00, 8.03303970e-01, 6.64465915e-01, 8.69374690e-01, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 9.24116813e-01, 7.33151506e-01, 8.03303970e-01, 0.00000000e+00, 8.16225843e-01, 9.39818306e-01, 7.27700415e-01, 7.30155528e-01, 8.89451011e-01, 8.05419635e-01, 9.90039274e-01, 1.00000000e+00, 6.64465915e-01, 8.16225843e-01, 0.00000000e+00, 6.38804490e-01, 1.00000000e+00, 1.00000000e+00, 9.52559809e-01, 9.53789212e-01, 7.97613546e-01, 9.86880955e-01, 8.69374690e-01, 9.39818306e-01, 6.38804490e-01, 0.0, 1.00000000e+00, 9.72569112e-01, 8.24907516e-01, 8.07933016e-01, 8.91271059e-01, 9.19154851e-01, 1.00000000e+00, 7.27700415e-01, 1.00000000e+00, 1.00000000e+00, 0.00000000e+00, 7.63596268e-01, 8.40131263e-01, 7.40428532e-01, 1.00000000e+00, 5.38849774e-01, 1.00000000e+00, 7.30155528e-01, 1.00000000e+00, 9.72569112e-01, 7.63596268e-01, 0.00000000e+00, 1.00000000e+00, 7.95485011e-01, 6.64669302e-01, 1.00000000e+00, 1.00000000e+00, 
8.89451011e-01, 9.52559809e-01, 8.24907516e-01, 8.40131263e-01, 1.00000000e+00, 0.00000000e+00, 8.51370877e-01, 8.59439512e-01, 8.98332369e-01, 1.00000000e+00, 8.05419635e-01, 9.53789212e-01, 8.07933016e-01, 7.40428532e-01, 7.95485011e-01, 8.51370877e-01, 1.49011612e-08}, // Dataset is L1 normalized into pdfs raft::distance::DistanceType::HellingerExpanded, 0.0}, {4, {0, 1, 1, 2, 4}, {3, 2, 0, 1}, // indices {0.99296, 0.42180, 0.11687, 0.305869}, { // dense output 0.0, 0.99296, 1.41476, 1.415707, 0.99296, 0.0, 0.42180, 0.42274, 1.41476, 0.42180, 0.0, 0.84454, 1.41570, 0.42274, 0.84454, 0.0, }, raft::distance::DistanceType::L1, 0.0}, {5, {0, 3, 8, 12, 16, 20, 25, 30, 35, 40, 45}, {0, 3, 4, 0, 1, 2, 3, 4, 1, 2, 3, 4, 0, 2, 3, 4, 0, 1, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4}, {0.70862347, 0.8232774, 0.12108795, 0.84527547, 0.94937088, 0.03258545, 0.99584118, 0.76835667, 0.34426657, 0.2357925, 0.01274851, 0.11422017, 0.3437756, 0.31967718, 0.5956055, 0.31610373, 0.04147273, 0.03724415, 0.21515727, 0.04751052, 0.50283183, 0.99957274, 0.01395933, 0.96032529, 0.88438711, 0.46095378, 0.27432481, 0.54294211, 0.54280225, 0.59503329, 0.61364678, 0.22837736, 0.56609561, 0.29809423, 0.76736686, 0.56460608, 0.98165371, 0.02140123, 0.19881268, 0.26057815, 0.31648823, 0.89874295, 0.27366735, 0.5119944, 0.11416134}, {// dense output 0., 0.48769777, 1.88014197, 0.26127048, 0.26657011, 0.7874794, 0.76962708, 1.122858, 1.1232498, 1.08166081, 0.48769777, 0., 1.31332116, 0.98318907, 0.42661815, 0.09279052, 1.35187836, 1.38429055, 0.40658897, 0.56136388, 1.88014197, 1.31332116, 0., 1.82943642, 1.54826077, 1.05918884, 1.59360067, 1.34698954, 0.60215168, 0.46993848, 0.26127048, 0.98318907, 1.82943642, 0., 0.29945563, 1.08494093, 0.22934281, 0.82801925, 1.74288748, 1.50610116, 0.26657011, 0.42661815, 1.54826077, 0.29945563, 0., 0.45060069, 0.77814948, 1.45245711, 1.18328348, 0.82486987, 0.7874794, 0.09279052, 1.05918884, 1.08494093, 0.45060069, 0., 
1.29899154, 1.40683824, 0.48505269, 0.53862363, 0.76962708, 1.35187836, 1.59360067, 0.22934281, 0.77814948, 1.29899154, 0., 0.33202426, 1.92108999, 1.88812175, 1.122858, 1.38429055, 1.34698954, 0.82801925, 1.45245711, 1.40683824, 0.33202426, 0., 1.47318624, 1.92660889, 1.1232498, 0.40658897, 0.60215168, 1.74288748, 1.18328348, 0.48505269, 1.92108999, 1.47318624, 0., 0.24992619, 1.08166081, 0.56136388, 0.46993848, 1.50610116, 0.82486987, 0.53862363, 1.88812175, 1.92660889, 0.24992619, 0.}, raft::distance::DistanceType::CorrelationExpanded, 0.0}, {5, {0, 1, 2, 4, 4, 5, 6, 7, 9, 9, 10}, {1, 4, 0, 4, 1, 3, 0, 1, 3, 0}, {1., 1., 1., 1., 1., 1., 1., 1., 1., 1.}, {// dense output 0., 1., 1., 1., 0.8, 1., 1., 0.8, 1., 1., 1., 0., 0.8, 1., 1., 1., 1., 1., 1., 1., 1., 0.8, 0., 1., 1., 1., 0.8, 1., 1., 0.8, 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 0.8, 1., 1., 1., 0., 1., 1., 0.8, 1., 1., 1., 1., 1., 1., 1., 0., 1., 0.8, 1., 1., 1., 1., 0.8, 1., 1., 1., 0., 1., 1., 0.8, 0.8, 1., 1., 1., 0.8, 0.8, 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1., 1., 0.8, 1., 1., 1., 0.8, 1., 1., 0.}, raft::distance::DistanceType::RusselRaoExpanded, 0.0}, {5, {0, 1, 1, 3, 3, 4, 4, 6, 9, 10, 10}, {0, 3, 4, 4, 2, 3, 0, 2, 3, 2}, {1., 1., 1., 1., 1., 1., 1., 1., 1., 1.}, {// dense output 0., 0.2, 0.6, 0.2, 0.4, 0.2, 0.6, 0.4, 0.4, 0.2, 0.2, 0., 0.4, 0., 0.2, 0., 0.4, 0.6, 0.2, 0., 0.6, 0.4, 0., 0.4, 0.2, 0.4, 0.4, 0.6, 0.6, 0.4, 0.2, 0., 0.4, 0., 0.2, 0., 0.4, 0.6, 0.2, 0., 0.4, 0.2, 0.2, 0.2, 0., 0.2, 0.6, 0.8, 0.4, 0.2, 0.2, 0., 0.4, 0., 0.2, 0., 0.4, 0.6, 0.2, 0., 0.6, 0.4, 0.4, 0.4, 0.6, 0.4, 0., 0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.6, 0.8, 0.6, 0.2, 0., 0.4, 0.6, 0.4, 0.2, 0.6, 0.2, 0.4, 0.2, 0.2, 0.4, 0., 0.2, 0.2, 0., 0.4, 0., 0.2, 0., 0.4, 0.6, 0.2, 0.}, raft::distance::DistanceType::HammingUnexpanded, 0.0}, {3, {0, 1, 2}, {0, 1}, {1.0, 1.0}, {0.0, 0.83255, 0.83255, 0.0}, raft::distance::DistanceType::JensenShannon, 0.0}, {2, {0, 1, 3}, {0, 0, 1}, {1.0, 0.5, 0.5}, {0, 0.4645014, 
0.4645014, 0}, raft::distance::DistanceType::JensenShannon, 0.0}, {3, {0, 1, 2}, {0, 0}, {1.0, 1.0}, {0.0, 0.0, 0.0, 0.0}, raft::distance::DistanceType::JensenShannon, 0.0}, {3, {0, 1, 2}, {0, 1}, {1.0, 1.0}, {0.0, 1.0, 1.0, 0.0}, raft::distance::DistanceType::DiceExpanded, 0.0}, {3, {0, 1, 3}, {0, 0, 1}, {1.0, 1.0, 1.0}, {0, 0.333333, 0.333333, 0}, raft::distance::DistanceType::DiceExpanded, 0.0}, }; typedef SparseDistanceTest<int, float> SparseDistanceTestF; TEST_P(SparseDistanceTestF, Result) { compare(); } INSTANTIATE_TEST_CASE_P(SparseDistanceTests, SparseDistanceTestF, ::testing::ValuesIn(inputs_i32_f)); }; // namespace distance }; // end namespace sparse }; // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/gram.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Tests for the Gram matrix kernels: the device result produced by a
// KernelFactory-created GramMatrixBase kernel is compared against a reference
// computed by naiveGramMatrixKernel (from gram_base.cuh), for dense/dense,
// CSR/dense and CSR/CSR operand combinations, in row- and column-major
// layouts, with both tight and custom leading dimensions.

#if defined RAFT_DISTANCE_COMPILED
// NOTE(review): cuda_stream.hpp is only included when RAFT_DISTANCE_COMPILED
// is defined, yet resource::get_cuda_stream is used unconditionally below —
// presumably the header is reached transitively via other raft includes;
// verify.
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/specializations.cuh>
#endif

#include "../distance/gram_base.cuh"
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <iostream>
#include <memory>
#include <raft/distance/distance_types.hpp>
#include <raft/distance/kernels.cuh>
#include <raft/random/rng.cuh>
#include <raft/sparse/convert/dense.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/itertools.hpp>
#include <rmm/device_uvector.hpp>

namespace raft::distance::kernels {

/**
 * Structure to describe structure of the input matrices:
 * - DENSE: dense, dense
 * - MIX: CSR, dense
 * - CSR: CSR, CSR
 */
enum SparseType { DENSE, MIX, CSR };

/** Parameters describing one Gram-matrix test case. */
struct GramMatrixInputs {
  int n1;      // feature vectors in matrix 1
  int n2;      // feature vectors in matrix 2
  int n_cols;  // number of elements in a feature vector
  bool is_row_major;        // layout of the dense inputs and the output
  SparseType sparse_input;  // which operands are provided in CSR format
  KernelParams kernel;      // kernel function type and its parameters
  int ld1;                  // leading dimension of matrix 1 (0 = tight packing)
  int ld2;                  // leading dimension of matrix 2 (0 = tight packing)
  int ld_out;               // leading dimension of the output (0 = tight packing)
  // We will generate random input using the dimensions given here.
  // The reference output is calculated by a custom kernel.
};

// Pretty-printer used by gtest to label the parameterized test cases.
std::ostream& operator<<(std::ostream& os, const GramMatrixInputs& p)
{
  // Indexed by the numeric value of p.kernel.kernel (a KernelType).
  std::vector<std::string> kernel_names{"linear", "poly", "rbf", "tanh"};
  os << "/" << p.n1 << "x" << p.n2 << "x" << p.n_cols << "/"
     << (p.is_row_major ? "RowMajor/" : "ColMajor/")
     << (p.sparse_input == SparseType::DENSE
           ? "DenseDense/"
           : (p.sparse_input == SparseType::MIX ? "CsrDense/" : "CsrCsr/"))
     << kernel_names[p.kernel.kernel] << "/ld_" << p.ld1 << "x" << p.ld2 << "x" << p.ld_out;
  return os;
}

/* Reference copy of the KernelParams struct (declared in raft), kept here to
   document the field order used in the test-case tables below:
   struct KernelParams {  // Kernel function parameters
     KernelType kernel;   //!< Type of the kernel function
     int degree;          //!< Degree of polynomial kernel (ignored by others)
     double gamma;        //!< multiplier in the
     double coef0;        //!< additive constant in poly and tanh kernels
   }; */
// const KernelParams linear_kernel_params{.kernel=KernelType::LINEAR};
// {KernelType::POLYNOMIAL, 2, 0.5, 2.4}, {KernelType::TANH, 0, 0.5, 2.4}, {KernelType::RBF, 0, 0.5}

// Cartesian product of test parameters; leading dimensions are left at 0
// (tight packing) here.
const std::vector<GramMatrixInputs> inputs = raft::util::itertools::product<GramMatrixInputs>(
  {42},
  {137},
  {2},
  {true, false},
  {SparseType::DENSE, SparseType::MIX, SparseType::CSR},
  {KernelParams{KernelType::LINEAR},
   KernelParams{KernelType::POLYNOMIAL, 2, 0.5, 2.4},
   KernelParams{KernelType::TANH, 0, 0.5, 2.4},
   KernelParams{KernelType::RBF, 0, 0.5}});

// (ld_1, ld_2, ld_out) not supported by RBF and CSR
const std::vector<GramMatrixInputs> inputs_ld = raft::util::itertools::product<GramMatrixInputs>(
  {137},
  {42},
  {2},
  {true, false},
  {SparseType::DENSE, SparseType::MIX},
  {KernelParams{KernelType::LINEAR},
   KernelParams{KernelType::POLYNOMIAL, 2, 0.5, 2.4},
   KernelParams{KernelType::TANH, 0, 0.5, 2.4}},
  {159},
  {73},
  {144});

// (ld_1, ld_2) are supported by CSR
const std::vector<GramMatrixInputs> inputs_ld_csr =
  raft::util::itertools::product<GramMatrixInputs>(
    {42},
    {137},
    {2},
    {true, false},
    {SparseType::CSR, SparseType::MIX},
    {KernelParams{KernelType::LINEAR},
     KernelParams{KernelType::POLYNOMIAL, 2, 0.5, 2.4},
     KernelParams{KernelType::TANH, 0, 0.5, 2.4}},
    {64},
    {155},
    {0});

/**
 * Parameterized fixture: fills x1/x2 with seeded uniform random values,
 * optionally round-trips them through CSR, runs the Gram kernel under test
 * and compares the device output against the naive reference.
 */
template <typename math_t>
class GramMatrixTest : public ::testing::TestWithParam<GramMatrixInputs> {
 protected:
  GramMatrixTest()
    : params(GetParam()),
      stream(resource::get_cuda_stream(handle)),
      x1(0, stream),
      x2(0, stream),
      x1_csr_indptr(0, stream),
      x1_csr_indices(0, stream),
      x1_csr_data(0, stream),
      x2_csr_indptr(0, stream),
      x2_csr_indices(0, stream),
      x2_csr_data(0, stream),
      gram(0, stream),
      gram_host(0)
  {
    // A leading dimension of 0 means "tightly packed": substitute the natural
    // stride for the chosen layout.
    if (params.ld1 == 0) { params.ld1 = params.is_row_major ? params.n_cols : params.n1; }
    if (params.ld2 == 0) { params.ld2 = params.is_row_major ? params.n_cols : params.n2; }
    if (params.ld_out == 0) { params.ld_out = params.is_row_major ? params.n2 : params.n1; }
    // Derive the size of the output from the offset of the last element.
    size_t size = get_offset(params.n1 - 1, params.n_cols - 1, params.ld1, params.is_row_major) + 1;
    x1.resize(size, stream);
    size = get_offset(params.n2 - 1, params.n_cols - 1, params.ld2, params.is_row_major) + 1;
    x2.resize(size, stream);
    size = get_offset(params.n1 - 1, params.n2 - 1, params.ld_out, params.is_row_major) + 1;
    gram.resize(size, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(gram.data(), 0, gram.size() * sizeof(math_t), stream));
    gram_host.resize(gram.size());
    std::fill(gram_host.begin(), gram_host.end(), 0);

    // Fixed seed: every test instantiation sees the same pseudo-random input.
    raft::random::RngState r(42137ULL);
    raft::random::uniform(handle, r, x1.data(), x1.size(), math_t(0), math_t(1));
    raft::random::uniform(handle, r, x2.data(), x2.size(), math_t(0), math_t(1));
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

  ~GramMatrixTest() override {}

  /**
   * Convert a dense device matrix to CSR on the host (entries <= 1e-6 are
   * dropped) and upload the CSR arrays to the given device pointers. The
   * dense matrix is then overwritten with the thresholded round-trip result,
   * so the dense reference computation later sees exactly the same values as
   * the CSR path.
   *
   * @param dense   device pointer to the dense matrix (modified in place)
   * @param n_rows  number of rows (feature vectors)
   * @param ld      leading dimension of the dense matrix
   * @param indptr  device output, n_rows + 1 entries
   * @param indices device output, up to n_rows * n_cols entries
   * @param data    device output, up to n_rows * n_cols entries
   * @return number of non-zero entries written
   */
  int prepareCsr(math_t* dense, int n_rows, int ld, int* indptr, int* indices, math_t* data)
  {
    int nnz           = 0;
    double eps        = 1e-6;
    int n_cols        = params.n_cols;
    bool is_row_major = params.is_row_major;
    size_t dense_size = get_offset(n_rows - 1, n_cols - 1, ld, is_row_major) + 1;

    std::vector<math_t> dense_host(dense_size);
    raft::update_host(dense_host.data(), dense, dense_size, stream);
    resource::sync_stream(handle, stream);

    std::vector<int> indptr_host(n_rows + 1);
    std::vector<int> indices_host(n_rows * n_cols);
    std::vector<math_t> data_host(n_rows * n_cols);

    // create csr matrix from dense (with threshold)
    for (int i = 0; i < n_rows; ++i) {
      indptr_host[i] = nnz;
      for (int j = 0; j < n_cols; ++j) {
        math_t value = dense_host[get_offset(i, j, ld, is_row_major)];
        if (value > eps) {
          indices_host[nnz] = j;
          data_host[nnz]    = value;
          nnz++;
        }
      }
    }
    indptr_host[n_rows] = nnz;

    // fill back dense matrix from CSR
    std::fill(dense_host.data(), dense_host.data() + dense_size, 0);
    for (int i = 0; i < n_rows; ++i) {
      for (int idx = indptr_host[i]; idx < indptr_host[i + 1]; ++idx) {
        dense_host[get_offset(i, indices_host[idx], ld, is_row_major)] = data_host[idx];
      }
    }

    raft::update_device(dense, dense_host.data(), dense_size, stream);
    raft::update_device(indptr, indptr_host.data(), n_rows + 1, stream);
    raft::update_device(indices, indices_host.data(), nnz, stream);
    raft::update_device(data, data_host.data(), nnz, stream);
    resource::sync_stream(handle, stream);
    return nnz;
  }

  /**
   * Run the kernel under test for the configured operand types (dense/dense,
   * CSR/dense or CSR/CSR) and compare the device result against the
   * naiveGramMatrixKernel reference within a 1e-6 relative tolerance.
   */
  void runTest()
  {
    std::unique_ptr<GramMatrixBase<math_t>> kernel =
      std::unique_ptr<GramMatrixBase<math_t>>(KernelFactory<math_t>::create(params.kernel));

    // Strided views over the dense inputs/output; both ternary branches
    // produce a strided (layout_stride) matrix view over the same data.
    auto x1_span =
      params.is_row_major
        ? raft::make_device_strided_matrix_view<const math_t, int, raft::layout_c_contiguous>(
            x1.data(), params.n1, params.n_cols, params.ld1)
        : raft::make_device_strided_matrix_view<const math_t, int, raft::layout_f_contiguous>(
            x1.data(), params.n1, params.n_cols, params.ld1);
    auto x2_span =
      params.is_row_major
        ? raft::make_device_strided_matrix_view<const math_t, int, raft::layout_c_contiguous>(
            x2.data(), params.n2, params.n_cols, params.ld2)
        : raft::make_device_strided_matrix_view<const math_t, int, raft::layout_f_contiguous>(
            x2.data(), params.n2, params.n_cols, params.ld2);
    auto out_span =
      params.is_row_major
        ? raft::make_device_strided_matrix_view<math_t, int, raft::layout_c_contiguous>(
            gram.data(), params.n1, params.n2, params.ld_out)
        : raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
            gram.data(), params.n1, params.n2, params.ld_out);

    if (params.sparse_input == SparseType::DENSE) {
      (*kernel)(handle, x1_span, x2_span, out_span);
    } else {
      // reserve (not resize) is sufficient here: prepareCsr fills the
      // buffers through the raw pointers via update_device.
      x1_csr_indptr.reserve(params.n1 + 1, stream);
      x1_csr_indices.reserve(params.n1 * params.n_cols, stream);
      x1_csr_data.reserve(params.n1 * params.n_cols, stream);
      int x1_nnz = prepareCsr(x1.data(),
                              params.n1,
                              params.ld1,
                              x1_csr_indptr.data(),
                              x1_csr_indices.data(),
                              x1_csr_data.data());

      auto x1_csr_structure = raft::make_device_compressed_structure_view<int, int, int>(
        x1_csr_indptr.data(), x1_csr_indices.data(), params.n1, params.n_cols, x1_nnz);
      auto x1_csr = raft::device_csr_matrix_view<const math_t, int, int, int>(
        raft::device_span<const math_t>(x1_csr_data.data(), x1_csr_structure.get_nnz()),
        x1_csr_structure);

      if (params.sparse_input == SparseType::MIX) {
        (*kernel)(handle, x1_csr, x2_span, out_span);
      } else {
        x2_csr_indptr.reserve(params.n2 + 1, stream);
        x2_csr_indices.reserve(params.n2 * params.n_cols, stream);
        x2_csr_data.reserve(params.n2 * params.n_cols, stream);
        int x2_nnz = prepareCsr(x2.data(),
                                params.n2,
                                params.ld2,
                                x2_csr_indptr.data(),
                                x2_csr_indices.data(),
                                x2_csr_data.data());

        auto x2_csr_structure = raft::make_device_compressed_structure_view<int, int, int>(
          x2_csr_indptr.data(), x2_csr_indices.data(), params.n2, params.n_cols, x2_nnz);
        auto x2_csr = raft::device_csr_matrix_view<const math_t, int, int, int>(
          raft::device_span<const math_t>(x2_csr_data.data(), x2_csr_structure.get_nnz()),
          x2_csr_structure);

        (*kernel)(handle, x1_csr, x2_csr, out_span);
      }
    }
    // Something in gram is executing not on the 'stream' and therefore
    // a full device sync is required
    RAFT_CUDA_TRY(cudaDeviceSynchronize());

    naiveGramMatrixKernel(params.n1,
                          params.n2,
                          params.n_cols,
                          x1,
                          x2,
                          gram_host.data(),
                          params.ld1,
                          params.ld2,
                          params.ld_out,
                          params.is_row_major,
                          params.kernel,
                          stream,
                          handle);
    resource::sync_stream(handle, stream);

    ASSERT_TRUE(raft::devArrMatchHost(
      gram_host.data(), gram.data(), gram.size(), raft::CompareApprox<math_t>(1e-6f), stream));
  }

  raft::resources handle;
  cudaStream_t stream = 0;

  GramMatrixInputs params;

  // Dense inputs and output on the device.
  rmm::device_uvector<math_t> x1;
  rmm::device_uvector<math_t> x2;

  // CSR representations of x1/x2, filled lazily by runTest when needed.
  rmm::device_uvector<int> x1_csr_indptr;
  rmm::device_uvector<int> x1_csr_indices;
  rmm::device_uvector<math_t> x1_csr_data;
  rmm::device_uvector<int> x2_csr_indptr;
  rmm::device_uvector<int> x2_csr_indices;
  rmm::device_uvector<math_t> x2_csr_data;

  rmm::device_uvector<math_t> gram;  // device result
  std::vector<math_t> gram_host;     // host reference
};

typedef GramMatrixTest<float> GramMatrixTestFloatStandard;
typedef GramMatrixTest<float> GramMatrixTestFloatLd;
typedef GramMatrixTest<float> GramMatrixTestFloatLdCsr;

TEST_P(GramMatrixTestFloatStandard, Gram) { runTest(); }
TEST_P(GramMatrixTestFloatLd, Gram) { runTest(); }
TEST_P(GramMatrixTestFloatLdCsr, Gram) { runTest(); }

INSTANTIATE_TEST_SUITE_P(GramMatrixTests, GramMatrixTestFloatStandard, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_SUITE_P(GramMatrixTests, GramMatrixTestFloatLd, ::testing::ValuesIn(inputs_ld));
INSTANTIATE_TEST_SUITE_P(GramMatrixTests,
                         GramMatrixTestFloatLdCsr,
                         ::testing::ValuesIn(inputs_ld_csr));
};  // end namespace raft::distance::kernels
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/symmetrize.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Tests for sparse symmetrization:
// - SparseSymmetrizeTest: symmetrizes a CSR matrix (via its COO rows) with
//   raft::sparse::linalg::symmetrize and checks that sum(rows) == sum(cols)
//   over all nnz entries of the result.
// - COOSymmetrize: symmetrizes a small hand-written COO matrix with
//   linalg::coo_symmetrize and compares against the expected dense layout.

#include <gtest/gtest.h>

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/sparse/convert/coo.cuh>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/linalg/symmetrize.cuh>
#include <raft/util/cudart_utils.hpp>

#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>

#include "../test_utils.cuh"

#include <iostream>
#include <vector>

namespace raft {
namespace sparse {

/**
 * One thread per nnz entry: adds rows[tid] and subtracts cols[tid] into *sum.
 * For a symmetric matrix every (r, c) entry has a matching (c, r) entry, so
 * the accumulated total must be zero. (A zero sum is a necessary, not
 * sufficient, condition for symmetry.) Launch with >= nnz threads;
 * out-of-range threads return early.
 */
template <typename value_idx, typename value_t>
RAFT_KERNEL assert_symmetry(
  value_idx* rows, value_idx* cols, value_t* vals, value_idx nnz, value_idx* sum)
{
  int tid = blockDim.x * blockIdx.x + threadIdx.x;

  if (tid >= nnz) return;

  atomicAdd(sum, rows[tid]);
  atomicAdd(sum, -1 * cols[tid]);
}

/** CSR input for one SparseSymmetrizeTest case. */
template <typename value_idx, typename value_t>
struct SparseSymmetrizeInputs {
  value_idx n_cols;  // number of columns of the input matrix

  std::vector<value_idx> indptr_h;   // CSR row offsets (host)
  std::vector<value_idx> indices_h;  // CSR column indices (host)
  std::vector<value_t> data_h;       // CSR values (host)
};

// gtest requires a streaming operator for parameter types; nothing useful to
// print here.
template <typename value_idx, typename value_t>
::std::ostream& operator<<(::std::ostream& os,
                           const SparseSymmetrizeInputs<value_idx, value_t>& dims)
{
  return os;
}

/**
 * Fixture: uploads the CSR input, converts its row offsets to COO rows,
 * symmetrizes, and accumulates sum(rows) - sum(cols) of the result into
 * sum_h for the test body to check.
 */
template <typename value_idx, typename value_t>
class SparseSymmetrizeTest
  : public ::testing::TestWithParam<SparseSymmetrizeInputs<value_idx, value_t>> {
 public:
  SparseSymmetrizeTest()
    : params(::testing::TestWithParam<SparseSymmetrizeInputs<value_idx, value_t>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      indptr(0, stream),
      indices(0, stream),
      data(0, stream)
  {
  }

 protected:
  // Copy the host-side CSR arrays from params onto the device.
  void make_data()
  {
    std::vector<value_idx> indptr_h  = params.indptr_h;
    std::vector<value_idx> indices_h = params.indices_h;
    std::vector<value_t> data_h      = params.data_h;

    indptr.resize(indptr_h.size(), stream);
    indices.resize(indices_h.size(), stream);
    data.resize(data_h.size(), stream);

    update_device(indptr.data(), indptr_h.data(), indptr_h.size(), stream);
    update_device(indices.data(), indices_h.data(), indices_h.size(), stream);
    update_device(data.data(), data_h.data(), data_h.size(), stream);
  }

  void SetUp() override
  {
    make_data();

    value_idx m   = params.indptr_h.size() - 1;  // rows = len(indptr) - 1
    value_idx n   = params.n_cols;
    value_idx nnz = params.indices_h.size();

    // Expand CSR row offsets into an explicit COO row array.
    rmm::device_uvector<value_idx> coo_rows(nnz, stream);
    raft::sparse::convert::csr_to_coo(indptr.data(), m, coo_rows.data(), nnz, stream);

    raft::sparse::COO<value_t, value_idx> out(stream);
    raft::sparse::linalg::symmetrize(
      handle, coo_rows.data(), indices.data(), data.data(), m, n, coo_rows.size(), out);

    rmm::device_scalar<value_idx> sum(stream);
    sum.set_value_to_zero_async(stream);

    assert_symmetry<<<raft::ceildiv(out.nnz, 256), 256, 0, stream>>>(
      out.rows(), out.cols(), out.vals(), out.nnz, sum.data());

    sum_h = sum.value(stream);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  // input data
  rmm::device_uvector<value_idx> indptr, indices;
  rmm::device_uvector<value_t> data;

  value_idx sum_h;  // sum(rows) - sum(cols) of the symmetrized output

  SparseSymmetrizeInputs<value_idx, value_t> params;
};

/** Dimensions/seed for the COOSymmetrize parameterization (unused by the
 *  test body, which uses fixed hand-written data). */
template <typename T>
struct COOSymmetrizeInputs {
  int m, n, nnz;
  unsigned long long int seed;
};

template <typename T>
class COOSymmetrizeTest : public ::testing::TestWithParam<COOSymmetrizeInputs<T>> {
 protected:
  void SetUp() override {}

  void TearDown() override {}
};

const std::vector<COOSymmetrizeInputs<float>> inputsf = {{5, 10, 5, 1234ULL}};

typedef COOSymmetrizeTest<float> COOSymmetrize;
TEST_P(COOSymmetrize, Result)
{
  cudaStream_t stream;
  // Check the stream create/destroy return codes (previously ignored).
  RAFT_CUDA_TRY(cudaStreamCreate(&stream));

  int nnz = 8;

  // Input 4x4 COO matrix and the expected symmetrized output. The output
  // values combine each entry with its transpose via the val + trans lambda
  // below; zero-valued slots in the expected arrays are padding produced by
  // coo_symmetrize.
  // std::vector (RAII) instead of raw new[]/delete[]: the original leaked
  // all six buffers whenever an ASSERT_TRUE failed before the deletes ran.
  std::vector<int> in_rows_h{0, 0, 1, 1, 2, 2, 3, 3};
  std::vector<int> in_cols_h{1, 3, 2, 3, 0, 1, 0, 2};
  std::vector<float> in_vals_h{0.5, 1.0, 0.5, 0.5, 0.5, 0.0, 0.5, 0.5};

  std::vector<int> exp_rows_h{1, 0, 0, 0, 1, 3, 1, 0, 0, 2, 2, 0, 3, 2, 3, 0};
  std::vector<int> exp_cols_h{0, 1, 3, 0, 2, 1, 3, 0, 2, 0, 1, 0, 0, 3, 2, 0};
  std::vector<float> exp_vals_h{
    0.5, 0.5, 1.5, 0, 0.5, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0, 1.5, 0.5, 0.5, 0.0};

  COO<float> in(stream, nnz, 4, 4);
  raft::update_device(in.rows(), in_rows_h.data(), nnz, stream);
  raft::update_device(in.cols(), in_cols_h.data(), nnz, stream);
  raft::update_device(in.vals(), in_vals_h.data(), nnz, stream);

  COO<float> out(stream);

  linalg::coo_symmetrize<float>(
    &in,
    &out,
    [] __device__(int row, int col, float val, float trans) { return val + trans; },
    stream);

  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  std::cout << out << std::endl;

  ASSERT_TRUE(out.nnz == nnz * 2);
  ASSERT_TRUE(raft::devArrMatch<int>(out.rows(), exp_rows_h.data(), out.nnz, raft::Compare<int>()));
  ASSERT_TRUE(raft::devArrMatch<int>(out.cols(), exp_cols_h.data(), out.nnz, raft::Compare<int>()));
  ASSERT_TRUE(
    raft::devArrMatch<float>(out.vals(), exp_vals_h.data(), out.nnz, raft::Compare<float>()));

  RAFT_CUDA_TRY(cudaStreamDestroy(stream));
}

INSTANTIATE_TEST_CASE_P(COOSymmetrizeTest, COOSymmetrize, ::testing::ValuesIn(inputsf));

const std::vector<SparseSymmetrizeInputs<int, float>> symm_inputs_fint = {
  // Test n_clusters == n_points
  {
    2,
    {0, 2, 4, 6, 8},
    {0, 1, 0, 1, 0, 1, 0, 1},
    {1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f},
  },
  {2,
   {0, 2, 4, 6, 8},
   {0, 1, 0, 1, 0, 1, 0, 1},  // indices
   {1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f}},
};

typedef SparseSymmetrizeTest<int, float> SparseSymmetrizeTestF_int;
TEST_P(SparseSymmetrizeTestF_int, Result) { ASSERT_TRUE(sum_h == 0); }

INSTANTIATE_TEST_CASE_P(SparseSymmetrizeTest,
                        SparseSymmetrizeTestF_int,
                        ::testing::ValuesIn(symm_inputs_fint));

}  // namespace sparse
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/dist_coo_spmv.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/operators.cuh> #include <raft/core/operators.hpp> #include <raft/distance/distance_types.hpp> #include <raft/linalg/unary_op.cuh> #include <raft/sparse/detail/cusparse_wrappers.h> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <raft/sparse/convert/coo.cuh> #include <raft/sparse/distance/detail/coo_spmv.cuh> #include "../test_utils.cuh" #include <type_traits> namespace raft { namespace sparse { namespace distance { using namespace raft; using namespace raft::sparse; template <typename value_idx, typename value_t> struct InputConfiguration { value_idx n_cols; std::vector<value_idx> indptr_h; std::vector<value_idx> indices_h; std::vector<value_t> data_h; std::vector<value_t> out_dists_ref_h; raft::distance::DistanceType metric; float metric_arg = 0.0; }; using dense_smem_strategy_t = detail::dense_smem_strategy<int, float, 1024>; using hash_strategy_t = detail::hash_strategy<int, float, 1024>; template <typename value_idx, typename value_t, typename strategy_t> struct SparseDistanceCOOSPMVInputs { InputConfiguration<value_idx, value_t> input_configuration; float capacity_threshold = 0.5; int map_size = detail::hash_strategy<value_idx, value_t, 1024>::get_map_size(); }; template <typename value_idx, typename value_t, typename strategy_t> ::std::ostream& 
operator<<(::std::ostream& os, const SparseDistanceCOOSPMVInputs<value_idx, value_t, strategy_t>& dims) { return os; } template <typename value_idx, typename value_t, typename strategy_t> class SparseDistanceCOOSPMVTest : public ::testing::TestWithParam<SparseDistanceCOOSPMVInputs<value_idx, value_t, strategy_t>> { public: SparseDistanceCOOSPMVTest() : dist_config(handle), indptr(0, resource::get_cuda_stream(handle)), indices(0, resource::get_cuda_stream(handle)), data(0, resource::get_cuda_stream(handle)), out_dists(0, resource::get_cuda_stream(handle)), out_dists_ref(0, resource::get_cuda_stream(handle)) { } template <typename U, std::enable_if_t<std::is_same_v<U, hash_strategy_t>>* = nullptr> U make_strategy() { return strategy_t(dist_config, params.capacity_threshold, params.map_size); } template <typename U, std::enable_if_t<std::is_same_v<U, dense_smem_strategy_t>>* = nullptr> U make_strategy() { return strategy_t(dist_config); } template <typename reduce_f, typename accum_f, typename write_f> void compute_dist(reduce_f reduce_func, accum_f accum_func, write_f write_func, bool rev = true) { rmm::device_uvector<value_idx> coo_rows(max(dist_config.b_nnz, dist_config.a_nnz), resource::get_cuda_stream(dist_config.handle)); raft::sparse::convert::csr_to_coo(dist_config.b_indptr, dist_config.b_nrows, coo_rows.data(), dist_config.b_nnz, resource::get_cuda_stream(dist_config.handle)); strategy_t selected_strategy = make_strategy<strategy_t>(); detail::balanced_coo_pairwise_generalized_spmv<value_idx, value_t>(out_dists.data(), dist_config, coo_rows.data(), reduce_func, accum_func, write_func, selected_strategy); if (rev) { raft::sparse::convert::csr_to_coo(dist_config.a_indptr, dist_config.a_nrows, coo_rows.data(), dist_config.a_nnz, resource::get_cuda_stream(dist_config.handle)); detail::balanced_coo_pairwise_generalized_spmv_rev<value_idx, value_t>(out_dists.data(), dist_config, coo_rows.data(), reduce_func, accum_func, write_func, selected_strategy); } } void 
run_spmv() { switch (params.input_configuration.metric) { case raft::distance::DistanceType::InnerProduct: compute_dist(raft::mul_op(), raft::add_op(), raft::atomic_add_op(), true); break; case raft::distance::DistanceType::L2Unexpanded: compute_dist(raft::sqdiff_op(), raft::add_op(), raft::atomic_add_op()); break; case raft::distance::DistanceType::Canberra: compute_dist( [] __device__(value_t a, value_t b) { return fabsf(a - b) / (fabsf(a) + fabsf(b)); }, raft::add_op(), raft::atomic_add_op()); break; case raft::distance::DistanceType::L1: compute_dist(absdiff_op(), raft::add_op(), raft::atomic_add_op()); break; case raft::distance::DistanceType::Linf: compute_dist(absdiff_op(), raft::max_op(), raft::atomic_max_op()); break; case raft::distance::DistanceType::LpUnexpanded: { compute_dist( raft::compose_op(raft::pow_const_op<value_t>(params.input_configuration.metric_arg), raft::sub_op()), raft::add_op(), raft::atomic_add_op()); value_t p = value_t{1} / params.input_configuration.metric_arg; raft::linalg::unaryOp<value_t>(out_dists.data(), out_dists.data(), dist_config.a_nrows * dist_config.b_nrows, raft::pow_const_op<value_t>{p}, resource::get_cuda_stream(dist_config.handle)); } break; default: throw raft::exception("Unknown distance"); } } protected: void make_data() { std::vector<value_idx> indptr_h = params.input_configuration.indptr_h; std::vector<value_idx> indices_h = params.input_configuration.indices_h; std::vector<value_t> data_h = params.input_configuration.data_h; auto stream = resource::get_cuda_stream(handle); indptr.resize(indptr_h.size(), stream); indices.resize(indices_h.size(), stream); data.resize(data_h.size(), stream); update_device(indptr.data(), indptr_h.data(), indptr_h.size(), stream); update_device(indices.data(), indices_h.data(), indices_h.size(), stream); update_device(data.data(), data_h.data(), data_h.size(), stream); std::vector<value_t> out_dists_ref_h = params.input_configuration.out_dists_ref_h; 
out_dists_ref.resize((indptr_h.size() - 1) * (indptr_h.size() - 1), stream); update_device(out_dists_ref.data(), out_dists_ref_h.data(), out_dists_ref_h.size(), stream); } void SetUp() override { params = ::testing::TestWithParam< SparseDistanceCOOSPMVInputs<value_idx, value_t, strategy_t>>::GetParam(); make_data(); dist_config.b_nrows = params.input_configuration.indptr_h.size() - 1; dist_config.b_ncols = params.input_configuration.n_cols; dist_config.b_nnz = params.input_configuration.indices_h.size(); dist_config.b_indptr = indptr.data(); dist_config.b_indices = indices.data(); dist_config.b_data = data.data(); dist_config.a_nrows = params.input_configuration.indptr_h.size() - 1; dist_config.a_ncols = params.input_configuration.n_cols; dist_config.a_nnz = params.input_configuration.indices_h.size(); dist_config.a_indptr = indptr.data(); dist_config.a_indices = indices.data(); dist_config.a_data = data.data(); int out_size = dist_config.a_nrows * dist_config.b_nrows; out_dists.resize(out_size, resource::get_cuda_stream(handle)); run_spmv(); RAFT_CUDA_TRY(cudaStreamSynchronize(resource::get_cuda_stream(handle))); } void compare() { ASSERT_TRUE(devArrMatch(out_dists_ref.data(), out_dists.data(), params.input_configuration.out_dists_ref_h.size(), CompareApprox<value_t>(1e-3))); } protected: raft::resources handle; // input data rmm::device_uvector<value_idx> indptr, indices; rmm::device_uvector<value_t> data; // output data rmm::device_uvector<value_t> out_dists, out_dists_ref; raft::sparse::distance::detail::distances_config_t<value_idx, value_t> dist_config; SparseDistanceCOOSPMVInputs<value_idx, value_t, strategy_t> params; }; const InputConfiguration<int, float> input_inner_product = { 2, {0, 2, 4, 6, 8}, {0, 1, 0, 1, 0, 1, 0, 1}, {1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f}, {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, raft::distance::DistanceType::InnerProduct, 0.0}; const InputConfiguration<int, float> 
input_l2_unexpanded = { 2, {0, 2, 4, 6, 8}, {0, 1, 0, 1, 0, 1, 0, 1}, // indices {1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f}, { // dense output 0.0, 4.0, 3026.0, 226.0, 4.0, 0.0, 2930.0, 234.0, 3026.0, 2930.0, 0.0, 1832.0, 226.0, 234.0, 1832.0, 0.0, }, raft::distance::DistanceType::L2Unexpanded, 0.0}; const InputConfiguration<int, float> input_canberra = { 10, {0, 5, 11, 15, 20, 27, 32, 36, 43, 47, 50}, {0, 1, 3, 6, 8, 0, 1, 2, 3, 5, 6, 1, 2, 4, 8, 0, 2, 3, 4, 7, 0, 1, 2, 3, 4, 6, 8, 0, 1, 2, 5, 7, 1, 5, 8, 9, 0, 1, 2, 5, 6, 8, 9, 2, 4, 5, 7, 0, 3, 9}, // indices {0.5438, 0.2695, 0.4377, 0.7174, 0.9251, 0.7648, 0.3322, 0.7279, 0.4131, 0.5167, 0.8655, 0.0730, 0.0291, 0.9036, 0.7988, 0.5019, 0.7663, 0.2190, 0.8206, 0.3625, 0.0411, 0.3995, 0.5688, 0.7028, 0.8706, 0.3199, 0.4431, 0.0535, 0.2225, 0.8853, 0.1932, 0.3761, 0.3379, 0.1771, 0.2107, 0.228, 0.5279, 0.4885, 0.3495, 0.5079, 0.2325, 0.2331, 0.3018, 0.6231, 0.2645, 0.8429, 0.6625, 0.0797, 0.2724, 0.4218}, {0.0, 3.3954660629919076, 5.6469232737388815, 6.373112846266441, 4.0212880272531715, 6.916281504639404, 5.741508386786526, 5.411470999663036, 9.0, 4.977014354725805, 3.3954660629919076, 0.0, 7.56256082439209, 5.540261147481582, 4.832322929216881, 4.62003193872216, 6.498056792320361, 4.309846252268695, 6.317531174829905, 6.016362684141827, 5.6469232737388815, 7.56256082439209, 0.0, 5.974878731322299, 4.898357301336036, 6.442097410320605, 5.227077347287883, 7.134101195584642, 5.457753923371659, 7.0, 6.373112846266441, 5.540261147481582, 5.974878731322299, 0.0, 5.5507273748583, 4.897749658726415, 9.0, 8.398776718824767, 3.908281400328807, 4.83431066343688, 4.0212880272531715, 4.832322929216881, 4.898357301336036, 5.5507273748583, 0.0, 6.632989819428174, 7.438852294822894, 5.6631570310967465, 7.579428202635459, 6.760811985364303, 6.916281504639404, 4.62003193872216, 6.442097410320605, 4.897749658726415, 6.632989819428174, 0.0, 5.249404187382862, 6.072559523278559, 4.07661278488929, 6.19678948003145, 
5.741508386786526, 6.498056792320361, 5.227077347287883, 9.0, 7.438852294822894, 5.249404187382862, 0.0, 3.854811639654704, 6.652724827169063, 5.298236851430971, 5.411470999663036, 4.309846252268695, 7.134101195584642, 8.398776718824767, 5.6631570310967465, 6.072559523278559, 3.854811639654704, 0.0, 7.529184598969917, 6.903282911791188, 9.0, 6.317531174829905, 5.457753923371659, 3.908281400328807, 7.579428202635459, 4.07661278488929, 6.652724827169063, 7.529184598969917, 0.0, 7.0, 4.977014354725805, 6.016362684141827, 7.0, 4.83431066343688, 6.760811985364303, 6.19678948003145, 5.298236851430971, 6.903282911791188, 7.0, 0.0}, raft::distance::DistanceType::Canberra, 0.0}; const InputConfiguration<int, float> input_lp_unexpanded = { 10, {0, 5, 11, 15, 20, 27, 32, 36, 43, 47, 50}, {0, 1, 3, 6, 8, 0, 1, 2, 3, 5, 6, 1, 2, 4, 8, 0, 2, 3, 4, 7, 0, 1, 2, 3, 4, 6, 8, 0, 1, 2, 5, 7, 1, 5, 8, 9, 0, 1, 2, 5, 6, 8, 9, 2, 4, 5, 7, 0, 3, 9}, // indices {0.5438, 0.2695, 0.4377, 0.7174, 0.9251, 0.7648, 0.3322, 0.7279, 0.4131, 0.5167, 0.8655, 0.0730, 0.0291, 0.9036, 0.7988, 0.5019, 0.7663, 0.2190, 0.8206, 0.3625, 0.0411, 0.3995, 0.5688, 0.7028, 0.8706, 0.3199, 0.4431, 0.0535, 0.2225, 0.8853, 0.1932, 0.3761, 0.3379, 0.1771, 0.2107, 0.228, 0.5279, 0.4885, 0.3495, 0.5079, 0.2325, 0.2331, 0.3018, 0.6231, 0.2645, 0.8429, 0.6625, 0.0797, 0.2724, 0.4218}, {0.0, 1.31462855332296, 1.3690307816129905, 1.698603990921237, 1.3460470789553531, 1.6636670712582544, 1.2651744044972217, 1.1938329352055201, 1.8811409082590185, 1.3653115050624267, 1.31462855332296, 0.0, 1.9447722703291133, 1.42818777206562, 1.4685491458946494, 1.3071999866010466, 1.4988622861692171, 0.9698559287406783, 1.4972023224597841, 1.5243383567266802, 1.3690307816129905, 1.9447722703291133, 0.0, 1.2748400840107568, 1.0599569946448246, 1.546591282841402, 1.147526531928459, 1.447002179128145, 1.5982242387673176, 1.3112533607072414, 1.698603990921237, 1.42818777206562, 1.2748400840107568, 0.0, 1.038121552545461, 1.011788365364402, 
1.3907391109256988, 1.3128200942311496, 1.19595706584447, 1.3233328139624725, 1.3460470789553531, 1.4685491458946494, 1.0599569946448246, 1.038121552545461, 0.0, 1.3642741698145529, 1.3493868683808095, 1.394942694628328, 1.572881849642552, 1.380122665319464, 1.6636670712582544, 1.3071999866010466, 1.546591282841402, 1.011788365364402, 1.3642741698145529, 0.0, 1.018961640373018, 1.0114394258945634, 0.8338711034820684, 1.1247823842299223, 1.2651744044972217, 1.4988622861692171, 1.147526531928459, 1.3907391109256988, 1.3493868683808095, 1.018961640373018, 0.0, 0.7701238110357329, 1.245486437864406, 0.5551259549534626, 1.1938329352055201, 0.9698559287406783, 1.447002179128145, 1.3128200942311496, 1.394942694628328, 1.0114394258945634, 0.7701238110357329, 0.0, 1.1886800117391216, 1.0083692448135637, 1.8811409082590185, 1.4972023224597841, 1.5982242387673176, 1.19595706584447, 1.572881849642552, 0.8338711034820684, 1.245486437864406, 1.1886800117391216, 0.0, 1.3661374102525012, 1.3653115050624267, 1.5243383567266802, 1.3112533607072414, 1.3233328139624725, 1.380122665319464, 1.1247823842299223, 0.5551259549534626, 1.0083692448135637, 1.3661374102525012, 0.0}, raft::distance::DistanceType::LpUnexpanded, 2.0}; const InputConfiguration<int, float> input_linf = { 10, {0, 5, 11, 15, 20, 27, 32, 36, 43, 47, 50}, {0, 1, 3, 6, 8, 0, 1, 2, 3, 5, 6, 1, 2, 4, 8, 0, 2, 3, 4, 7, 0, 1, 2, 3, 4, 6, 8, 0, 1, 2, 5, 7, 1, 5, 8, 9, 0, 1, 2, 5, 6, 8, 9, 2, 4, 5, 7, 0, 3, 9}, // indices {0.5438, 0.2695, 0.4377, 0.7174, 0.9251, 0.7648, 0.3322, 0.7279, 0.4131, 0.5167, 0.8655, 0.0730, 0.0291, 0.9036, 0.7988, 0.5019, 0.7663, 0.2190, 0.8206, 0.3625, 0.0411, 0.3995, 0.5688, 0.7028, 0.8706, 0.3199, 0.4431, 0.0535, 0.2225, 0.8853, 0.1932, 0.3761, 0.3379, 0.1771, 0.2107, 0.228, 0.5279, 0.4885, 0.3495, 0.5079, 0.2325, 0.2331, 0.3018, 0.6231, 0.2645, 0.8429, 0.6625, 0.0797, 0.2724, 0.4218}, {0.0, 0.9251771844789913, 0.9036452083899731, 0.9251771844789913, 0.8706483735804971, 0.9251771844789913, 
0.717493881903289, 0.6920214832303888, 0.9251771844789913, 0.9251771844789913, 0.9251771844789913, 0.0, 0.9036452083899731, 0.8655339692155823, 0.8706483735804971, 0.8655339692155823, 0.8655339692155823, 0.6329837991017668, 0.8655339692155823, 0.8655339692155823, 0.9036452083899731, 0.9036452083899731, 0.0, 0.7988276152181608, 0.7028075145996631, 0.9036452083899731, 0.9036452083899731, 0.9036452083899731, 0.8429599432532096, 0.9036452083899731, 0.9251771844789913, 0.8655339692155823, 0.7988276152181608, 0.0, 0.48376552205293305, 0.8206394616536681, 0.8206394616536681, 0.8206394616536681, 0.8429599432532096, 0.8206394616536681, 0.8706483735804971, 0.8706483735804971, 0.7028075145996631, 0.48376552205293305, 0.0, 0.8706483735804971, 0.8706483735804971, 0.8706483735804971, 0.8429599432532096, 0.8706483735804971, 0.9251771844789913, 0.8655339692155823, 0.9036452083899731, 0.8206394616536681, 0.8706483735804971, 0.0, 0.8853924473642432, 0.535821510936138, 0.6497196601457607, 0.8853924473642432, 0.717493881903289, 0.8655339692155823, 0.9036452083899731, 0.8206394616536681, 0.8706483735804971, 0.8853924473642432, 0.0, 0.5279604218147174, 0.6658348373853169, 0.33799874888632914, 0.6920214832303888, 0.6329837991017668, 0.9036452083899731, 0.8206394616536681, 0.8706483735804971, 0.535821510936138, 0.5279604218147174, 0.0, 0.662579808115858, 0.5079750812968089, 0.9251771844789913, 0.8655339692155823, 0.8429599432532096, 0.8429599432532096, 0.8429599432532096, 0.6497196601457607, 0.6658348373853169, 0.662579808115858, 0.0, 0.8429599432532096, 0.9251771844789913, 0.8655339692155823, 0.9036452083899731, 0.8206394616536681, 0.8706483735804971, 0.8853924473642432, 0.33799874888632914, 0.5079750812968089, 0.8429599432532096, 0.0}, raft::distance::DistanceType::Linf, 0.0}; const InputConfiguration<int, float> input_l1 = {4, {0, 1, 1, 2, 4}, {3, 2, 0, 1}, // indices {0.99296, 0.42180, 0.11687, 0.305869}, { // dense output 0.0, 0.99296, 1.41476, 1.415707, 0.99296, 0.0, 0.42180, 
0.42274, 1.41476, 0.42180, 0.0, 0.84454, 1.41570, 0.42274, 0.84454, 0.0, }, raft::distance::DistanceType::L1, 0.0}; // test dense smem strategy const std::vector<SparseDistanceCOOSPMVInputs<int, float, dense_smem_strategy_t>> inputs_dense_strategy = {{input_inner_product}, {input_l2_unexpanded}, {input_canberra}, {input_lp_unexpanded}, {input_linf}, {input_l1}}; typedef SparseDistanceCOOSPMVTest<int, float, dense_smem_strategy_t> SparseDistanceCOOSPMVTestDenseStrategyF; TEST_P(SparseDistanceCOOSPMVTestDenseStrategyF, Result) { compare(); } INSTANTIATE_TEST_CASE_P(SparseDistanceCOOSPMVTests, SparseDistanceCOOSPMVTestDenseStrategyF, ::testing::ValuesIn(inputs_dense_strategy)); // test hash and chunk strategy const std::vector<SparseDistanceCOOSPMVInputs<int, float, hash_strategy_t>> inputs_hash_strategy = { {input_inner_product}, {input_inner_product, 0.5, 2}, {input_l2_unexpanded}, {input_l2_unexpanded, 0.5, 2}, {input_canberra}, {input_canberra, 0.5, 2}, {input_canberra, 0.5, 6}, {input_lp_unexpanded}, {input_lp_unexpanded, 0.5, 2}, {input_lp_unexpanded, 0.5, 6}, {input_linf}, {input_linf, 0.5, 2}, {input_linf, 0.5, 6}, {input_l1}, {input_l1, 0.5, 2}}; typedef SparseDistanceCOOSPMVTest<int, float, hash_strategy_t> SparseDistanceCOOSPMVTestHashStrategyF; TEST_P(SparseDistanceCOOSPMVTestHashStrategyF, Result) { compare(); } INSTANTIATE_TEST_CASE_P(SparseDistanceCOOSPMVTests, SparseDistanceCOOSPMVTestHashStrategyF, ::testing::ValuesIn(inputs_hash_strategy)); }; // namespace distance }; // end namespace sparse }; // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/row_op.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/sparse/csr.hpp>
#include <raft/sparse/op/row_op.cuh>

#include "../test_utils.cuh"
#include <raft/util/cudart_utils.hpp>

#include <iostream>
#include <limits>

namespace raft {
namespace sparse {

// Parameter pack for one csr_row_op test case.
template <typename Type_f, typename Index_>
struct CSRRowOpInputs {
  std::vector<Index_> ex_scan;  // CSR row offsets (exclusive scan)
  std::vector<Type_f> verify;   // expected output, one entry per nonzero
};

/** Wrapper to call csr_row_op because the enclosing function of a __device__
 * lambda cannot have private or protected access within the class.
 *
 * The device lambda writes the owning row id into result[i] for every
 * nonzero position i belonging to that row.
 */
template <typename Type_f, typename Index_>
void csr_row_op_wrapper(
  const Index_* row_ind, Index_ n_rows, Index_ nnz, Type_f* result, cudaStream_t stream)
{
  op::csr_row_op<Index_>(
    row_ind,
    n_rows,
    nnz,
    [result] __device__(Index_ row, Index_ start_idx, Index_ stop_idx) {
      for (Index_ idx = start_idx; idx < stop_idx; ++idx) {
        result[idx] = row;
      }
    },
    stream);
}

// Fixture: uploads the row offsets, runs csr_row_op on the device and
// compares the produced array against the expected values.
template <typename Type_f, typename Index_>
class CSRRowOpTest : public ::testing::TestWithParam<CSRRowOpInputs<Type_f, Index_>> {
 public:
  CSRRowOpTest()
    : params(::testing::TestWithParam<CSRRowOpInputs<Type_f, Index_>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      verify(params.verify.size(), stream),
      ex_scan(params.ex_scan.size(), stream),
      result(params.verify.size(), stream)
  {
  }

 protected:
  void SetUp() override
  {
    n_rows = params.ex_scan.size();
    nnz    = params.verify.size();
  }

  void Run()
  {
    // Stage the reference output and the row offsets on the device.
    raft::update_device(verify.data(), params.verify.data(), nnz, stream);
    raft::update_device(ex_scan.data(), params.ex_scan.data(), n_rows, stream);

    csr_row_op_wrapper<Type_f, Index_>(ex_scan.data(), n_rows, nnz, result.data(), stream);

    ASSERT_TRUE(raft::devArrMatch<Type_f>(
      verify.data(), result.data(), nnz, raft::Compare<Type_f>(), stream));
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  CSRRowOpInputs<Type_f, Index_> params;
  Index_ n_rows, nnz;
  rmm::device_uvector<Index_> ex_scan;
  rmm::device_uvector<Type_f> result, verify;
};

using CSRRowOpTestF = CSRRowOpTest<float, int>;
TEST_P(CSRRowOpTestF, Result) { Run(); }

using CSRRowOpTestD = CSRRowOpTest<double, int>;
TEST_P(CSRRowOpTestD, Result) { Run(); }

// Offsets {0,4,8,9} with nnz=10 describe rows with 4, 4, 1 and 1 nonzeros;
// every output element must equal its row index.
const std::vector<CSRRowOpInputs<float, int>> csrrowop_inputs_f = {
  {{0, 4, 8, 9}, {0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 2.0, 3.0}},
};
const std::vector<CSRRowOpInputs<double, int>> csrrowop_inputs_d = {
  {{0, 4, 8, 9}, {0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 2.0, 3.0}},
};

INSTANTIATE_TEST_CASE_P(SparseRowOpTest, CSRRowOpTestF, ::testing::ValuesIn(csrrowop_inputs_f));
INSTANTIATE_TEST_CASE_P(SparseRowOpTest, CSRRowOpTestD, ::testing::ValuesIn(csrrowop_inputs_d));

}  // namespace sparse
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/add.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/sparse/csr.hpp>
#include <raft/sparse/linalg/add.cuh>

#include "../test_utils.cuh"
#include <raft/util/cudart_utils.hpp>

#include <iostream>
#include <limits>

namespace raft {
namespace sparse {

// Host-side description of one CSR matrix. `row_ind` is the per-row offset
// array; `row_ind_ptr` carries one entry per nonzero, so in every test case
// row_ind_ptr.size() == values.size() == nnz.
template <typename Type_f, typename Index_>
struct CSRMatrixVal {
  std::vector<Index_> row_ind;
  std::vector<Index_> row_ind_ptr;
  std::vector<Type_f> values;
};

// One test case: matrices A and B plus the expected result of A + B.
template <typename Type_f, typename Index_>
struct CSRAddInputs {
  CSRMatrixVal<Type_f, Index_> matrix_a;
  CSRMatrixVal<Type_f, Index_> matrix_b;
  CSRMatrixVal<Type_f, Index_> matrix_verify;
};

// Fixture: uploads A, B and the expected sum, runs the two-pass CSR add
// (calc_inds then finalize) and verifies offsets, indices and values.
template <typename Type_f, typename Index_>
class CSRAddTest : public ::testing::TestWithParam<CSRAddInputs<Type_f, Index_>> {
 public:
  // Fix: each device buffer is now sized from its own matrix instead of
  // matrix_a (the previous code happened to work only because all matrices
  // in the test data share the same dimensions).
  CSRAddTest()
    : params(::testing::TestWithParam<CSRAddInputs<Type_f, Index_>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      ind_a(params.matrix_a.row_ind.size(), stream),
      ind_ptr_a(params.matrix_a.row_ind_ptr.size(), stream),
      values_a(params.matrix_a.values.size(), stream),
      ind_b(params.matrix_b.row_ind.size(), stream),
      ind_ptr_b(params.matrix_b.row_ind_ptr.size(), stream),
      values_b(params.matrix_b.values.size(), stream),
      ind_verify(params.matrix_verify.row_ind.size(), stream),
      ind_ptr_verify(params.matrix_verify.row_ind_ptr.size(), stream),
      values_verify(params.matrix_verify.values.size(), stream),
      ind_result(params.matrix_a.row_ind.size(), stream),
      ind_ptr_result(params.matrix_verify.row_ind_ptr.size(), stream),
      values_result(params.matrix_verify.values.size(), stream)
  {
  }

 protected:
  void SetUp() override
  {
    n_rows     = params.matrix_a.row_ind.size();
    nnz_a      = params.matrix_a.row_ind_ptr.size();
    nnz_b      = params.matrix_b.row_ind_ptr.size();
    nnz_result = params.matrix_verify.row_ind_ptr.size();
  }

  void Run()
  {
    // Stage A, B and the expected sum on the device.
    raft::update_device(ind_a.data(), params.matrix_a.row_ind.data(), n_rows, stream);
    raft::update_device(ind_ptr_a.data(), params.matrix_a.row_ind_ptr.data(), nnz_a, stream);
    raft::update_device(values_a.data(), params.matrix_a.values.data(), nnz_a, stream);
    raft::update_device(ind_b.data(), params.matrix_b.row_ind.data(), n_rows, stream);
    raft::update_device(ind_ptr_b.data(), params.matrix_b.row_ind_ptr.data(), nnz_b, stream);
    raft::update_device(values_b.data(), params.matrix_b.values.data(), nnz_b, stream);
    raft::update_device(ind_verify.data(), params.matrix_verify.row_ind.data(), n_rows, stream);
    raft::update_device(
      ind_ptr_verify.data(), params.matrix_verify.row_ind_ptr.data(), nnz_result, stream);
    raft::update_device(
      values_verify.data(), params.matrix_verify.values.data(), nnz_result, stream);

    // Pass 1: compute output row offsets and total nnz.
    Index_ nnz = linalg::csr_add_calc_inds<Type_f>(ind_a.data(),
                                                   ind_ptr_a.data(),
                                                   values_a.data(),
                                                   nnz_a,
                                                   ind_b.data(),
                                                   ind_ptr_b.data(),
                                                   values_b.data(),
                                                   nnz_b,
                                                   n_rows,
                                                   ind_result.data(),
                                                   stream);

    ASSERT_EQ(nnz, nnz_result);  // was ASSERT_TRUE(a == b); EQ prints both values

    ASSERT_TRUE(raft::devArrMatch<Index_>(
      ind_verify.data(), ind_result.data(), n_rows, raft::Compare<Index_>(), stream));

    // Pass 2: fill output column indices and values.
    linalg::csr_add_finalize<Type_f>(ind_a.data(),
                                     ind_ptr_a.data(),
                                     values_a.data(),
                                     nnz_a,
                                     ind_b.data(),
                                     ind_ptr_b.data(),
                                     values_b.data(),
                                     nnz_b,
                                     n_rows,
                                     ind_result.data(),
                                     ind_ptr_result.data(),
                                     values_result.data(),
                                     stream);

    ASSERT_TRUE(raft::devArrMatch<Index_>(
      ind_ptr_verify.data(), ind_ptr_result.data(), nnz, raft::Compare<Index_>(), stream));
    ASSERT_TRUE(raft::devArrMatch<Type_f>(
      values_verify.data(), values_result.data(), nnz, raft::Compare<Type_f>(), stream));
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  CSRAddInputs<Type_f, Index_> params;
  Index_ n_rows, nnz_a, nnz_b, nnz_result;
  rmm::device_uvector<Index_> ind_a, ind_b, ind_verify, ind_result, ind_ptr_a, ind_ptr_b,
    ind_ptr_verify, ind_ptr_result;
  rmm::device_uvector<Type_f> values_a, values_b, values_verify, values_result;
};

using CSRAddTestF = CSRAddTest<float, int>;
TEST_P(CSRAddTestF, Result) { Run(); }

using CSRAddTestD = CSRAddTest<double, int>;
TEST_P(CSRAddTestD, Result) { Run(); }

const std::vector<CSRAddInputs<float, int>> csradd_inputs_f = {
  {{{0, 4, 8, 9},
    {1, 2, 3, 4, 1, 2, 3, 5, 0, 1},
    {1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0}},
   {{0, 4, 8, 9},
    {1, 2, 5, 4, 0, 2, 3, 5, 1, 0},
    {1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0}},
   {{0, 5, 10, 12},
    {1, 2, 3, 4, 5, 1, 2, 3, 5, 0, 0, 1, 1, 0},
    {2.0, 2.0, 0.5, 1.0, 0.5, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}}},
};
const std::vector<CSRAddInputs<double, int>> csradd_inputs_d = {
  {{{0, 4, 8, 9},
    {1, 2, 3, 4, 1, 2, 3, 5, 0, 1},
    {1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0}},
   {{0, 4, 8, 9},
    {1, 2, 5, 4, 0, 2, 3, 5, 1, 0},
    {1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0}},
   {{0, 5, 10, 12},
    {1, 2, 3, 4, 5, 1, 2, 3, 5, 0, 0, 1, 1, 0},
    {2.0, 2.0, 0.5, 1.0, 0.5, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}}},
};

INSTANTIATE_TEST_CASE_P(SparseAddTest, CSRAddTestF, ::testing::ValuesIn(csradd_inputs_f));
INSTANTIATE_TEST_CASE_P(SparseAddTest, CSRAddTestD, ::testing::ValuesIn(csradd_inputs_d));

}  // namespace sparse
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/spectral_matrix.cu
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <iostream>
#include <memory>

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/device_id.hpp>
#include <raft/core/resources.hpp>
#include <raft/spectral/matrix_wrappers.hpp>

namespace raft {
namespace spectral {
namespace matrix {
namespace {

// Minimal host-side CSR view exercising the sparse_matrix_t constructor
// overload that accepts a csr_view-like aggregate.
template <typename index_type, typename value_type>
struct csr_view_t {
  index_type* offsets;
  index_type* indices;
  value_type* edge_data;
  index_type number_of_vertices;
  index_type number_of_edges;
};

}  // namespace

// Construction smoke test: null/empty CSR data is accepted by sparse_matrix_t
// but makes the laplacian/modularity wrapper constructors throw.
TEST(Raft, SpectralMatrices)
{
  using index_type = int;
  using value_type = double;

  raft::resources h;
  ASSERT_EQ(0, raft::resource::get_device_id(h));

  csr_view_t<index_type, value_type> csr_v{nullptr, nullptr, nullptr, 0, 0};

  // vector_t construction smoke check; the vector itself is intentionally unused.
  int const sz = 10;
  vector_t<index_type> d_v{h, sz};

  index_type* ro{nullptr};
  index_type* ci{nullptr};
  value_type* vs{nullptr};
  index_type nnz   = 0;
  index_type nrows = 0;

  // Both constructor overloads must tolerate null/empty CSR data.
  sparse_matrix_t<index_type, value_type> sm1{h, ro, ci, vs, nrows, nnz};
  sparse_matrix_t<index_type, value_type> sm2{h, csr_v};
  ASSERT_EQ(nullptr, sm1.row_offsets_);
  ASSERT_EQ(nullptr, sm2.row_offsets_);

  // (Removed an unused local `stream` that fetched the handle's CUDA stream
  // without ever being used.)

  auto cnstr_lm1 = [&h, ro, ci, vs, nrows, nnz](void) {
    laplacian_matrix_t<index_type, value_type> lm1{h, ro, ci, vs, nrows, nnz};
  };
  EXPECT_ANY_THROW(cnstr_lm1());  // because of nullptr ptr args

  auto cnstr_lm2 = [&h, &sm2](void) {
    laplacian_matrix_t<index_type, value_type> lm2{h, sm2};
  };
  EXPECT_ANY_THROW(cnstr_lm2());  // because of nullptr ptr args

  auto cnstr_mm1 = [&h, ro, ci, vs, nrows, nnz](void) {
    modularity_matrix_t<index_type, value_type> mm1{h, ro, ci, vs, nrows, nnz};
  };
  EXPECT_ANY_THROW(cnstr_mm1());  // because of nullptr ptr args

  auto cnstr_mm2 = [&h, &sm2](void) {
    modularity_matrix_t<index_type, value_type> mm2{h, sm2};
  };
  EXPECT_ANY_THROW(cnstr_mm2());  // because of nullptr ptr args
}

}  // namespace matrix
}  // namespace spectral
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/filter.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"

#include <gtest/gtest.h>

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cudart_utils.hpp>

#include <raft/random/rng.cuh>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/op/filter.cuh>
#include <raft/sparse/op/sort.cuh>

#include <iostream>
#include <vector>

namespace raft {
namespace sparse {

// Test parameters: matrix shape (m x n), number of nonzeros and RNG seed.
template <typename T>
struct SparseFilterInputs {
  int m, n, nnz;
  unsigned long long int seed;
};

template <typename T>
class SparseFilterTests : public ::testing::TestWithParam<SparseFilterInputs<T>> {
 protected:
  void SetUp() override {}

  void TearDown() override {}

 protected:
  SparseFilterInputs<T> params;
};

const std::vector<SparseFilterInputs<float>> inputsf = {{5, 10, 5, 1234ULL}};

typedef SparseFilterTests<float> COORemoveZeros;

// Builds a 5-entry COO matrix, zeros three values, and checks that
// coo_remove_zeros keeps exactly the two surviving entries.
TEST_P(COORemoveZeros, Result)
{
  raft::resources h;
  auto stream = resource::get_cuda_stream(h);
  params      = ::testing::TestWithParam<SparseFilterInputs<float>>::GetParam();

  // std::vector instead of raw new[]/malloc: the original buffers leaked
  // whenever an ASSERT_* failed and returned from the test early.
  std::vector<float> in_h_vals(params.nnz);

  COO<float> in(stream, params.nnz, 5, 5);

  // Random values, then force three of them to zero so the filter has work.
  raft::random::RngState r(params.seed);
  uniform(h, r, in.vals(), params.nnz, float(-1.0), float(1.0));

  raft::update_host(in_h_vals.data(), in.vals(), params.nnz, stream);

  in_h_vals[0] = 0;
  in_h_vals[2] = 0;
  in_h_vals[3] = 0;

  // Rows descend while cols ascend, so coo_sort will reverse the entries.
  std::vector<int> in_h_rows(params.nnz);
  std::vector<int> in_h_cols(params.nnz);

  for (int i = 0; i < params.nnz; i++) {
    in_h_rows[i] = params.nnz - i - 1;
    in_h_cols[i] = i;
  }

  raft::update_device(in.rows(), in_h_rows.data(), params.nnz, stream);
  raft::update_device(in.cols(), in_h_cols.data(), params.nnz, stream);
  raft::update_device(in.vals(), in_h_vals.data(), params.nnz, stream);

  op::coo_sort<float>(&in, stream);

  // After the row-major sort the nonzero survivors are the entries that sat
  // at original value indices 4 and 1, landing on rows 0 and 3 / cols 4 and 1.
  int out_rows_ref_h[2]   = {0, 3};
  int out_cols_ref_h[2]   = {4, 1};
  float out_vals_ref_h[2] = {in_h_vals[4], in_h_vals[1]};

  COO<float> out_ref(stream, 2, 5, 5);
  COO<float> out(stream);

  raft::update_device(out_ref.rows(), out_rows_ref_h, 2, stream);
  raft::update_device(out_ref.cols(), out_cols_ref_h, 2, stream);
  raft::update_device(out_ref.vals(), out_vals_ref_h, 2, stream);

  op::coo_remove_zeros<float>(&in, &out, stream);
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));

  ASSERT_TRUE(raft::devArrMatch<int>(out_ref.rows(), out.rows(), 2, raft::Compare<int>()));
  ASSERT_TRUE(raft::devArrMatch<int>(out_ref.cols(), out.cols(), 2, raft::Compare<int>()));
  ASSERT_TRUE(raft::devArrMatch<float>(out_ref.vals(), out.vals(), 2, raft::Compare<float>()));
}

INSTANTIATE_TEST_CASE_P(SparseFilterTests, COORemoveZeros, ::testing::ValuesIn(inputsf));

}  // namespace sparse
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/normalize.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <raft/core/resource/cuda_stream.hpp>

#include "../test_utils.cuh"

#include <raft/core/resources.hpp>
#include <raft/sparse/csr.hpp>
#include <raft/sparse/linalg/norm.cuh>
#include <raft/util/cudart_utils.hpp>

#include <iostream>
#include <limits>

namespace raft {
namespace sparse {

// Selects which row-normalization kernel a test case exercises.
enum NormalizeMethod { MAX, L1 };

// One test case: method, CSR row offsets, input values and expected output.
template <typename Type_f, typename Index_>
struct CSRRowNormalizeInputs {
  NormalizeMethod method;
  std::vector<Index_> ex_scan;
  std::vector<Type_f> in_vals;
  std::vector<Type_f> verify;
};

// Fixture: stages a CSR value array on the device, normalizes each row with
// the requested kernel and compares the result element-wise.
template <typename Type_f, typename Index_>
class CSRRowNormalizeTest
  : public ::testing::TestWithParam<CSRRowNormalizeInputs<Type_f, Index_>> {
 public:
  CSRRowNormalizeTest()
    : params(::testing::TestWithParam<CSRRowNormalizeInputs<Type_f, Index_>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in_vals(params.in_vals.size(), stream),
      verify(params.verify.size(), stream),
      ex_scan(params.ex_scan.size(), stream),
      result(params.verify.size(), stream)
  {
  }

 protected:
  void SetUp() override {}

  void Run()
  {
    Index_ num_rows = params.ex_scan.size();
    Index_ num_vals = params.in_vals.size();

    // Copy row offsets, input values and the expected output to the device.
    raft::update_device(ex_scan.data(), params.ex_scan.data(), num_rows, stream);
    raft::update_device(in_vals.data(), params.in_vals.data(), num_vals, stream);
    raft::update_device(verify.data(), params.verify.data(), num_vals, stream);

    if (params.method == MAX) {
      linalg::csr_row_normalize_max<Type_f>(
        ex_scan.data(), in_vals.data(), num_vals, num_rows, result.data(), stream);
    } else {  // L1
      linalg::csr_row_normalize_l1<Type_f>(
        ex_scan.data(), in_vals.data(), num_vals, num_rows, result.data(), stream);
    }
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));

    ASSERT_TRUE(
      raft::devArrMatch<Type_f>(verify.data(), result.data(), num_vals, raft::Compare<Type_f>()));
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  CSRRowNormalizeInputs<Type_f, Index_> params;
  rmm::device_uvector<Index_> ex_scan;
  rmm::device_uvector<Type_f> in_vals, result, verify;
};

using CSRRowNormalizeTestF = CSRRowNormalizeTest<float, int>;
TEST_P(CSRRowNormalizeTestF, Result) { Run(); }

using CSRRowNormalizeTestD = CSRRowNormalizeTest<double, int>;
TEST_P(CSRRowNormalizeTestD, Result) { Run(); }

const std::vector<CSRRowNormalizeInputs<float, int>> csrnormalize_inputs_f = {
  {MAX,
   {0, 4, 8, 9},
   {5.0, 1.0, 0.0, 0.0, 10.0, 1.0, 0.0, 0.0, 1.0, 0.0},
   {1.0, 0.2, 0.0, 0.0, 1.0, 0.1, 0.0, 0.0, 1, 0.0}},
  {L1,
   {0, 4, 8, 9},
   {1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0},
   {0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 1, 0.0}},
};
const std::vector<CSRRowNormalizeInputs<double, int>> csrnormalize_inputs_d = {
  {MAX,
   {0, 4, 8, 9},
   {5.0, 1.0, 0.0, 0.0, 10.0, 1.0, 0.0, 0.0, 1.0, 0.0},
   {1.0, 0.2, 0.0, 0.0, 1.0, 0.1, 0.0, 0.0, 1, 0.0}},
  {L1,
   {0, 4, 8, 9},
   {1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0},
   {0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 1, 0.0}},
};

INSTANTIATE_TEST_CASE_P(SparseNormTest,
                        CSRRowNormalizeTestF,
                        ::testing::ValuesIn(csrnormalize_inputs_f));
INSTANTIATE_TEST_CASE_P(SparseNormTest,
                        CSRRowNormalizeTestD,
                        ::testing::ValuesIn(csrnormalize_inputs_d));

}  // namespace sparse
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/mst.cu
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/sparse/mst/mst.cuh>
#include <raft/util/cudart_utils.hpp>

#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>

#include <thrust/execution_policy.h>
#include <thrust/memory.h>
#include <thrust/reduce.h>

#include <gtest/gtest.h>

// NOTE(review): <bits/stdc++.h> is a non-portable GCC catch-all; kept for
// compatibility but deduplicated (it, <gtest/gtest.h> and <vector> were each
// included twice).
#include <bits/stdc++.h>
#include <cstddef>
#include <iostream>
#include <limits>
#include <utility>
#include <vector>

// Host-side CSR adjacency of the input graph.
template <typename vertex_t, typename edge_t, typename weight_t>
struct CSRHost {
  std::vector<edge_t> offsets;
  std::vector<vertex_t> indices;
  std::vector<weight_t> weights;
};

// One test case: the graph plus the number of intermediate MST iterations.
template <typename vertex_t, typename edge_t, typename weight_t>
struct MSTTestInput {
  struct CSRHost<vertex_t, edge_t, weight_t> csr_h;
  int iterations;
};

// Device-side CSR buffers mirroring CSRHost.
template <typename vertex_t, typename edge_t, typename weight_t>
struct CSRDevice {
  rmm::device_buffer offsets;
  rmm::device_buffer indices;
  rmm::device_buffer weights;
};

namespace raft {
namespace mst {

// Sequential prims function
// Returns total weight of MST
template <typename vertex_t, typename edge_t, typename weight_t>
weight_t prims(CSRHost<vertex_t, edge_t, weight_t>& csr_h)
{
  std::size_t n_vertices = csr_h.offsets.size() - 1;

  // std::vector instead of the original C-style VLAs (runtime-sized stack
  // arrays are not standard C++).
  std::vector<char> active_vertex(n_vertices, 0);
  std::vector<weight_t> curr_edge(n_vertices,
                                  static_cast<weight_t>(std::numeric_limits<int>::max()));
  curr_edge[0] = 0;

  // function to pick next min vertex-edge
  auto min_vertex_edge = [](auto& curr_edge, auto& active_vertex, auto n_vertices) {
    auto min = static_cast<weight_t>(std::numeric_limits<int>::max());
    vertex_t min_vertex{};

    for (std::size_t v = 0; v < n_vertices; v++) {
      if (!active_vertex[v] && curr_edge[v] < min) {
        min        = curr_edge[v];
        min_vertex = v;
      }
    }

    return min_vertex;
  };

  // iterate over n vertices
  for (std::size_t v = 0; v < n_vertices - 1; v++) {
    // pick min vertex-edge
    auto curr_v           = min_vertex_edge(curr_edge, active_vertex, n_vertices);
    active_vertex[curr_v] = true;  // set to active

    // iterate through edges of current active vertex
    auto edge_st  = csr_h.offsets[curr_v];
    auto edge_end = csr_h.offsets[curr_v + 1];

    for (auto e = edge_st; e < edge_end; e++) {
      // put edges to be considered for next iteration
      auto neighbor_idx = csr_h.indices[e];
      if (!active_vertex[neighbor_idx] && csr_h.weights[e] < curr_edge[neighbor_idx]) {
        curr_edge[neighbor_idx] = csr_h.weights[e];
      }
    }
  }

  // find sum of MST
  weight_t total_weight = 0;
  for (std::size_t v = 1; v < n_vertices; v++) {
    total_weight += curr_edge[v];
  }

  return total_weight;
}

// Fixture: copies the host CSR into device buffers and runs the GPU MST
// solver in its symmetric and non-symmetric output modes.
template <typename vertex_t, typename edge_t, typename weight_t>
class MSTTest : public ::testing::TestWithParam<MSTTestInput<vertex_t, edge_t, weight_t>> {
 protected:
  // Runs the solver twice (symmetric and non-symmetric output) and returns
  // both results. When `iterations` > 0, an iteration-capped "intermediate"
  // solve is run first and its edges are stitched onto a follow-up solve.
  std::pair<raft::Graph_COO<vertex_t, edge_t, weight_t>,
            raft::Graph_COO<vertex_t, edge_t, weight_t>>
  mst_gpu()
  {
    edge_t* offsets   = static_cast<edge_t*>(csr_d.offsets.data());
    vertex_t* indices = static_cast<vertex_t*>(csr_d.indices.data());
    weight_t* weights = static_cast<weight_t*>(csr_d.weights.data());

    // NOTE(review): offsets stores edge_t but is divided by sizeof(vertex_t);
    // this is only correct while sizeof(vertex_t) == sizeof(edge_t) (both int
    // in these tests) — confirm if the instantiated types ever diverge.
    v = static_cast<vertex_t>((csr_d.offsets.size() / sizeof(vertex_t)) - 1);
    e = static_cast<edge_t>(csr_d.indices.size() / sizeof(edge_t));

    // Hoisted: every statement below used to re-fetch the stream per call.
    auto stream = resource::get_cuda_stream(handle);

    rmm::device_uvector<vertex_t> mst_src(2 * v - 2, stream);
    rmm::device_uvector<vertex_t> mst_dst(2 * v - 2, stream);
    rmm::device_uvector<vertex_t> color(v, stream);

    RAFT_CUDA_TRY(cudaMemsetAsync(mst_src.data(),
                                  std::numeric_limits<vertex_t>::max(),
                                  mst_src.size() * sizeof(vertex_t),
                                  stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(mst_dst.data(),
                                  std::numeric_limits<vertex_t>::max(),
                                  mst_dst.size() * sizeof(vertex_t),
                                  stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(color.data(), 0, color.size() * sizeof(vertex_t), stream));

    vertex_t* color_ptr = thrust::raw_pointer_cast(color.data());

    if (iterations == 0) {
      MST_solver<vertex_t, edge_t, weight_t, float> symmetric_solver(
        handle, offsets, indices, weights, v, e, color_ptr, stream, true, true, 0);
      auto symmetric_result = symmetric_solver.solve();

      MST_solver<vertex_t, edge_t, weight_t, float> non_symmetric_solver(
        handle, offsets, indices, weights, v, e, color_ptr, stream, false, true, 0);
      auto non_symmetric_result = non_symmetric_solver.solve();

      // Edge-count bounds: at most 2*(v-1) in symmetric mode, v-1 otherwise.
      EXPECT_LE(symmetric_result.n_edges, 2 * v - 2);
      EXPECT_LE(non_symmetric_result.n_edges, v - 1);

      return std::make_pair(std::move(symmetric_result), std::move(non_symmetric_result));
    } else {
      // Solve capped at `iterations`, then resume and append the intermediate
      // edges onto the follow-up result.
      MST_solver<vertex_t, edge_t, weight_t, float> intermediate_solver(
        handle, offsets, indices, weights, v, e, color_ptr, stream, true, true, iterations);
      auto intermediate_result = intermediate_solver.solve();

      MST_solver<vertex_t, edge_t, weight_t, float> symmetric_solver(
        handle, offsets, indices, weights, v, e, color_ptr, stream, true, false, 0);
      auto symmetric_result = symmetric_solver.solve();

      auto total_edge_size = symmetric_result.n_edges + intermediate_result.n_edges;

      symmetric_result.src.resize(total_edge_size, stream);
      symmetric_result.dst.resize(total_edge_size, stream);
      symmetric_result.weights.resize(total_edge_size, stream);

      raft::copy(symmetric_result.src.data() + symmetric_result.n_edges,
                 intermediate_result.src.data(),
                 intermediate_result.n_edges,
                 stream);
      raft::copy(symmetric_result.dst.data() + symmetric_result.n_edges,
                 intermediate_result.dst.data(),
                 intermediate_result.n_edges,
                 stream);
      raft::copy(symmetric_result.weights.data() + symmetric_result.n_edges,
                 intermediate_result.weights.data(),
                 intermediate_result.n_edges,
                 stream);

      symmetric_result.n_edges = total_edge_size;

      MST_solver<vertex_t, edge_t, weight_t, float> non_symmetric_solver(
        handle, offsets, indices, weights, v, e, color_ptr, stream, false, true, 0);
      auto non_symmetric_result = non_symmetric_solver.solve();

      EXPECT_LE(symmetric_result.n_edges, 2 * v - 2);
      EXPECT_LE(non_symmetric_result.n_edges, v - 1);

      return std::make_pair(std::move(symmetric_result), std::move(non_symmetric_result));
    }
  }

  // Copies the host CSR of the current test case into device buffers.
  void SetUp() override
  {
    mst_input  = ::testing::TestWithParam<MSTTestInput<vertex_t, edge_t, weight_t>>::GetParam();
    iterations = mst_input.iterations;

    auto stream   = resource::get_cuda_stream(handle);
    csr_d.offsets = rmm::device_buffer(
      mst_input.csr_h.offsets.data(), mst_input.csr_h.offsets.size() * sizeof(edge_t), stream);
    csr_d.indices = rmm::device_buffer(
      mst_input.csr_h.indices.data(), mst_input.csr_h.indices.size() * sizeof(vertex_t), stream);
    csr_d.weights = rmm::device_buffer(
      mst_input.csr_h.weights.data(), mst_input.csr_h.weights.size() * sizeof(weight_t), stream);
  }

  void TearDown() override {}

 protected:
  MSTTestInput<vertex_t, edge_t, weight_t> mst_input;
  CSRDevice<vertex_t, edge_t, weight_t> csr_d;
  vertex_t v;
  edge_t e;
  int iterations;
  raft::resources handle;
};

// connected components tests
// a full MST is produced
const std::vector<MSTTestInput<int, int, float>> csr_in_h = { // single iteration {{{0, 3, 5, 7, 8}, {1, 2, 3, 0, 3, 0, 0, 1}, {2, 3, 4, 2, 1, 3, 4, 1}}, 0}, // multiple iterations and cycles {{{0, 4, 6, 9, 12, 15, 17, 20}, {2, 4, 5, 6, 3, 6, 0, 4, 5, 1, 4, 6, 0, 2, 3, 0, 2, 0, 1, 3}, {5.0f, 9.0f, 1.0f, 4.0f, 8.0f, 7.0f, 5.0f, 2.0f, 6.0f, 8.0f, 1.0f, 10.0f, 9.0f, 2.0f, 1.0f, 1.0f, 6.0f, 4.0f, 7.0f, 10.0f}}, 1}, // negative weights {{{0, 4, 6, 9, 12, 15, 17, 20}, {2, 4, 5, 6, 3, 6, 0, 4, 5, 1, 4, 6, 0, 2, 3, 0, 2, 0, 1, 3}, {-5.0f, -9.0f, -1.0f, 4.0f, -8.0f, -7.0f, -5.0f, -2.0f, -6.0f, -8.0f, -1.0f, -10.0f, -9.0f, -2.0f, -1.0f, -1.0f, -6.0f, 4.0f, -7.0f, -10.0f}}, 0}, // // equal weights {{{0, 4, 6, 9, 12, 15, 17, 20}, {2, 4, 5, 6, 3, 6, 0, 4, 5, 1, 4, 6, 0, 2, 3, 0, 2, 0, 1, 3}, {0.1, 0.1, 0.1, 0.1, 0.2, 0.2, 0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, 0.2, 0.1, 0.2, 0.1}}, 0}, // //self loop {{{0, 4, 6, 9, 12, 15, 17, 20}, {0, 4, 5, 6, 3, 6, 2, 4, 5, 1, 4, 6, 0, 2, 3, 0, 2, 0, 1, 3}, {0.5f, 9.0f, 1.0f, 4.0f, 8.0f, 7.0f, 0.5f, 2.0f, 6.0f, 8.0f, 1.0f, 10.0f, 9.0f, 2.0f, 1.0f, 1.0f, 6.0f, 4.0f, 7.0f, 10.0f}}, 0}}; // disconnected const std::vector<CSRHost<int, int, float>> csr_in4_h = { {{0, 3, 5, 8, 10, 12, 14, 16}, {2, 4, 5, 3, 6, 0, 4, 5, 1, 6, 0, 2, 0, 2, 1, 3}, {5.0f, 9.0f, 1.0f, 8.0f, 7.0f, 5.0f, 2.0f, 6.0f, 8.0f, 10.0f, 9.0f, 2.0f, 1.0f, 6.0f, 7.0f, 10.0f}}}; // singletons const std::vector<CSRHost<int, int, float>> csr_in5_h = { {{0, 3, 5, 8, 10, 10, 10, 12, 14, 16, 16}, {2, 8, 7, 3, 8, 0, 8, 7, 1, 8, 0, 2, 0, 2, 1, 3}, {5.0f, 9.0f, 1.0f, 8.0f, 7.0f, 5.0f, 2.0f, 6.0f, 8.0f, 10.0f, 9.0f, 2.0f, 1.0f, 6.0f, 7.0f, 10.0f}}}; typedef MSTTest<int, int, float> MSTTestSequential; TEST_P(MSTTestSequential, Sequential) { auto results_pair = mst_gpu(); auto& symmetric_result = results_pair.first; auto& non_symmetric_result = results_pair.second; // do assertions here // in this case, running sequential MST auto prims_result = prims(mst_input.csr_h); auto 
symmetric_sum = thrust::reduce(thrust::device, symmetric_result.weights.data(), symmetric_result.weights.data() + symmetric_result.n_edges); auto non_symmetric_sum = thrust::reduce(thrust::device, non_symmetric_result.weights.data(), non_symmetric_result.weights.data() + non_symmetric_result.n_edges); ASSERT_TRUE(raft::match(2 * prims_result, symmetric_sum, raft::CompareApprox<float>(0.1))); ASSERT_TRUE(raft::match(prims_result, non_symmetric_sum, raft::CompareApprox<float>(0.1))); } INSTANTIATE_TEST_SUITE_P(MSTTests, MSTTestSequential, ::testing::ValuesIn(csr_in_h)); } // namespace mst } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/spgemmi.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resource/cusparse_handle.hpp> #include "../test_utils.cuh" #include <raft/core/resources.hpp> #include <raft/linalg/transpose.cuh> #include <raft/sparse/detail/cusparse_wrappers.h> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <iostream> #include <limits> namespace raft { namespace sparse { struct SPGemmiInputs { int n_rows, n_cols; }; template <typename data_t> class SPGemmiTest : public ::testing::TestWithParam<SPGemmiInputs> { public: SPGemmiTest() : params(::testing::TestWithParam<SPGemmiInputs>::GetParam()), stream(resource::get_cuda_stream(handle)) { } protected: void SetUp() override {} void Run() { // Host problem definition float alpha = 1.0f; float beta = 0.0f; int A_num_rows = 5; int A_num_cols = 3; // int B_num_rows = A_num_cols; int B_num_cols = 4; int B_nnz = 9; int lda = A_num_rows; int ldc = A_num_rows; int A_size = lda * A_num_cols; int C_size = ldc * B_num_cols; int hB_cscOffsets[] = {0, 3, 4, 7, 9}; int hB_rows[] = {0, 2, 3, 1, 0, 2, 3, 1, 3}; float hB_values[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}; float hA[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f}; std::vector<float> hC(C_size); std::vector<float> hC_expected{23, 26, 29, 32, 35, 24, 28, 32, 36, 40, 71, 82, 93, 
104, 115, 48, 56, 64, 72, 80}; //-------------------------------------------------------------------------- // Device memory management rmm::device_uvector<int> dB_cscOffsets(B_num_cols + 1, stream); rmm::device_uvector<int> dB_rows(B_nnz, stream); rmm::device_uvector<float> dB_values(B_nnz, stream); rmm::device_uvector<float> dA(A_size, stream); rmm::device_uvector<float> dC(C_size, stream); rmm::device_uvector<float> dCT(C_size, stream); raft::update_device(dB_cscOffsets.data(), hB_cscOffsets, B_num_cols + 1, stream); raft::update_device(dB_rows.data(), hB_rows, B_nnz, stream); raft::update_device(dB_values.data(), hB_values, B_nnz, stream); raft::update_device(dA.data(), hA, A_size, stream); raft::update_device(dC.data(), hC.data(), C_size, stream); //-------------------------------------------------------------------------- // execute gemmi RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsegemmi(resource::get_cusparse_handle(handle), A_num_rows, B_num_cols, A_num_cols, B_nnz, &alpha, dA.data(), lda, dB_values.data(), dB_cscOffsets.data(), dB_rows.data(), &beta, dC.data(), ldc, resource::get_cuda_stream(handle))); //-------------------------------------------------------------------------- // result check raft::update_host(hC.data(), dC.data(), C_size, stream); ASSERT_TRUE(hostVecMatch(hC_expected, hC, raft::Compare<float>())); } protected: raft::resources handle; cudaStream_t stream; SPGemmiInputs params; }; using SPGemmiTestF = SPGemmiTest<float>; TEST_P(SPGemmiTestF, Result) { Run(); } using SPGemmiTestD = SPGemmiTest<double>; TEST_P(SPGemmiTestD, Result) { Run(); } const std::vector<SPGemmiInputs> csc_inputs_f = {{5, 4}}; const std::vector<SPGemmiInputs> csc_inputs_d = {{5, 4}}; INSTANTIATE_TEST_CASE_P(SparseGemmi, SPGemmiTestF, ::testing::ValuesIn(csc_inputs_f)); INSTANTIATE_TEST_CASE_P(SparseGemmi, SPGemmiTestD, ::testing::ValuesIn(csc_inputs_d)); } // namespace sparse } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/csr_row_slice.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cusparse_v2.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/util/cudart_utils.hpp> #include <gtest/gtest.h> #include <raft/sparse/detail/cusparse_wrappers.h> #include <raft/sparse/op/slice.cuh> #include <rmm/device_uvector.hpp> #include "../test_utils.cuh" namespace raft { namespace sparse { using namespace raft; using namespace raft::sparse; template <typename value_idx, typename value_t> struct CSRRowSliceInputs { value_idx start_row; value_idx stop_row; std::vector<value_idx> indptr_h; std::vector<value_idx> indices_h; std::vector<value_t> data_h; std::vector<value_idx> out_indptr_ref_h; std::vector<value_idx> out_indices_ref_h; std::vector<value_t> out_data_ref_h; }; template <typename value_idx, typename value_t> ::std::ostream& operator<<(::std::ostream& os, const CSRRowSliceInputs<value_idx, value_t>& dims) { return os; } template <typename value_idx, typename value_t> class CSRRowSliceTest : public ::testing::TestWithParam<CSRRowSliceInputs<value_idx, value_t>> { public: CSRRowSliceTest() : params(::testing::TestWithParam<CSRRowSliceInputs<value_idx, value_t>>::GetParam()), stream(resource::get_cuda_stream(handle)), indptr(0, stream), indices(0, stream), data(0, stream), out_indptr_ref(0, stream), out_indices_ref(0, stream), out_data_ref(0, stream), out_indptr(0, stream), out_indices(0, stream), out_data(0, 
stream) { indptr.resize(params.indptr_h.size(), stream); indices.resize(params.indices_h.size(), stream); data.resize(params.data_h.size(), stream); out_indptr_ref.resize(params.out_indptr_ref_h.size(), stream); out_indices_ref.resize(params.out_indices_ref_h.size(), stream); out_data_ref.resize(params.out_data_ref_h.size(), stream); out_indptr.resize(params.out_indptr_ref_h.size(), stream); out_indices.resize(params.out_indices_ref_h.size(), stream); out_data.resize(params.out_data_ref_h.size(), stream); } protected: void make_data() { std::vector<value_idx> indptr_h = params.indptr_h; std::vector<value_idx> indices_h = params.indices_h; std::vector<value_t> data_h = params.data_h; update_device(indptr.data(), indptr_h.data(), indptr_h.size(), stream); update_device(indices.data(), indices_h.data(), indices_h.size(), stream); update_device(data.data(), data_h.data(), data_h.size(), stream); std::vector<value_idx> out_indptr_ref_h = params.out_indptr_ref_h; std::vector<value_idx> out_indices_ref_h = params.out_indices_ref_h; std::vector<value_t> out_data_ref_h = params.out_data_ref_h; update_device(out_indptr_ref.data(), out_indptr_ref_h.data(), out_indptr_ref_h.size(), stream); update_device( out_indices_ref.data(), out_indices_ref_h.data(), out_indices_ref_h.size(), stream); update_device(out_data_ref.data(), out_data_ref_h.data(), out_data_ref_h.size(), stream); resource::sync_stream(handle, stream); } void SetUp() override { make_data(); int csr_start_offset; int csr_stop_offset; raft::sparse::op::csr_row_slice_indptr(params.start_row, params.stop_row, indptr.data(), out_indptr.data(), &csr_start_offset, &csr_stop_offset, stream); raft::sparse::op::csr_row_slice_populate(csr_start_offset, csr_stop_offset, indices.data(), data.data(), out_indices.data(), out_data.data(), stream); resource::sync_stream(handle, stream); } void compare() { ASSERT_TRUE(devArrMatch(out_indptr.data(), out_indptr_ref.data(), params.out_indptr_ref_h.size(), Compare<value_t>())); 
ASSERT_TRUE(devArrMatch(out_indices.data(), out_indices_ref.data(), params.out_indices_ref_h.size(), Compare<value_t>())); ASSERT_TRUE(devArrMatch( out_data.data(), out_data_ref.data(), params.out_data_ref_h.size(), Compare<value_t>())); } protected: raft::resources handle; cudaStream_t stream; // input data rmm::device_uvector<value_idx> indptr, indices; rmm::device_uvector<value_t> data; // output data rmm::device_uvector<value_idx> out_indptr, out_indices; rmm::device_uvector<value_t> out_data; // expected output data rmm::device_uvector<value_idx> out_indptr_ref, out_indices_ref; rmm::device_uvector<value_t> out_data_ref; CSRRowSliceInputs<value_idx, value_t> params; }; const std::vector<CSRRowSliceInputs<int, float>> inputs_i32_f = { {1, 3, {0, 2, 4, 6, 8}, {0, 1, 0, 1, 0, 1, 0, 1}, // indices {1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f}, {0, 2, 4, 6}, {0, 1, 0, 1, 0, 1}, // indices {1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f}}, { 2, 3, {0, 2, 4, 6, 8}, {0, 1, 0, 1, 0, 1, 0, 1}, // indices {1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f}, {0, 2, 4}, {0, 1, 0, 1}, // indices {50.0f, 28.0f, 16.0f, 2.0f}, } }; typedef CSRRowSliceTest<int, float> CSRRowSliceTestF; TEST_P(CSRRowSliceTestF, Result) { compare(); } INSTANTIATE_TEST_CASE_P(CSRRowSliceTest, CSRRowSliceTestF, ::testing::ValuesIn(inputs_i32_f)); }; // end namespace sparse }; // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/norm.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include "../test_utils.cuh" #include <raft/core/resources.hpp> #include <raft/linalg/norm_types.hpp> #include <raft/sparse/linalg/norm.cuh> #include <raft/util/cudart_utils.hpp> #include <iostream> #include <limits> namespace raft { namespace sparse { template <typename Type_f, typename Index_> struct CSRRowNormInputs { raft::linalg::NormType norm; std::vector<Index_> indptr; std::vector<Type_f> data; std::vector<Type_f> verify; }; template <typename Type_f, typename Index_> class CSRRowNormTest : public ::testing::TestWithParam<CSRRowNormInputs<Type_f, Index_>> { public: CSRRowNormTest() : params(::testing::TestWithParam<CSRRowNormInputs<Type_f, Index_>>::GetParam()), stream(resource::get_cuda_stream(handle)), data(params.data.size(), stream), verify(params.indptr.size() - 1, stream), indptr(params.indptr.size(), stream), result(params.indptr.size() - 1, stream) { } protected: void SetUp() override {} void Run() { Index_ n_rows = params.indptr.size() - 1; Index_ nnz = params.data.size(); raft::update_device(indptr.data(), params.indptr.data(), n_rows + 1, stream); raft::update_device(data.data(), params.data.data(), nnz, stream); raft::update_device(verify.data(), params.verify.data(), n_rows, stream); linalg::rowNormCsr(handle, indptr.data(), data.data(), nnz, n_rows, result.data(), params.norm); 
RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); ASSERT_TRUE( raft::devArrMatch<Type_f>(verify.data(), result.data(), n_rows, raft::Compare<Type_f>())); } protected: raft::resources handle; cudaStream_t stream; CSRRowNormInputs<Type_f, Index_> params; rmm::device_uvector<Index_> indptr; rmm::device_uvector<Type_f> data, result, verify; }; using CSRRowNormTestF = CSRRowNormTest<float, int>; TEST_P(CSRRowNormTestF, Result) { Run(); } using CSRRowNormTestD = CSRRowNormTest<double, int>; TEST_P(CSRRowNormTestD, Result) { Run(); } const std::vector<CSRRowNormInputs<float, int>> csrnorm_inputs_f = { {raft::linalg::NormType::LinfNorm, {0, 3, 7, 10}, {5.0, 1.0, 2.0, 0.0, 10.0, 1.0, 2.0, 1.0, 1.0, 2.0}, {5.0, 10.0, 2.0}}, {raft::linalg::NormType::L1Norm, {0, 3, 7, 10}, {5.0, 1.0, 2.0, 0.0, 10.0, 1.0, 2.0, 1.0, 1.0, 2.0}, {8.0, 13.0, 4.0}}, {raft::linalg::NormType::L2Norm, {0, 3, 7, 10}, {5.0, 1.0, 2.0, 0.0, 10.0, 1.0, 2.0, 1.0, 1.0, 2.0}, {30.0, 105.0, 6.0}}, }; const std::vector<CSRRowNormInputs<double, int>> csrnorm_inputs_d = { {raft::linalg::NormType::LinfNorm, {0, 3, 7, 10}, {5.0, 1.0, 2.0, 0.0, 10.0, 1.0, 2.0, 1.0, 1.0, 2.0}, {5.0, 10.0, 2.0}}, {raft::linalg::NormType::L1Norm, {0, 3, 7, 10}, {5.0, 1.0, 2.0, 0.0, 10.0, 1.0, 2.0, 1.0, 1.0, 2.0}, {8.0, 13.0, 4.0}}, {raft::linalg::NormType::L2Norm, {0, 3, 7, 10}, {5.0, 1.0, 2.0, 0.0, 10.0, 1.0, 2.0, 1.0, 1.0, 2.0}, {30.0, 105.0, 6.0}}, }; INSTANTIATE_TEST_CASE_P(SparseNormTest, CSRRowNormTestF, ::testing::ValuesIn(csrnorm_inputs_f)); INSTANTIATE_TEST_CASE_P(SparseNormTest, CSRRowNormTestD, ::testing::ValuesIn(csrnorm_inputs_d)); } // namespace sparse } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/reduce.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include "../test_utils.cuh" #include <iostream> #include <limits> #include <raft/core/resources.hpp> #include <raft/sparse/coo.hpp> #include <raft/sparse/op/reduce.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> namespace raft { namespace sparse { template <typename value_t, typename value_idx> struct SparseReduceInputs { std::vector<value_idx> in_rows; std::vector<value_idx> in_cols; std::vector<value_t> in_vals; std::vector<value_idx> out_rows; std::vector<value_idx> out_cols; std::vector<value_t> out_vals; size_t m; size_t n; }; template <typename value_t, typename value_idx> class SparseReduceTest : public ::testing::TestWithParam<SparseReduceInputs<value_t, value_idx>> { protected: void SetUp() override { params = ::testing::TestWithParam<SparseReduceInputs<value_t, value_idx>>::GetParam(); } void Run() { raft::resources handle; auto stream = resource::get_cuda_stream(handle); rmm::device_uvector<value_idx> in_rows(params.in_rows.size(), stream); rmm::device_uvector<value_idx> in_cols(params.in_cols.size(), stream); rmm::device_uvector<value_t> in_vals(params.in_vals.size(), stream); rmm::device_uvector<value_idx> out_rows(params.out_rows.size(), stream); rmm::device_uvector<value_idx> out_cols(params.out_cols.size(), stream); rmm::device_uvector<value_t> 
out_vals(params.out_vals.size(), stream); raft::update_device(in_rows.data(), params.in_rows.data(), params.in_rows.size(), stream); raft::update_device(in_cols.data(), params.in_cols.data(), params.in_cols.size(), stream); raft::update_device(in_vals.data(), params.in_vals.data(), params.in_vals.size(), stream); raft::update_device(out_rows.data(), params.out_rows.data(), params.out_rows.size(), stream); raft::update_device(out_cols.data(), params.out_cols.data(), params.out_cols.size(), stream); raft::update_device(out_vals.data(), params.out_vals.data(), params.out_vals.size(), stream); raft::sparse::COO<value_t, value_idx> out(stream); raft::sparse::op::max_duplicates(handle, out, in_rows.data(), in_cols.data(), in_vals.data(), params.in_rows.size(), params.m, params.n); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); ASSERT_TRUE(raft::devArrMatch<value_idx>( out_rows.data(), out.rows(), out.nnz, raft::Compare<value_idx>())); ASSERT_TRUE(raft::devArrMatch<value_idx>( out_cols.data(), out.cols(), out.nnz, raft::Compare<value_idx>())); ASSERT_TRUE( raft::devArrMatch<value_t>(out_vals.data(), out.vals(), out.nnz, raft::Compare<value_t>())); } void TearDown() override {} protected: SparseReduceInputs<value_t, value_idx> params; value_idx *in_rows, *in_cols, *out_rows, *out_cols; value_t *in_vals, *out_vals; }; using SparseReduceTestF = SparseReduceTest<float, int>; TEST_P(SparseReduceTestF, Result) { Run(); } // Max reduce expects COO to be sorted already const std::vector<SparseReduceInputs<float, int>> max_reduce_inputs_f = { {// input rows/cols/vals {0, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3}, {1, 1, 1, 2, 0, 3, 3, 0, 2, 3, 3}, {3.0, 50.0, 0.0, 2.0, 40.0, 2.0, 1.0, 4.0, 1.0, 0.0, 30.0}, // output rows/cols/vals {0, 0, 1, 1, 2, 3, 3}, {1, 2, 0, 3, 0, 2, 3}, {50.0, 2.0, 40.0, 2.0, 4.0, 1.0, 30.0}, 4, 4}, }; INSTANTIATE_TEST_CASE_P(SparseReduceTest, SparseReduceTestF, ::testing::ValuesIn(max_reduce_inputs_f)); } // namespace sparse } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/sort.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/random/rng.cuh> #include <raft/util/cudart_utils.hpp> #include <raft/sparse/op/sort.cuh> #include <iostream> #include <memory> namespace raft { namespace sparse { template <typename T> struct SparseSortInput { int m, n, nnz; unsigned long long int seed; }; template <typename T> class SparseSortTest : public ::testing::TestWithParam<SparseSortInput<T>> { protected: void SetUp() override {} void TearDown() override {} protected: SparseSortInput<T> params; }; const std::vector<SparseSortInput<float>> inputsf = {{5, 10, 5, 1234ULL}}; typedef SparseSortTest<float> COOSort; TEST_P(COOSort, Result) { params = ::testing::TestWithParam<SparseSortInput<float>>::GetParam(); raft::random::RngState r(params.seed); raft::resources h; auto stream = resource::get_cuda_stream(h); rmm::device_uvector<int> in_rows(params.nnz, stream); rmm::device_uvector<int> in_cols(params.nnz, stream); rmm::device_uvector<int> verify(params.nnz, stream); rmm::device_uvector<float> in_vals(params.nnz, stream); uniform(h, r, in_vals.data(), params.nnz, float(-1.0), float(1.0)); auto in_rows_h = std::make_unique<int[]>(params.nnz); auto in_cols_h = std::make_unique<int[]>(params.nnz); auto verify_h = std::make_unique<int[]>(params.nnz); for (int i = 0; i < params.nnz; i++) { in_rows_h[i] = params.nnz - i 
- 1; verify_h[i] = i; in_cols_h[i] = i; } raft::update_device(in_rows.data(), in_rows_h.get(), params.nnz, stream); raft::update_device(in_cols.data(), in_cols_h.get(), params.nnz, stream); raft::update_device(verify.data(), verify_h.get(), params.nnz, stream); op::coo_sort( params.m, params.n, params.nnz, in_rows.data(), in_cols.data(), in_vals.data(), stream); ASSERT_TRUE(raft::devArrMatch<int>( verify.data(), in_rows.data(), params.nnz, raft::Compare<int>(), stream)); } INSTANTIATE_TEST_CASE_P(SparseSortTest, COOSort, ::testing::ValuesIn(inputsf)); } // namespace sparse } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/csr_to_dense.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cusparse_v2.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/util/cudart_utils.hpp> #include <gtest/gtest.h> #include <raft/sparse/convert/dense.cuh> #include <raft/sparse/detail/cusparse_wrappers.h> #include <rmm/device_uvector.hpp> #include "../test_utils.cuh" namespace raft { namespace sparse { using namespace raft; using namespace raft::sparse; template <typename value_idx, typename value_t> struct CSRToDenseInputs { value_idx nrows; value_idx ncols; value_idx nnz; std::vector<value_idx> indptr_h; std::vector<value_idx> indices_h; std::vector<value_t> data_h; std::vector<value_t> out_ref_h; }; template <typename value_idx, typename value_t> ::std::ostream& operator<<(::std::ostream& os, const CSRToDenseInputs<value_idx, value_t>& dims) { return os; } template <typename value_idx, typename value_t> class CSRToDenseTest : public ::testing::TestWithParam<CSRToDenseInputs<value_idx, value_t>> { public: CSRToDenseTest() : params(::testing::TestWithParam<CSRToDenseInputs<value_idx, value_t>>::GetParam()), stream(resource::get_cuda_stream(raft_handle)), indptr(0, stream), indices(0, stream), data(0, stream), out_ref(0, stream), out(0, stream) { indptr.resize(params.indptr_h.size(), stream); indices.resize(params.indices_h.size(), stream); data.resize(params.data_h.size(), stream); out_ref.resize(params.out_ref_h.size(), 
stream); out.resize(params.out_ref_h.size(), stream); } protected: void make_data() { std::vector<value_idx> indptr_h = params.indptr_h; std::vector<value_idx> indices_h = params.indices_h; std::vector<value_t> data_h = params.data_h; update_device(indptr.data(), indptr_h.data(), indptr_h.size(), stream); update_device(indices.data(), indices_h.data(), indices_h.size(), stream); update_device(data.data(), data_h.data(), data_h.size(), stream); std::vector<value_t> out_ref_h = params.out_ref_h; update_device(out_ref.data(), out_ref_h.data(), out_ref_h.size(), stream); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); } void SetUp() override { RAFT_CUSPARSE_TRY(cusparseCreate(&handle)); make_data(); convert::csr_to_dense(handle, params.nrows, params.ncols, params.nnz, indptr.data(), indices.data(), data.data(), params.nrows, out.data(), stream, true); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); RAFT_CUSPARSE_TRY(cusparseDestroy(handle)); } void compare() { ASSERT_TRUE( devArrMatch(out.data(), out_ref.data(), params.out_ref_h.size(), Compare<value_t>())); } protected: raft::resources raft_handle; cudaStream_t stream; cusparseHandle_t handle; // input data rmm::device_uvector<value_idx> indptr, indices; rmm::device_uvector<value_t> data; // output data rmm::device_uvector<value_t> out; // expected output data rmm::device_uvector<value_t> out_ref; CSRToDenseInputs<value_idx, value_t> params; }; const std::vector<CSRToDenseInputs<int, float>> inputs_i32_f = { {4, 4, 8, {0, 2, 4, 6, 8}, {0, 1, 2, 3, 0, 1, 2, 3}, // indices {1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f}, {1.0f, 3.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 5.0f, 50.0f, 28.0f, 0.0f, 0.0f, 0.0f, 0.0f, 16.0f, 2.0f}}, }; typedef CSRToDenseTest<int, float> CSRToDenseTestF; TEST_P(CSRToDenseTestF, Result) { compare(); } INSTANTIATE_TEST_CASE_P(CSRToDenseTest, CSRToDenseTestF, ::testing::ValuesIn(inputs_i32_f)); }; // end namespace sparse }; // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/degree.cu
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/util/cudart_utils.hpp> #include <raft/sparse/linalg/degree.cuh> #include <iostream> namespace raft { namespace sparse { template <typename T> struct SparseDegreeInputs { int m, n, nnz; unsigned long long int seed; }; template <typename T> class SparseDegreeTests : public ::testing::TestWithParam<SparseDegreeInputs<T>> { protected: void SetUp() override {} void TearDown() override {} protected: SparseDegreeInputs<T> params; }; const std::vector<SparseDegreeInputs<float>> inputsf = {{5, 10, 5, 1234ULL}}; typedef SparseDegreeTests<float> COODegree; TEST_P(COODegree, Result) { cudaStream_t stream; cudaStreamCreate(&stream); int in_rows_h[5] = {0, 0, 1, 2, 2}; int verify_h[5] = {2, 1, 2, 0, 0}; rmm::device_uvector<int> in_rows(5, stream); rmm::device_uvector<int> verify(5, stream); rmm::device_uvector<int> results(5, stream); RAFT_CUDA_TRY(cudaMemsetAsync(verify.data(), 0, verify.size() * sizeof(int), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(results.data(), 0, results.size() * sizeof(int), stream)); raft::update_device(in_rows.data(), *&in_rows_h, 5, stream); raft::update_device(verify.data(), *&verify_h, 5, stream); linalg::coo_degree(in_rows.data(), 5, results.data(), stream); cudaDeviceSynchronize(); ASSERT_TRUE(raft::devArrMatch<int>(verify.data(), results.data(), 5, raft::Compare<int>())); 
RAFT_CUDA_TRY(cudaStreamDestroy(stream)); } typedef SparseDegreeTests<float> COODegreeNonzero; TEST_P(COODegreeNonzero, Result) { cudaStream_t stream; cudaStreamCreate(&stream); int in_rows_h[5] = {0, 0, 1, 2, 2}; float in_vals_h[5] = {0.0, 5.0, 0.0, 1.0, 1.0}; int verify_h[5] = {1, 0, 2, 0, 0}; rmm::device_uvector<int> in_rows(5, stream); rmm::device_uvector<int> verify(5, stream); rmm::device_uvector<int> results(5, stream); rmm::device_uvector<float> in_vals(5, stream); RAFT_CUDA_TRY(cudaMemsetAsync(verify.data(), 0, verify.size() * sizeof(int), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(results.data(), 0, results.size() * sizeof(int), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(in_vals.data(), 0, in_vals.size() * sizeof(float), stream)); raft::update_device(in_rows.data(), *&in_rows_h, 5, stream); raft::update_device(verify.data(), *&verify_h, 5, stream); raft::update_device(in_vals.data(), *&in_vals_h, 5, stream); linalg::coo_degree_nz<float>(in_rows.data(), in_vals.data(), 5, results.data(), stream); cudaDeviceSynchronize(); ASSERT_TRUE(raft::devArrMatch<int>(verify.data(), results.data(), 5, raft::Compare<int>())); RAFT_CUDA_TRY(cudaStreamDestroy(stream)); } INSTANTIATE_TEST_CASE_P(SparseDegreeTests, COODegree, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(SparseDegreeTests, COODegreeNonzero, ::testing::ValuesIn(inputsf)); } // namespace sparse } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/convert_csr.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"

#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cuda_utils.cuh>

#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>

#include <algorithm>
#include <iostream>
#include <vector>

namespace raft {
namespace sparse {

/**************************** sorted COO to CSR ****************************/

template <typename T>
struct SparseConvertCSRInputs {
  int m, n, nnz;
  unsigned long long int seed;
};

template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SparseConvertCSRInputs<T>& dims)
{
  return os;
}

template <typename T>
class SparseConvertCSRTest : public ::testing::TestWithParam<SparseConvertCSRInputs<T>> {
 protected:
  void SetUp() override {}
  void TearDown() override {}

 protected:
  SparseConvertCSRInputs<T> params;
};

const std::vector<SparseConvertCSRInputs<float>> inputsf = {{5, 10, 5, 1234ULL}};

typedef SparseConvertCSRTest<float> SortedCOOToCSR;
TEST_P(SortedCOOToCSR, Result)
{
  cudaStream_t stream;
  // Check stream creation instead of silently continuing with an invalid stream.
  RAFT_CUDA_TRY(cudaStreamCreate(&stream));

  int nnz = 8;

  // Host fixture data. std::vector (instead of raw new[]) guarantees the
  // buffers are released even when an ASSERT below returns early.
  std::vector<int> in_h{0, 0, 1, 1, 2, 2, 3, 3};
  std::vector<int> exp_h{0, 2, 4, 6};

  rmm::device_uvector<int> in(nnz, stream);
  rmm::device_uvector<int> exp(4, stream);
  rmm::device_uvector<int> out(4, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(in.data(), 0, in.size() * sizeof(int), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(exp.data(), 0, exp.size() * sizeof(int), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(out.data(), 0, out.size() * sizeof(int), stream));

  raft::update_device(in.data(), in_h.data(), nnz, stream);
  raft::update_device(exp.data(), exp_h.data(), 4, stream);

  convert::sorted_coo_to_csr<int>(in.data(), nnz, out.data(), 4, stream);

  ASSERT_TRUE(raft::devArrMatch<int>(out.data(), exp.data(), 4, raft::Compare<int>(), stream));

  RAFT_CUDA_TRY(cudaStreamDestroy(stream));
}

INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, SortedCOOToCSR, ::testing::ValuesIn(inputsf));

/******************************** adj graph ********************************/

/**
 * Fill the adjacency matrix: adj[r * num_cols + c] = (c % divisor == 0).
 *
 * Both dimensions use grid-stride loops so any launch configuration covers the
 * whole matrix. The column index must be (re-)initialized inside the row loop:
 * with a single running `c` (as the previous version had), `c` stayed past
 * num_cols after the first row pass, so every further row visited by the same
 * thread was silently skipped.
 */
template <typename index_t>
RAFT_KERNEL init_adj_kernel(bool* adj, index_t num_rows, index_t num_cols, index_t divisor)
{
  for (index_t r = blockDim.y * blockIdx.y + threadIdx.y; r < num_rows;
       r += gridDim.y * blockDim.y) {
    for (index_t c = blockDim.x * blockIdx.x + threadIdx.x; c < num_cols;
         c += gridDim.x * blockDim.x) {
      adj[r * num_cols + c] = c % divisor == 0;
    }
  }
}

template <typename index_t>
void init_adj(bool* adj, index_t num_rows, index_t num_cols, index_t divisor, cudaStream_t stream)
{
  // adj matrix: element a_ij is set to one if j is divisible by divisor.
  dim3 block(32, 32);
  // The y grid dimension is hardware-limited to 65535 blocks; the kernel's
  // grid-stride loops pick up any rows beyond that.
  const index_t max_y_grid_dim = 65535;
  dim3 grid(num_cols / 32 + 1, (int)std::min(num_rows / 32 + 1, max_y_grid_dim));
  init_adj_kernel<index_t><<<grid, block, 0, stream>>>(adj, num_rows, num_cols, divisor);
  RAFT_CHECK_CUDA(stream);
}

template <typename index_t>
struct CSRAdjGraphInputs {
  index_t n_rows;
  index_t n_cols;
  index_t divisor;
};

template <typename index_t>
class CSRAdjGraphTest : public ::testing::TestWithParam<CSRAdjGraphInputs<index_t>> {
 public:
  CSRAdjGraphTest()
    : stream(resource::get_cuda_stream(handle)),
      params(::testing::TestWithParam<CSRAdjGraphInputs<index_t>>::GetParam()),
      adj(params.n_rows * params.n_cols, stream),
      row_ind(params.n_rows, stream),
      row_counters(params.n_rows, stream),
      col_ind(params.n_rows * params.n_cols, stream),
      row_ind_host(params.n_rows)
  {
  }

 protected:
  void SetUp() override
  {
    // Initialize adj matrix: element a_ij equals one if j is divisible by
    // params.divisor.
    init_adj(adj.data(), params.n_rows, params.n_cols, params.divisor, stream);
    // Initialize row_ind; nnz_per_row is loop-invariant, so compute it once.
    size_t nnz_per_row = raft::ceildiv(params.n_cols, params.divisor);
    for (size_t i = 0; i < row_ind_host.size(); ++i) {
      row_ind_host[i] = nnz_per_row * i;
    }
    raft::update_device(row_ind.data(), row_ind_host.data(), row_ind.size(), stream);

    // Initialize result to 1, so we can catch any errors.
    RAFT_CUDA_TRY(cudaMemsetAsync(col_ind.data(), 1, col_ind.size() * sizeof(index_t), stream));
  }

  void Run()
  {
    convert::adj_to_csr<index_t>(handle,
                                 adj.data(),
                                 row_ind.data(),
                                 params.n_rows,
                                 params.n_cols,
                                 row_counters.data(),
                                 col_ind.data());

    std::vector<index_t> col_ind_host(col_ind.size());
    raft::update_host(col_ind_host.data(), col_ind.data(), col_ind.size(), stream);
    std::vector<index_t> row_counters_host(params.n_rows);
    raft::update_host(row_counters_host.data(), row_counters.data(), row_counters.size(), stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));

    // 1. Check that each row contains enough values
    index_t nnz_per_row = raft::ceildiv(params.n_cols, params.divisor);
    for (index_t i = 0; i < params.n_rows; ++i) {
      ASSERT_EQ(row_counters_host[i], nnz_per_row) << "where i = " << i;
    }
    // 2. Check that all column indices are divisible by divisor
    for (index_t i = 0; i < params.n_rows; ++i) {
      index_t row_base = row_ind_host[i];
      for (index_t j = 0; j < nnz_per_row; ++j) {
        ASSERT_EQ(0, col_ind_host[row_base + j] % params.divisor);
      }
    }
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  CSRAdjGraphInputs<index_t> params;
  rmm::device_uvector<bool> adj;
  rmm::device_uvector<index_t> row_ind;
  rmm::device_uvector<index_t> row_counters;
  rmm::device_uvector<index_t> col_ind;
  std::vector<index_t> row_ind_host;
};

using CSRAdjGraphTestI = CSRAdjGraphTest<int>;
TEST_P(CSRAdjGraphTestI, Result) { Run(); }

using CSRAdjGraphTestL = CSRAdjGraphTest<int64_t>;
TEST_P(CSRAdjGraphTestL, Result) { Run(); }

const std::vector<CSRAdjGraphInputs<int>> csradjgraph_inputs_i = {{10, 10, 2}};
const std::vector<CSRAdjGraphInputs<int64_t>> csradjgraph_inputs_l = {
  {0, 0, 2},
  {10, 10, 2},
  {64 * 1024 + 10, 2, 3},  // 64K + 10 is slightly over maximum of blockDim.y
  {16, 16, 3},             // No peeling-remainder
  {17, 16, 3},             // Check peeling-remainder
  {18, 16, 3},             // Check peeling-remainder
  {32 + 9, 33, 2},         // Check peeling-remainder
};

INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest,
                        CSRAdjGraphTestI,
                        ::testing::ValuesIn(csradjgraph_inputs_i));
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest,
                        CSRAdjGraphTestL,
                        ::testing::ValuesIn(csradjgraph_inputs_l));

}  // namespace sparse
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/sparse/csr_transpose.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cusparse_v2.h>
#include <raft/core/resource/cuda_stream.hpp>

#include <gtest/gtest.h>
#include <raft/core/resources.hpp>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/sparse/linalg/transpose.cuh>
#include <raft/util/cudart_utils.hpp>

#include "../test_utils.cuh"

namespace raft {
namespace sparse {

using namespace raft;

/** A CSR matrix plus its expected transpose, both given in host memory. */
template <typename value_idx, typename value_t>
struct CSRTransposeInputs {
  value_idx nrows;
  value_idx ncols;
  value_idx nnz;
  std::vector<value_idx> indptr_h;
  std::vector<value_idx> indices_h;
  std::vector<value_t> data_h;
  std::vector<value_idx> out_indptr_ref_h;
  std::vector<value_idx> out_indices_ref_h;
  std::vector<value_t> out_data_ref_h;
};

template <typename value_idx, typename value_t>
::std::ostream& operator<<(::std::ostream& os, const CSRTransposeInputs<value_idx, value_t>& dims)
{
  return os;
}

template <typename value_idx, typename value_t>
class CSRTransposeTest : public ::testing::TestWithParam<CSRTransposeInputs<value_idx, value_t>> {
 public:
  CSRTransposeTest()
    : params(::testing::TestWithParam<CSRTransposeInputs<value_idx, value_t>>::GetParam()),
      stream(resource::get_cuda_stream(raft_handle)),
      indptr(0, stream),
      indices(0, stream),
      data(0, stream),
      out_indptr_ref(0, stream),
      out_indices_ref(0, stream),
      out_data_ref(0, stream),
      out_indptr(0, stream),
      out_indices(0, stream),
      out_data(0, stream)
  {
    indptr.resize(params.indptr_h.size(), stream);
    indices.resize(params.indices_h.size(), stream);
    data.resize(params.data_h.size(), stream);
    out_indptr_ref.resize(params.out_indptr_ref_h.size(), stream);
    out_indices_ref.resize(params.out_indices_ref_h.size(), stream);
    out_data_ref.resize(params.out_data_ref_h.size(), stream);
    out_indptr.resize(params.out_indptr_ref_h.size(), stream);
    out_indices.resize(params.out_indices_ref_h.size(), stream);
    out_data.resize(params.out_data_ref_h.size(), stream);
  }

 protected:
  /** Copy the input CSR and the reference transpose from host to device. */
  void make_data()
  {
    std::vector<value_idx> indptr_h  = params.indptr_h;
    std::vector<value_idx> indices_h = params.indices_h;
    std::vector<value_t> data_h      = params.data_h;

    update_device(indptr.data(), indptr_h.data(), indptr_h.size(), stream);
    update_device(indices.data(), indices_h.data(), indices_h.size(), stream);
    update_device(data.data(), data_h.data(), data_h.size(), stream);

    std::vector<value_idx> out_indptr_ref_h  = params.out_indptr_ref_h;
    std::vector<value_idx> out_indices_ref_h = params.out_indices_ref_h;
    std::vector<value_t> out_data_ref_h      = params.out_data_ref_h;

    update_device(out_indptr_ref.data(), out_indptr_ref_h.data(), out_indptr_ref_h.size(), stream);
    update_device(
      out_indices_ref.data(), out_indices_ref_h.data(), out_indices_ref_h.size(), stream);
    update_device(out_data_ref.data(), out_data_ref_h.data(), out_data_ref_h.size(), stream);
  }

  void SetUp() override
  {
    make_data();

    // Use the member handle: it owns the stream all of the device buffers
    // above were created on (the previous version passed a throwaway local
    // raft::resources instead).
    raft::sparse::linalg::csr_transpose(raft_handle,
                                        indptr.data(),
                                        indices.data(),
                                        data.data(),
                                        out_indptr.data(),
                                        out_indices.data(),
                                        out_data.data(),
                                        params.nrows,
                                        params.ncols,
                                        params.nnz,
                                        stream);

    resource::sync_stream(raft_handle, stream);
  }

  void compare()
  {
    // Index arrays are compared with Compare<value_idx>; the previous code
    // used Compare<value_t>, funnelling integer indices through the
    // floating-point comparator.
    ASSERT_TRUE(devArrMatch(out_indptr.data(),
                            out_indptr_ref.data(),
                            params.out_indptr_ref_h.size(),
                            Compare<value_idx>()));
    ASSERT_TRUE(devArrMatch(out_indices.data(),
                            out_indices_ref.data(),
                            params.out_indices_ref_h.size(),
                            Compare<value_idx>()));
    ASSERT_TRUE(devArrMatch(
      out_data.data(), out_data_ref.data(), params.out_data_ref_h.size(), Compare<value_t>()));
  }

 protected:
  raft::resources raft_handle;
  cudaStream_t stream;

  // input data
  rmm::device_uvector<value_idx> indptr, indices;
  rmm::device_uvector<value_t> data;

  // output data
  rmm::device_uvector<value_idx> out_indptr, out_indices;
  rmm::device_uvector<value_t> out_data;

  // expected output data
  rmm::device_uvector<value_idx> out_indptr_ref, out_indices_ref;
  rmm::device_uvector<value_t> out_data_ref;

  CSRTransposeInputs<value_idx, value_t> params;
};

const std::vector<CSRTransposeInputs<int, float>> inputs_i32_f = {
  {
    4,
    2,
    8,
    {0, 2, 4, 6, 8},
    {0, 1, 0, 1, 0, 1, 0, 1},  // indices
    {1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f},
    {0, 4, 8},
    {0, 1, 2, 3, 0, 1, 2, 3},  // indices
    {1.0f, 1.0f, 50.0f, 16.0f, 3.0f, 5.0f, 28.0f, 2.0f},
  },
};

typedef CSRTransposeTest<int, float> CSRTransposeTestF;
TEST_P(CSRTransposeTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(CSRTransposeTest, CSRTransposeTestF, ::testing::ValuesIn(inputs_i32_f));

};  // end namespace sparse
};  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test/sparse
rapidsai_public_repos/raft/cpp/test/sparse/neighbors/knn_graph.cu
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../../test_utils.cuh"

#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>

#include <raft/sparse/coo.hpp>
#include <raft/sparse/neighbors/knn_graph.cuh>

#include <iostream>

namespace raft {
namespace sparse {

// Accumulates sum(rows) - sum(cols) over all nnz edges into *sum. For a
// symmetric edge list every (r, c) has a matching (c, r), so the total is 0.
template <typename value_idx, typename value_t>
RAFT_KERNEL assert_symmetry(
  value_idx* rows, value_idx* cols, value_t* vals, value_idx nnz, value_idx* sum)
{
  const int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < nnz) {
    atomicAdd(sum, rows[i]);
    atomicAdd(sum, -cols[i]);
  }
}

template <typename value_idx, typename value_t>
struct KNNGraphInputs {
  value_idx m;
  value_idx n;
  std::vector<value_t> X;

  int k = 2;
};

template <typename value_idx, typename value_t>
::std::ostream& operator<<(::std::ostream& os, const KNNGraphInputs<value_idx, value_t>& dims)
{
  return os;
}

template <typename value_idx, typename value_t>
class KNNGraphTest : public ::testing::TestWithParam<KNNGraphInputs<value_idx, value_t>> {
 public:
  KNNGraphTest()
    : params(::testing::TestWithParam<KNNGraphInputs<value_idx, value_t>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      X(0, stream)
  {
    X.resize(params.X.size(), stream);
  }

 protected:
  void SetUp() override
  {
    out = new raft::sparse::COO<value_t, value_idx>(stream);

    // Push the input points to the device and build the knn graph.
    update_device(X.data(), params.X.data(), params.X.size(), stream);

    raft::sparse::neighbors::knn_graph(
      handle, X.data(), params.m, params.n, raft::distance::DistanceType::L2Unexpanded, *out);

    rmm::device_scalar<value_idx> sum(stream);
    sum.set_value_to_zero_async(stream);

    /**
     * Assert the knn graph is symmetric
     */
    assert_symmetry<<<raft::ceildiv(out->nnz, 256), 256, 0, stream>>>(
      out->rows(), out->cols(), out->vals(), out->nnz, sum.data());

    sum_h = sum.value(stream);
    resource::sync_stream(handle, stream);
  }

  void TearDown() override { delete out; }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  // input data
  raft::sparse::COO<value_t, value_idx>* out;

  rmm::device_uvector<value_t> X;

  value_idx sum_h;

  KNNGraphInputs<value_idx, value_t> params;
};

const std::vector<KNNGraphInputs<int, float>> knn_graph_inputs_fint = {
  // Test n_clusters == n_points
  {4, 2, {0, 100, 0.01, 0.02, 5000, 10000, -5, -2}, 2}};

typedef KNNGraphTest<int, float> KNNGraphTestF_int;
TEST_P(KNNGraphTestF_int, Result)
{
  // nnz should not be larger than twice m * k
  ASSERT_TRUE(out->nnz <= (params.m * params.k * 2));
  ASSERT_TRUE(sum_h == 0);
}

INSTANTIATE_TEST_CASE_P(KNNGraphTest, KNNGraphTestF_int, ::testing::ValuesIn(knn_graph_inputs_fint));

}  // namespace sparse
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test/sparse
rapidsai_public_repos/raft/cpp/test/sparse/neighbors/brute_force.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cusparse_v2.h>
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>

#include "../../test_utils.cuh"
#include <raft/distance/distance_types.hpp>
#include <raft/sparse/neighbors/knn.cuh>
#include <raft/util/cudart_utils.hpp>

namespace raft {
namespace sparse {
namespace selection {

using namespace raft;
using namespace raft::sparse;

// Host-side CSR fixture plus the expected knn distances/indices.
template <typename value_idx, typename value_t>
struct SparseKNNInputs {
  value_idx n_cols;

  std::vector<value_idx> indptr_h;
  std::vector<value_idx> indices_h;
  std::vector<value_t> data_h;

  std::vector<value_t> out_dists_ref_h;
  std::vector<value_idx> out_indices_ref_h;

  int k;

  int batch_size_index = 2;
  int batch_size_query = 2;

  raft::distance::DistanceType metric = raft::distance::DistanceType::L2SqrtExpanded;
};

template <typename value_idx, typename value_t>
::std::ostream& operator<<(::std::ostream& os, const SparseKNNInputs<value_idx, value_t>& dims)
{
  return os;
}

template <typename value_idx, typename value_t>
class SparseKNNTest : public ::testing::TestWithParam<SparseKNNInputs<value_idx, value_t>> {
 public:
  SparseKNNTest()
    : params(::testing::TestWithParam<SparseKNNInputs<value_idx, value_t>>::GetParam()),
      indptr(0, resource::get_cuda_stream(handle)),
      indices(0, resource::get_cuda_stream(handle)),
      data(0, resource::get_cuda_stream(handle)),
      out_indices(0, resource::get_cuda_stream(handle)),
      out_dists(0, resource::get_cuda_stream(handle)),
      out_indices_ref(0, resource::get_cuda_stream(handle)),
      out_dists_ref(0, resource::get_cuda_stream(handle))
  {
  }

 protected:
  void SetUp() override
  {
    n_rows = params.indptr_h.size() - 1;
    nnz    = params.indices_h.size();
    k      = params.k;

    make_data();

    // Query the index against itself: the same CSR arrays serve as both the
    // index and the query side of the brute-force search.
    raft::sparse::neighbors::brute_force_knn<value_idx, value_t>(indptr.data(),
                                                                 indices.data(),
                                                                 data.data(),
                                                                 nnz,
                                                                 n_rows,
                                                                 params.n_cols,
                                                                 indptr.data(),
                                                                 indices.data(),
                                                                 data.data(),
                                                                 nnz,
                                                                 n_rows,
                                                                 params.n_cols,
                                                                 out_indices.data(),
                                                                 out_dists.data(),
                                                                 k,
                                                                 handle,
                                                                 params.batch_size_index,
                                                                 params.batch_size_query,
                                                                 params.metric);

    RAFT_CUDA_TRY(cudaStreamSynchronize(resource::get_cuda_stream(handle)));
  }

  void compare()
  {
    ASSERT_TRUE(devArrMatch(
      out_dists_ref.data(), out_dists.data(), n_rows * k, CompareApprox<value_t>(1e-4)));
    ASSERT_TRUE(
      devArrMatch(out_indices_ref.data(), out_indices.data(), n_rows * k, Compare<value_idx>()));
  }

 protected:
  // Copy the CSR fixture and the reference results to the device, then size
  // the output buffers for n_rows * k neighbors.
  void make_data()
  {
    auto stream = resource::get_cuda_stream(handle);

    std::vector<value_idx> h_indptr  = params.indptr_h;
    std::vector<value_idx> h_indices = params.indices_h;
    std::vector<value_t> h_data      = params.data_h;

    indptr.resize(h_indptr.size(), stream);
    indices.resize(h_indices.size(), stream);
    data.resize(h_data.size(), stream);

    update_device(indptr.data(), h_indptr.data(), h_indptr.size(), stream);
    update_device(indices.data(), h_indices.data(), h_indices.size(), stream);
    update_device(data.data(), h_data.data(), h_data.size(), stream);

    std::vector<value_t> h_dists_ref    = params.out_dists_ref_h;
    std::vector<value_idx> h_indices_ref = params.out_indices_ref_h;

    out_indices_ref.resize(h_indices_ref.size(), stream);
    out_dists_ref.resize(h_dists_ref.size(), stream);

    update_device(out_indices_ref.data(), h_indices_ref.data(), h_indices_ref.size(), stream);
    update_device(out_dists_ref.data(), h_dists_ref.data(), h_dists_ref.size(), stream);

    out_dists.resize(n_rows * k, stream);
    out_indices.resize(n_rows * k, stream);
  }

  raft::resources handle;

  int n_rows, nnz, k;

  // input data
  rmm::device_uvector<value_idx> indptr, indices;
  rmm::device_uvector<value_t> data;

  // output data
  rmm::device_uvector<value_idx> out_indices;
  rmm::device_uvector<value_t> out_dists;

  rmm::device_uvector<value_idx> out_indices_ref;
  rmm::device_uvector<value_t> out_dists_ref;

  SparseKNNInputs<value_idx, value_t> params;
};

const std::vector<SparseKNNInputs<int, float>> inputs_i32_f = {
  {9,                                                 // ncols
   {0, 2, 4, 6, 8},                                   // indptr
   {0, 4, 0, 3, 0, 2, 0, 8},                          // indices
   {0.0f, 1.0f, 5.0f, 6.0f, 5.0f, 6.0f, 0.0f, 1.0f},  // data
   {0, 1.41421, 0, 7.87401, 0, 7.87401, 0, 1.41421},  // dists
   {0, 3, 1, 0, 2, 0, 3, 0},                          // inds
   2,
   2,
   2,
   raft::distance::DistanceType::L2SqrtExpanded}};

typedef SparseKNNTest<int, float> SparseKNNTestF;
TEST_P(SparseKNNTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(SparseKNNTest, SparseKNNTestF, ::testing::ValuesIn(inputs_i32_f));

};  // end namespace selection
};  // end namespace sparse
};  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test/sparse
rapidsai_public_repos/raft/cpp/test/sparse/neighbors/cross_component_nn.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // XXX: We allow the instantiation of masked_l2_nn here: // raft::linkage::FixConnectivitiesRedOp<value_idx, value_t> red_op(params.n_row); // raft::linkage::cross_component_nn<value_idx, value_t>( // handle, out_edges, data.data(), colors.data(), params.n_row, params.n_col, red_op); // // TODO: consider adding this to libraft.so or creating an instance in a // separate translation unit for this test. // // TODO: edge case testing. 
Reference: https://github.com/rapidsai/raft/issues/1669 #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <cub/cub.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <vector> #include <raft/sparse/linalg/symmetrize.cuh> #include <raft/sparse/mst/mst.cuh> #include <raft/sparse/neighbors/knn_graph.cuh> #include <raft/sparse/selection/cross_component_nn.cuh> #include <raft/distance/distance_types.hpp> #include <raft/linalg/transpose.cuh> #include <raft/sparse/convert/csr.cuh> #include <raft/sparse/coo.hpp> #include <raft/sparse/hierarchy/single_linkage.cuh> #include <rmm/device_uvector.hpp> #include "../../test_utils.cuh" namespace raft { namespace sparse { using namespace std; template <typename value_t, typename value_idx> struct ConnectComponentsInputs { value_idx n_row; value_idx n_col; std::vector<value_t> data; int c; }; template <typename value_idx, typename value_t> class ConnectComponentsTest : public ::testing::TestWithParam<ConnectComponentsInputs<value_t, value_idx>> { protected: void basicTest() { raft::resources handle; auto stream = resource::get_cuda_stream(handle); params = ::testing::TestWithParam<ConnectComponentsInputs<value_t, value_idx>>::GetParam(); raft::sparse::COO<value_t, value_idx> out_edges(resource::get_cuda_stream(handle)); raft::sparse::COO<value_t, value_idx> out_edges_batched(resource::get_cuda_stream(handle)); rmm::device_uvector<value_t> data(params.n_row * params.n_col, resource::get_cuda_stream(handle)); raft::copy(data.data(), params.data.data(), data.size(), resource::get_cuda_stream(handle)); rmm::device_uvector<value_idx> indptr(params.n_row + 1, stream); /** * 1. 
Construct knn graph */ raft::sparse::COO<value_t, value_idx> knn_graph_coo(stream); raft::sparse::neighbors::knn_graph(handle, data.data(), params.n_row, params.n_col, raft::distance::DistanceType::L2SqrtExpanded, knn_graph_coo, params.c); raft::sparse::convert::sorted_coo_to_csr( knn_graph_coo.rows(), knn_graph_coo.nnz, indptr.data(), params.n_row + 1, stream); /** * 2. Construct MST, sorted by weights */ rmm::device_uvector<value_idx> colors(params.n_row, stream); auto mst_coo = raft::mst::mst<value_idx, value_idx, value_t, double>(handle, indptr.data(), knn_graph_coo.cols(), knn_graph_coo.vals(), params.n_row, knn_graph_coo.nnz, colors.data(), stream, false, true); /** * 3. cross_component_nn to fix connectivities */ raft::linkage::FixConnectivitiesRedOp<value_idx, value_t> red_op(params.n_row); raft::linkage::cross_component_nn<value_idx, value_t>(handle, out_edges, data.data(), colors.data(), params.n_row, params.n_col, red_op, params.n_row, params.n_col); raft::linkage::cross_component_nn<value_idx, value_t>(handle, out_edges_batched, data.data(), colors.data(), params.n_row, params.n_col, red_op, params.n_row / 2, params.n_col / 2); ASSERT_TRUE(out_edges.nnz == out_edges_batched.nnz); ASSERT_TRUE( devArrMatch(out_edges.rows(), out_edges_batched.rows(), out_edges.nnz, Compare<int>())); ASSERT_TRUE( devArrMatch(out_edges.cols(), out_edges_batched.cols(), out_edges.nnz, Compare<int>())); ASSERT_TRUE(devArrMatch( out_edges.vals(), out_edges_batched.vals(), out_edges.nnz, CompareApprox<float>(1e-4))); /** * Construct final edge list */ rmm::device_uvector<value_idx> indptr2(params.n_row + 1, stream); raft::sparse::convert::sorted_coo_to_csr( out_edges.rows(), out_edges.nnz, indptr2.data(), params.n_row + 1, stream); auto output_mst = raft::mst::mst<value_idx, value_idx, value_t>(handle, indptr2.data(), out_edges.cols(), out_edges.vals(), params.n_row, out_edges.nnz, colors.data(), stream, false, false); resource::sync_stream(handle, stream); // The sum of edges 
for both MST runs should be n_rows - 1 final_edges = output_mst.n_edges + mst_coo.n_edges; } void SetUp() override { basicTest(); } void TearDown() override {} protected: ConnectComponentsInputs<value_t, value_idx> params; value_idx final_edges; }; const std::vector<ConnectComponentsInputs<float, int>> fix_conn_inputsf2 = { // Test n_clusters == n_points {10, 5, {0.21390334, 0.50261639, 0.91036676, 0.59166485, 0.71162682, 0.10248392, 0.77782677, 0.43772379, 0.4035871, 0.3282796, 0.47544681, 0.59862974, 0.12319357, 0.06239463, 0.28200272, 0.1345717, 0.50498218, 0.5113505, 0.16233086, 0.62165332, 0.42281548, 0.933117, 0.41386077, 0.23264562, 0.73325968, 0.37537541, 0.70719873, 0.14522645, 0.73279625, 0.9126674, 0.84854131, 0.28890216, 0.85267903, 0.74703138, 0.83842071, 0.34942792, 0.27864171, 0.70911132, 0.21338564, 0.32035554, 0.73788331, 0.46926692, 0.57570162, 0.42559178, 0.87120209, 0.22734951, 0.01847905, 0.75549396, 0.76166195, 0.66613745}, -1}, // Test n_points == 100 {100, 10, {6.26168372e-01, 9.30437651e-01, 6.02450208e-01, 2.73025296e-01, 9.53050619e-01, 3.32164396e-01, 6.88942598e-01, 5.79163537e-01, 6.70341547e-01, 2.70140602e-02, 9.30429671e-01, 7.17721157e-01, 9.89948537e-01, 7.75253347e-01, 1.34491522e-02, 2.48522428e-02, 3.51413378e-01, 7.64405834e-01, 7.86373507e-01, 7.18748577e-01, 8.66998621e-01, 6.80316582e-01, 2.51288712e-01, 4.91078420e-01, 3.76246281e-01, 4.86828710e-01, 5.67464772e-01, 5.30734742e-01, 8.99478296e-01, 7.66699088e-01, 9.49339111e-01, 3.55248484e-01, 9.06046929e-01, 4.48407772e-01, 6.96395305e-01, 2.44277335e-01, 7.74840000e-01, 5.21046603e-01, 4.66423971e-02, 5.12019638e-02, 8.95019614e-01, 5.28956953e-01, 4.31536306e-01, 5.83857744e-01, 4.41787364e-01, 4.68656523e-01, 5.73971433e-01, 6.79989654e-01, 3.19650588e-01, 6.12579596e-01, 6.49126442e-02, 8.39131142e-01, 2.85252117e-01, 5.84848929e-01, 9.46507115e-01, 8.58440748e-01, 3.61528940e-01, 2.44215959e-01, 3.80101125e-01, 4.57128957e-02, 8.82216988e-01, 8.31498633e-01, 
7.23474381e-01, 7.75788607e-01, 1.40864146e-01, 6.62092382e-01, 5.13985168e-01, 3.00686418e-01, 8.70109949e-01, 2.43187753e-01, 2.89391938e-01, 2.84214238e-01, 8.70985521e-01, 8.77491176e-01, 6.72537226e-01, 3.30929686e-01, 1.85934324e-01, 9.16222614e-01, 6.18239142e-01, 2.64768597e-01, 5.76145451e-01, 8.62961369e-01, 6.84757925e-01, 7.60549082e-01, 1.27645356e-01, 4.51004673e-01, 3.92292980e-01, 4.63170803e-01, 4.35449330e-02, 2.17583404e-01, 5.71832605e-02, 2.06763039e-01, 3.70116249e-01, 2.09750028e-01, 6.17283019e-01, 8.62549231e-01, 9.84156240e-02, 2.66249156e-01, 3.87635103e-01, 2.85591012e-02, 4.24826068e-01, 4.45795088e-01, 6.86227676e-01, 1.08848960e-01, 5.96731841e-02, 3.71770228e-01, 1.91548833e-01, 6.95136078e-01, 9.00700636e-01, 8.76363105e-01, 2.67334632e-01, 1.80619709e-01, 7.94060419e-01, 1.42854171e-02, 1.09372387e-01, 8.74028108e-01, 6.46403232e-01, 4.86588834e-01, 5.93446175e-02, 6.11886291e-01, 8.83865057e-01, 3.15879821e-01, 2.27043992e-01, 9.76764951e-01, 6.15620336e-01, 9.76199360e-01, 2.40548962e-01, 3.21795663e-01, 8.75087904e-02, 8.11234663e-01, 6.96070480e-01, 8.12062321e-01, 1.21958818e-01, 3.44348628e-02, 8.72630414e-01, 3.06162776e-01, 1.76043529e-02, 9.45894971e-01, 5.33896401e-01, 6.21642973e-01, 4.93062535e-01, 4.48984262e-01, 2.24560379e-01, 4.24052195e-02, 4.43447610e-01, 8.95646149e-01, 6.05220676e-01, 1.81840491e-01, 9.70831206e-01, 2.12563586e-02, 6.92582693e-01, 7.55946922e-01, 7.95086143e-01, 6.05328941e-01, 3.99350764e-01, 4.32846636e-01, 9.81114529e-01, 4.98266428e-01, 6.37127930e-03, 1.59085889e-01, 6.34682067e-05, 5.59429440e-01, 7.38827633e-01, 8.93214770e-01, 2.16494306e-01, 9.35430573e-02, 4.75665868e-02, 7.80503518e-01, 7.86240041e-01, 7.06854594e-01, 2.13725879e-02, 7.68246091e-01, 4.50234808e-01, 5.21231104e-01, 5.01989826e-03, 4.22081572e-02, 1.65337732e-01, 8.54134740e-01, 4.99430262e-01, 8.94525601e-01, 1.14028379e-01, 3.69739861e-01, 1.32955599e-01, 2.65563824e-01, 2.52811151e-01, 1.44792843e-01, 6.88449594e-01, 
4.44921417e-01, 8.23296587e-01, 1.93266317e-01, 1.19033309e-01, 1.36368966e-01, 3.42600285e-01, 5.64505195e-01, 5.57594559e-01, 7.44257892e-01, 8.38231569e-02, 4.11548847e-01, 3.21010077e-01, 8.55081359e-01, 4.30105779e-01, 1.16229135e-01, 9.87731964e-02, 3.14712335e-01, 4.50880592e-01, 2.72289598e-01, 6.31615256e-01, 8.97432958e-01, 4.44764250e-01, 8.03776440e-01, 2.68767748e-02, 2.43374608e-01, 4.02141103e-01, 4.98881209e-01, 5.33173003e-01, 8.82890436e-01, 7.16149148e-01, 4.19664401e-01, 2.29335357e-01, 2.88637806e-01, 3.44696803e-01, 6.78171906e-01, 5.69849716e-01, 5.86454477e-01, 3.54474989e-01, 9.03876540e-01, 6.45980000e-01, 6.34887593e-01, 7.88039746e-02, 2.04814126e-01, 7.82251754e-01, 2.43147074e-01, 7.50951808e-01, 1.72799092e-02, 2.95349590e-01, 6.57991826e-01, 8.81214312e-01, 5.73970708e-01, 2.77610881e-01, 1.82155097e-01, 7.69797417e-02, 6.44792402e-01, 9.46950998e-01, 7.73064845e-01, 6.04733624e-01, 5.80094567e-01, 1.67498426e-01, 2.66514296e-01, 6.50140368e-01, 1.91170299e-01, 2.08752199e-01, 3.01664091e-01, 9.85033484e-01, 2.92909152e-01, 8.65816607e-01, 1.85222119e-01, 2.28814559e-01, 1.34286382e-02, 2.89234322e-01, 8.18668708e-01, 4.71706924e-01, 9.23199803e-01, 2.80879188e-01, 1.47319284e-01, 4.13915748e-01, 9.31274932e-02, 6.66322195e-01, 9.66953974e-01, 3.19405786e-01, 6.69486551e-01, 5.03096313e-02, 6.95225201e-01, 5.78469859e-01, 6.29481655e-01, 1.39252534e-01, 1.22564968e-01, 6.80663678e-01, 6.34607157e-01, 6.42765834e-01, 1.57127410e-02, 2.92132086e-01, 5.24423878e-01, 4.68676824e-01, 2.86003928e-01, 7.18608322e-01, 8.95617933e-01, 5.48844309e-01, 1.74517278e-01, 5.24379196e-01, 2.13526524e-01, 5.88375435e-01, 9.88560185e-01, 4.17435771e-01, 6.14438688e-01, 9.53760881e-01, 5.27151288e-01, 7.03017278e-01, 3.44448559e-01, 4.47059676e-01, 2.83414901e-01, 1.98979011e-01, 4.24917361e-01, 5.73172761e-01, 2.32398853e-02, 1.65887230e-01, 4.05552785e-01, 9.29665524e-01, 2.26135696e-01, 9.20563384e-01, 7.65259963e-01, 4.54820075e-01, 8.97710267e-01, 
3.78559302e-03, 9.15219382e-01, 3.55705698e-01, 6.94905124e-01, 8.58540202e-01, 3.89790666e-01, 2.49478206e-01, 7.93679304e-01, 4.75830027e-01, 4.40425353e-01, 3.70579459e-01, 1.40578049e-01, 1.70386675e-01, 7.04056121e-01, 4.85963102e-01, 9.68450060e-01, 6.77178001e-01, 2.65934654e-01, 2.58915007e-01, 6.70052890e-01, 2.61945109e-01, 8.46207759e-01, 1.01928951e-01, 2.85611334e-01, 2.45776933e-01, 2.66658783e-01, 3.71724077e-01, 4.34319025e-01, 4.24407347e-01, 7.15417683e-01, 8.07997684e-01, 1.64296275e-01, 6.01638065e-01, 8.60606804e-02, 2.68719187e-01, 5.11764101e-01, 9.75844338e-01, 7.81226782e-01, 2.20925515e-01, 7.18135040e-01, 9.82395577e-01, 8.39160243e-01, 9.08058083e-01, 6.88010677e-01, 8.14271847e-01, 5.12460821e-01, 1.17311345e-01, 5.96075228e-01, 9.17455497e-01, 2.12052706e-01, 7.04074603e-01, 8.72872565e-02, 8.76047818e-01, 6.96235046e-01, 8.54801557e-01, 2.49729159e-01, 9.76594604e-01, 2.87386363e-01, 2.36461559e-02, 9.94075254e-01, 4.25193986e-01, 7.61869994e-01, 5.13334255e-01, 6.44711165e-02, 8.92156689e-01, 3.55235167e-01, 1.08154647e-01, 8.78446825e-01, 2.43833016e-01, 9.23071293e-01, 2.72724115e-01, 9.46631338e-01, 3.74510294e-01, 4.08451278e-02, 9.78392777e-01, 3.65079221e-01, 6.37199516e-01, 5.51144906e-01, 5.25978080e-01, 1.42803678e-01, 4.05451674e-01, 7.79788219e-01, 6.26009784e-01, 3.35249497e-01, 1.43159543e-02, 1.80363779e-01, 5.05096904e-01, 2.82619947e-01, 5.83561392e-01, 3.10951324e-01, 8.73223968e-01, 4.38545619e-01, 4.81348800e-01, 6.68497085e-01, 3.79345401e-01, 9.58832501e-01, 1.89869550e-01, 2.34083070e-01, 2.94066207e-01, 5.74892667e-02, 6.92106828e-02, 9.61127686e-02, 6.72650672e-02, 8.47345378e-01, 2.80916761e-01, 7.32177357e-03, 9.80785961e-01, 5.73192225e-02, 8.48781331e-01, 8.83225408e-01, 7.34398275e-01, 7.70381941e-01, 6.20778343e-01, 8.96822048e-01, 5.40732486e-01, 3.69704071e-01, 5.77305837e-01, 2.08221827e-01, 7.34275341e-01, 1.06110900e-01, 3.49496706e-01, 8.34948910e-01, 1.56403291e-02, 6.78576376e-01, 8.96141268e-01, 
5.94835119e-01, 1.43943153e-01, 3.49618530e-01, 2.10440392e-01, 3.46585620e-01, 1.05153093e-01, 3.45446174e-01, 2.72177079e-01, 7.07946300e-01, 4.33717726e-02, 3.31232203e-01, 3.91874320e-01, 4.76338141e-01, 6.22777789e-01, 2.95989228e-02, 4.32855769e-01, 7.61049310e-01, 3.63279149e-01, 9.47210350e-01, 6.43721247e-01, 6.58025802e-01, 1.05247633e-02, 5.29974442e-01, 7.30675767e-01, 4.30041079e-01, 6.62634841e-01, 8.25936616e-01, 9.91253704e-01, 6.79399281e-01, 5.44177006e-01, 7.52876048e-01, 3.32139049e-01, 7.98732398e-01, 7.38865223e-01, 9.16055132e-01, 6.11736493e-01, 9.63672879e-01, 1.83778839e-01, 7.27558919e-02, 5.91602822e-01, 3.25235484e-01, 2.34741217e-01, 9.52346277e-01, 9.18556407e-01, 9.35373324e-01, 6.89209070e-01, 2.56049054e-01, 6.17975395e-01, 7.82285691e-01, 9.84983432e-01, 6.62322741e-01, 2.04144457e-01, 3.98446577e-01, 1.38918297e-01, 3.05919921e-01, 3.14043787e-01, 5.91072666e-01, 7.44703771e-01, 8.92272567e-01, 9.78017873e-01, 9.01203161e-01, 1.41526372e-01, 4.14878484e-01, 6.80683651e-01, 5.01733152e-02, 8.14635389e-01, 2.27926375e-01, 9.03269815e-01, 8.68443745e-01, 9.86939190e-01, 7.40779486e-01, 2.61005311e-01, 3.19276232e-01, 9.69509248e-01, 1.11908818e-01, 4.49198556e-01, 1.27056715e-01, 3.84064823e-01, 5.14591811e-01, 2.10747488e-01, 9.53884090e-01, 8.43167950e-01, 4.51187972e-01, 3.75331782e-01, 6.23566461e-01, 3.55290379e-01, 2.95705968e-01, 1.69622690e-01, 1.42981830e-01, 2.72180991e-01, 9.46468040e-01, 3.70932500e-01, 9.94292830e-01, 4.62587505e-01, 7.14817405e-01, 2.45370540e-02, 3.00906377e-01, 5.75768304e-01, 9.71448393e-01, 6.95574827e-02, 3.93693854e-01, 5.29306116e-01, 5.04694554e-01, 6.73797120e-02, 6.76596969e-01, 5.50948898e-01, 3.24909641e-01, 7.70337719e-01, 6.51842631e-03, 3.03264879e-01, 7.61037886e-03, 2.72289601e-01, 1.50502041e-01, 6.71103888e-02, 7.41503703e-01, 1.92088941e-01, 2.19043977e-01, 9.09320161e-01, 2.37993569e-01, 6.18107973e-02, 8.31447852e-01, 2.23355609e-01, 1.84789435e-01, 4.16104518e-01, 4.21573859e-01, 
8.72446305e-02, 2.97294197e-01, 4.50328256e-01, 8.72199917e-01, 2.51279916e-01, 4.86219272e-01, 7.57071329e-01, 4.85655942e-01, 1.06187277e-01, 4.92341327e-01, 1.46017513e-01, 5.25421017e-01, 4.22637906e-01, 2.24685018e-01, 8.72648431e-01, 5.54051490e-01, 1.80745062e-01, 2.12756336e-01, 5.20883169e-01, 7.60363654e-01, 8.30254678e-01, 5.00003328e-01, 4.69017439e-01, 6.38105527e-01, 3.50638261e-02, 5.22217353e-02, 9.06516882e-02, 8.52975842e-01, 1.19985883e-01, 3.74926753e-01, 6.50302066e-01, 1.98875727e-01, 6.28362507e-02, 4.32693501e-01, 3.10500685e-01, 6.20732833e-01, 4.58503272e-01, 3.20790034e-01, 7.91284868e-01, 7.93054570e-01, 2.93406765e-01, 8.95399023e-01, 1.06441034e-01, 7.53085241e-02, 8.67523104e-01, 1.47963482e-01, 1.25584706e-01, 3.81545040e-02, 6.34338619e-01, 1.76368938e-02, 5.75553531e-02, 5.31607516e-01, 2.63869588e-01, 9.41945823e-01, 9.24028838e-02, 5.21496463e-01, 7.74866558e-01, 5.65210610e-01, 7.28015327e-02, 6.51963790e-01, 8.94727453e-01, 4.49571590e-01, 1.29932405e-01, 8.64026259e-01, 9.92599934e-01, 7.43721560e-01, 8.87300215e-01, 1.06369925e-01, 8.11335531e-01, 7.87734900e-01, 9.87344678e-01, 5.32502820e-01, 4.42612382e-01, 9.64041183e-01, 1.66085871e-01, 1.12937664e-01, 5.24423470e-01, 6.54689333e-01, 4.59119726e-01, 5.22774091e-01, 3.08722276e-02, 6.26979315e-01, 4.49754105e-01, 8.07495757e-01, 2.34199499e-01, 1.67765675e-01, 9.22168418e-01, 3.73210378e-01, 8.04432575e-01, 5.61890354e-01, 4.47025593e-01, 6.43155678e-01, 2.40407640e-01, 5.91631279e-01, 1.59369206e-01, 7.75799090e-01, 8.32067212e-01, 5.59791576e-02, 6.39105224e-01, 4.85274738e-01, 2.12630838e-01, 2.81431312e-02, 7.16205363e-01, 6.83885011e-01, 5.23869697e-01, 9.99418314e-01, 8.35331599e-01, 4.69877463e-02, 6.74712562e-01, 7.99273684e-01, 2.77001890e-02, 5.75809742e-01, 2.78513031e-01, 8.36209905e-01, 7.25472379e-01, 4.87173943e-01, 7.88311357e-01, 9.64676177e-01, 1.75752651e-01, 4.98112580e-01, 8.08850418e-02, 6.40981131e-01, 4.06647450e-01, 8.46539387e-01, 2.12620694e-01, 
9.11012851e-01, 8.25041445e-01, 8.90065575e-01, 9.63626055e-01, 5.96689242e-01, 1.63372670e-01, 4.51640148e-01, 3.43026542e-01, 5.80658851e-01, 2.82327625e-01, 4.75535418e-01, 6.27760926e-01, 8.46314115e-01, 9.61961932e-01, 3.19806094e-01, 5.05508062e-01, 5.28102944e-01, 6.13045057e-01, 7.44714938e-01, 1.50586073e-01, 7.91878033e-01, 4.89839179e-01, 3.10496849e-01, 8.82309038e-01, 2.86922314e-01, 4.84687559e-01, 5.20838630e-01, 4.62955493e-01, 2.38185305e-01, 5.47259907e-02, 7.10916137e-01, 7.31887202e-01, 6.25602317e-01, 8.77741168e-01, 4.19881322e-01, 4.81222328e-01, 1.28224501e-01, 2.46034010e-01, 3.34971854e-01, 7.37216484e-01, 5.62134821e-02, 7.14089724e-01, 9.85549393e-01, 4.66295827e-01, 3.08722434e-03, 4.70237690e-01, 2.66524167e-01, 7.93875484e-01, 4.54795911e-02, 8.09702944e-01, 1.47709735e-02, 1.70082405e-01, 6.35905179e-01, 3.75379109e-01, 4.30315011e-01, 3.15788760e-01, 5.58065230e-01, 2.24643800e-01, 2.42142981e-01, 6.57283636e-01, 3.34921891e-01, 1.26588975e-01, 7.68064155e-01, 9.43856291e-01, 4.47518596e-01, 5.44453573e-01, 9.95764932e-01, 7.16444391e-01, 8.51019765e-01, 1.01179183e-01, 4.45473958e-01, 4.60327322e-01, 4.96895844e-02, 4.72907738e-01, 5.58987444e-01, 3.41027487e-01, 1.56175026e-01, 7.58283148e-01, 6.83600909e-01, 2.14623396e-01, 3.27348880e-01, 3.92517893e-01, 6.70418431e-01, 5.16440832e-01, 8.63140348e-01, 5.73277464e-01, 3.46608058e-01, 7.39396341e-01, 7.20852434e-01, 2.35653246e-02, 3.89935659e-01, 7.53783745e-01, 6.34563528e-01, 8.79339335e-01, 7.41599159e-02, 5.62433904e-01, 6.15553852e-01, 4.56956324e-01, 5.20047447e-01, 5.26845015e-02, 5.58471266e-01, 1.63632233e-01, 5.38936665e-02, 6.49593683e-01, 2.56838748e-01, 8.99035326e-01, 7.20847756e-01, 5.68954684e-01, 7.43684755e-01, 5.70924238e-01, 3.82318724e-01, 4.89328290e-01, 5.62208561e-01, 4.97540804e-02, 4.18011085e-01, 6.88041565e-01, 2.16234653e-01, 7.89548214e-01, 8.46136387e-01, 8.46816189e-01, 1.73842353e-01, 6.11627842e-02, 8.44440559e-01, 4.50646654e-01, 3.74785037e-01, 
4.87196697e-01, 4.56276448e-01, 9.13284391e-01, 4.15715464e-01, 7.13597697e-01, 1.23641270e-02, 5.10031271e-01, 4.74601930e-02, 2.55731159e-01, 3.22090006e-01, 1.91165703e-01, 4.51170940e-01, 7.50843157e-01, 4.42420576e-01, 4.25380660e-01, 4.50667257e-01, 6.55689206e-01, 9.68257670e-02, 1.96528793e-01, 8.97343028e-01, 4.99940904e-01, 6.65504083e-01, 9.41828079e-01, 4.54397338e-01, 5.61893331e-01, 5.09839880e-01, 4.53117514e-01, 8.96804127e-02, 1.74888861e-01, 6.65641378e-01, 2.81668336e-01, 1.89532742e-01, 5.61668382e-01, 8.68330157e-02, 8.25092797e-01, 5.18106324e-01, 1.71904024e-01, 3.68385523e-01, 1.62005436e-01, 7.48507399e-01, 9.30274827e-01, 2.38198517e-01, 9.52222901e-01, 5.23587800e-01, 6.94384557e-01, 1.09338652e-01, 4.83356794e-01, 2.73050402e-01, 3.68027050e-01, 5.92366466e-01, 1.83192289e-01, 8.60376029e-01, 7.13926203e-01, 8.16750052e-01, 1.57890291e-01, 6.25691951e-01, 5.24831646e-01, 1.73873797e-01, 1.02429784e-01, 9.17488471e-01, 4.03584434e-01, 9.31170884e-01, 2.79386137e-01, 8.77745206e-01, 2.45200576e-01, 1.28896951e-01, 3.15713052e-01, 5.27874291e-01, 2.16444335e-01, 7.03883817e-01, 7.74738919e-02, 8.42422142e-01, 3.75598924e-01, 3.51002411e-01, 6.22752776e-01, 4.82407943e-01, 7.43107867e-01, 9.46182666e-01, 9.44344819e-01, 3.28124763e-01, 1.06147431e-01, 1.65102684e-01, 3.84060507e-01, 2.91057722e-01, 7.68173662e-02, 1.03543651e-01, 6.76698940e-01, 1.43141994e-01, 7.21342202e-01, 6.69471294e-03, 9.07298311e-01, 5.57080171e-01, 8.10954489e-01, 4.11120526e-01, 2.06407453e-01, 2.59590556e-01, 7.58512718e-01, 5.79873897e-01, 2.92875650e-01, 2.83686529e-01, 2.42829343e-01, 9.19323719e-01, 3.46832864e-01, 3.58238858e-01, 7.42827585e-01, 2.05760059e-01, 9.58438860e-01, 5.66326411e-01, 6.60292846e-01, 5.61095078e-02, 6.79465531e-01, 7.05118513e-01, 4.44713264e-01, 2.09732933e-01, 5.22732436e-01, 1.74396512e-01, 5.29356748e-01, 4.38475687e-01, 4.94036404e-01, 4.09785794e-01, 6.40025507e-01, 5.79371821e-01, 1.57726118e-01, 6.04572263e-01, 5.41072639e-01, 
5.18847173e-01, 1.97093284e-01, 8.91767002e-01, 4.29050835e-01, 8.25490570e-01, 3.87699807e-01,
4.50705808e-01, 2.49371643e-01, 3.36074898e-01, 9.29925118e-01, 6.65393649e-01, 9.07275994e-01,
3.73075859e-01, 4.14044139e-03, 2.37463702e-01, 2.25893784e-01, 2.46900245e-01, 4.50350196e-01,
3.48618117e-01, 5.07193932e-01, 5.23435142e-01, 8.13611417e-01, 8.92715622e-01, 1.02623450e-01,
3.06088345e-01, 7.80461650e-01, 2.21453645e-01, 2.01419652e-01, 2.84254457e-01, 3.68286735e-01,
7.39358243e-01, 8.97879394e-01, 9.81599566e-01, 7.56526442e-01, 7.37645545e-01, 4.23976657e-02,
8.25922012e-01, 2.60956996e-01, 2.90702065e-01, 8.98388344e-01, 3.03733299e-01, 8.49071471e-01,
3.45835425e-01, 7.65458276e-01, 5.68094872e-01, 8.93770930e-01, 9.93161641e-01, 5.63368667e-02,
4.26548945e-01, 5.46745780e-01, 5.75674571e-01, 7.94599487e-01, 7.18935553e-02, 4.46492976e-01,
6.40240123e-01, 2.73246969e-01, 2.00465968e-01, 1.30718835e-01, 1.92492005e-01, 1.96617189e-01,
6.61271644e-01, 8.12687657e-01, 8.66342445e-01

},
 -4}};

// Float/int instantiation of the ConnectComponentsTest fixture defined earlier in this file.
typedef ConnectComponentsTest<int, float> ConnectComponentsTestF_Int;

TEST_P(ConnectComponentsTestF_Int, Result)
{
  /**
   * After fixing connectivities, the final edge count must be exactly
   * n_row - 1 (enough edges to span all n_row points in one component).
   */
  EXPECT_TRUE(final_edges == params.n_row - 1);
}

INSTANTIATE_TEST_CASE_P(ConnectComponentsTest,
                        ConnectComponentsTestF_Int,
                        ::testing::ValuesIn(fix_conn_inputsf2));

/**
 * Reduction operator handed to cross_component_nn that ranks candidate
 * cross-component neighbors by a mutual-reachability-style distance:
 *   max(core_dists[row], core_dists[neighbor], pairwise_value).
 */
template <typename value_idx, typename value_t>
struct MutualReachabilityFixConnectivitiesRedOp {
  value_t* core_dists;  // device pointer to per-point core distances (length m)
  value_idx m;          // number of points covered by core_dists

  // Default ctor only zeroes m; core_dists is left unset.
  DI MutualReachabilityFixConnectivitiesRedOp() : m(0) {}

  MutualReachabilityFixConnectivitiesRedOp(value_t* core_dists_, value_idx m_)
    : core_dists(core_dists_), m(m_){};

  typedef typename raft::KeyValuePair<value_idx, value_t> KVP;

  // In-place reduce: fold `other` into `out` when its mutual-reachability
  // distance w.r.t. row `rit` is smaller. Rows >= m are ignored, as are
  // sentinel candidates whose value is numeric_limits<value_t>::max().
  DI void operator()(value_idx rit, KVP* out, const KVP& other) const
  {
    if (rit < m && other.value < std::numeric_limits<value_t>::max()) {
      value_t core_dist_rit = core_dists[rit];
      value_t core_dist_other = max(core_dist_rit, max(core_dists[other.key], other.value));
      value_t core_dist_out;
      if (out->key > -1) {
        core_dist_out = max(core_dist_rit, max(core_dists[out->key], out->value));
      } else {
        // No incumbent neighbor yet: compare against the raw stored value.
        core_dist_out = out->value;
      }

      // Keep whichever candidate has the smaller mutual-reachability distance.
      bool smaller = core_dist_other < core_dist_out;
      out->key = smaller ? other.key : out->key;
      out->value = smaller ? core_dist_other : core_dist_out;
    }
  }

  // Pure (two-operand) reduce: return whichever of `a` / `b` has the smaller
  // mutual-reachability distance w.r.t. row `rit`. Falls through to `b` when
  // rit >= m or `a` carries no valid key.
  DI KVP operator()(value_idx rit, const KVP& a, const KVP& b) const
  {
    if (rit < m && a.key > -1) {
      value_t core_dist_rit = core_dists[rit];
      value_t core_dist_a = max(core_dist_rit, max(core_dists[a.key], a.value));

      value_t core_dist_b;
      if (b.key > -1) {
        core_dist_b = max(core_dist_rit, max(core_dists[b.key], b.value));
      } else {
        core_dist_b = b.value;
      }

      return core_dist_a < core_dist_b ? KVP(a.key, core_dist_a) : KVP(b.key, core_dist_b);
    }
    return b;
  }

  // Accumulator initializers: start at maxVal (neutral for a min-style reduce).
  DI void init(value_t* out, value_t maxVal) const { *out = maxVal; }
  DI void init(KVP* out, value_t maxVal) const
  {
    out->key = -1;  // -1 marks "no neighbor found yet"
    out->value = maxVal;
  }

  // Key initializers: the scalar overload is a no-op; KVPs start keyed to idx.
  DI void init_key(value_t& out, value_idx idx) const { return; }
  DI void init_key(KVP& out, value_idx idx) const { out.key = idx; }

  DI value_t get_value(KVP& out) const { return out.value; }
  DI value_t get_value(value_t& out) const { return out; }

  // Permute core_dists on the device: tmp[i] = core_dists[map[i]], then copy back.
  void gather(const raft::resources& handle, value_idx* map)
  {
    auto tmp_core_dists = raft::make_device_vector<value_t>(handle, m);
    thrust::gather(raft::resource::get_thrust_policy(handle),
                   map,
                   map + m,
                   core_dists,
                   tmp_core_dists.data_handle());
    raft::copy_async(
      core_dists, tmp_core_dists.data_handle(), m, raft::resource::get_cuda_stream(handle));
  }

  // Inverse permutation of gather(): tmp[map[i]] = core_dists[i], then copy back.
  void scatter(const raft::resources& handle, value_idx* map)
  {
    auto tmp_core_dists = raft::make_device_vector<value_t>(handle, m);
    thrust::scatter(raft::resource::get_thrust_policy(handle),
                    core_dists,
                    core_dists + m,
                    map,
                    tmp_core_dists.data_handle());
    raft::copy_async(
      core_dists, tmp_core_dists.data_handle(), m, raft::resource::get_cuda_stream(handle));
  }
};

/**
 * Parameter pack for the mutual-reachability connect-components tests below.
 */
template <typename value_t, typename value_idx>
struct ConnectComponentsMutualReachabilityInputs {
  value_idx n_row;  // number of input points
  value_idx n_col;                       // number of features per point
  std::vector<value_t> data;             // flattened n_row * n_col point coordinates
  std::vector<value_t> core_dists;       // per-point core distances (length n_row)
  std::vector<value_idx> colors;         // initial color/component label per point
  std::vector<value_idx> expected_rows;  // expected COO row indices of the fixed edges
  std::vector<value_idx> expected_cols;  // expected COO col indices of the fixed edges
  std::vector<value_t> expected_vals;    // expected COO values of the fixed edges
};

/**
 * Parameterized test: fixes connectivities with the mutual-reachability
 * reduction op and compares the produced edge list against the expected
 * COO triplets from the fixture.
 */
template <typename value_idx, typename value_t>
class ConnectComponentsEdgesTest
  : public ::testing::TestWithParam<
      ConnectComponentsMutualReachabilityInputs<value_t, value_idx>> {
 protected:
  void basicTest()
  {
    raft::resources handle;
    auto stream = resource::get_cuda_stream(handle);

    params = ::testing::TestWithParam<
      ConnectComponentsMutualReachabilityInputs<value_t, value_idx>>::GetParam();

    // Output edge lists: one for the unbatched run, one for the batched run.
    raft::sparse::COO<value_t, value_idx> out_edges_unbatched(resource::get_cuda_stream(handle));
    raft::sparse::COO<value_t, value_idx> out_edges_batched(resource::get_cuda_stream(handle));

    // Device-side copies of the host fixture arrays.
    rmm::device_uvector<value_t> data(params.n_row * params.n_col,
                                      resource::get_cuda_stream(handle));
    rmm::device_uvector<value_t> core_dists(params.n_row, resource::get_cuda_stream(handle));
    rmm::device_uvector<value_idx> colors(params.n_row, resource::get_cuda_stream(handle));

    raft::copy(data.data(), params.data.data(), data.size(), resource::get_cuda_stream(handle));
    raft::copy(core_dists.data(),
               params.core_dists.data(),
               core_dists.size(),
               resource::get_cuda_stream(handle));
    raft::copy(
      colors.data(), params.colors.data(), colors.size(), resource::get_cuda_stream(handle));

    /**
     * 3.
cross_component_nn to fix connectivities */
    // Reduction op that ranks candidate edges by mutual-reachability distance.
    MutualReachabilityFixConnectivitiesRedOp<value_idx, value_t> red_op(core_dists.data(),
                                                                        params.n_row);

    // First run: final two arguments span the full problem size (single batch).
    raft::linkage::cross_component_nn<value_idx, value_t>(handle,
                                                          out_edges_unbatched,
                                                          data.data(),
                                                          colors.data(),
                                                          params.n_row,
                                                          params.n_col,
                                                          red_op,
                                                          params.n_row,
                                                          params.n_col);

    // Second run: deliberately small final arguments (11, 1) to exercise the
    // batched code path; results must match the unbatched run.
    raft::linkage::cross_component_nn<value_idx, value_t>(handle,
                                                          out_edges_batched,
                                                          data.data(),
                                                          colors.data(),
                                                          params.n_row,
                                                          params.n_col,
                                                          red_op,
                                                          11,
                                                          1);

    // Both runs must produce the same edge count, matching the fixture.
    ASSERT_TRUE(out_edges_unbatched.nnz == out_edges_batched.nnz &&
                out_edges_unbatched.nnz == params.expected_rows.size());

    // Unbatched edges must match the expected COO triplets (values to 1e-4).
    ASSERT_TRUE(devArrMatch(out_edges_unbatched.rows(),
                            params.expected_rows.data(),
                            out_edges_unbatched.nnz,
                            Compare<int>()));

    ASSERT_TRUE(devArrMatch(out_edges_unbatched.cols(),
                            params.expected_cols.data(),
                            out_edges_unbatched.nnz,
                            Compare<int>()));

    ASSERT_TRUE(devArrMatch(out_edges_unbatched.vals(),
                            params.expected_vals.data(),
                            out_edges_unbatched.nnz,
                            CompareApprox<float>(1e-4)));

    // Batched edges must satisfy the same expectations.
    ASSERT_TRUE(devArrMatch(out_edges_batched.rows(),
                            params.expected_rows.data(),
                            out_edges_batched.nnz,
                            Compare<int>()));

    ASSERT_TRUE(devArrMatch(out_edges_batched.cols(),
                            params.expected_cols.data(),
                            out_edges_batched.nnz,
                            Compare<int>()));

    ASSERT_TRUE(devArrMatch(out_edges_batched.vals(),
                            params.expected_vals.data(),
                            out_edges_batched.nnz,
                            CompareApprox<float>(1e-4)));
  }

  void SetUp() override { basicTest(); }

  void TearDown() override {}

 protected:
  // Current test parameters (fixture inputs + expected output edges).
  ConnectComponentsMutualReachabilityInputs<value_t, value_idx> params;
};

// Fixture data. Each entry is {n_row, n_col, data, core_dists, colors,
// expected_rows, expected_cols, expected_vals}; this initializer continues
// beyond the end of this chunk.
const std::vector<ConnectComponentsMutualReachabilityInputs<float, int>> mr_fix_conn_inputsf2 = {
  {100,
   2,
   {-7.72642, -8.39496, 5.4534, 0.742305, -2.97867, 9.55685, 6.04267, 0.571319,
    -6.52184, -6.31932, 3.64934, 1.40687, -2.17793, 9.98983, 4.42021, 2.33028,
    4.73696, 2.94181, -3.66019, 9.38998, -3.05358, 9.12521, -6.65217, -5.57297,
    -6.35769, -6.58313, -3.61553, 7.81808, -1.77073, 9.18565, -7.95052, -6.39764,
    -6.60294, -6.05293, -2.58121, 10.0178, -7.76348, -6.72638, -6.40639, -6.95294,
-2.97262, 8.54856, -6.95673, -6.53896, -7.32614, -6.02371, -2.1478, 10.5523, -2.54502, 10.5789, -2.96984, 10.0714, 3.22451, 1.55252, -6.25396, -7.73727, -7.85431, -6.09303, -8.11658, -8.20057, -7.55965, -6.64786, 4.936, 2.23423, 4.44752, 2.27472, -5.72103, -7.70079, -0.929985, 9.78172, -3.10984, 8.72259, -2.44167, 7.58954, -2.18511, 8.6292, 5.55528, 2.30192, 4.73164, -0.0143992, -8.2573, -7.81793, -2.98837, 8.82863, 4.60517, 0.804492, -3.83738, 9.21115, -2.62485, 8.71318, 3.57758, 2.44676, -8.48711, -6.69548, -6.70645, -6.49479, -6.86663, -5.42658, 3.83139, 1.47141, 2.02013, 2.79507, 4.64499, 1.73858, -1.69667, 10.3705, -6.61974, -6.09829, -6.05757, -4.98332, -7.10309, -6.16611, -3.52203, 9.32853, -2.26724, 7.10101, 6.11777, 1.4549, -4.23412, 8.452, -6.58655, -7.59446, 3.93783, 1.64551, -7.12502, -7.63385, 2.72111, 1.94666, -7.14428, -4.15994, -6.66553, -8.12585, 4.70011, 4.43641, -7.76914, -7.69592, 4.11012, 2.48644, 4.89743, 1.89872, 4.29716, 1.17089, -6.62913, -6.53366, -8.07093, -6.22356, -2.16558, 7.25125, 4.73953, 1.46969, -5.91625, -6.46733, 5.43091, 1.06378, -6.82142, -8.02308, 6.52606, 2.14775, 3.08922, 2.04173, -2.14756, 8.36917, 3.85663, 1.65111, -1.68665, 7.79344, -5.01385, -6.40628, -2.52269, 7.95658, -2.30033, 7.05462, -1.04355, 8.78851, 3.72045, 3.5231, -3.98772, 8.29444, 4.24777, 0.509655, 4.72693, 1.67416, 5.7827, 2.7251, -3.41722, 7.60198, 5.22674, 4.16363, -3.1109, 10.8666, -3.18612, 9.62596, -1.4782, 9.94557, 4.47859, 2.37722, -5.79658, -5.82631, -3.34842, 8.70507}, {0.978428, 1.01917, 0.608673, 1.45629, 0.310713, 0.689461, 0.701126, 0.63296, 0.774788, 0.701648, 0.513282, 0.757651, 0.45638, 0.973111, 0.901396, 0.613692, 0.482497, 0.688143, 0.72428, 0.666345, 0.58232, 0.554756, 0.710315, 0.903611, 0.694115, 0.796099, 0.639759, 0.798998, 0.639839, 1.30727, 0.663729, 0.57476, 0.571348, 1.14662, 1.26518, 0.485068, 0.78207, 0.791621, 1.01678, 1.28509, 1.14715, 0.381395, 0.850507, 0.788511, 0.588341, 0.878516, 0.928669, 0.405874, 0.776421, 0.612274, 
1.84963, 0.57476, 0.95226, 0.488078, 1.24868, 0.515136, 0.589378, 0.903632, 1.01678, 1.09964, 0.666345, 0.713265, 0.877168, 1.10053, 1.96887, 1.03574, 2.03728, 0.969553, 0.774788, 0.586338, 0.65168, 0.435472, 0.664396, 0.790584, 0.678637, 0.715964, 0.865494, 0.978428, 1.59242, 0.861109, 0.833259, 0.65168, 0.903632, 1.49599, 0.76347, 0.960453, 1.1848, 1.37398, 0.928957, 1.07848, 0.661798, 1.21104, 1.04579, 1.89047, 1.24288, 0.529553, 0.903611, 0.620897, 0.882467, 0.647189}, {0, 1, 2, 1, 0, 1, 2, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0, 2, 0, 0, 2, 0, 0, 2, 2, 2, 1, 0, 0, 0, 0, 1, 1, 0, 2, 2, 2, 2, 1, 1, 0, 2, 1, 2, 2, 1, 0, 0, 0, 1, 1, 1, 2, 0, 0, 0, 2, 2, 1, 2, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 2, 1, 0, 1, 0, 1, 1, 2, 1, 2, 0, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 2, 2, 2, 1, 0, 2}, {50, 54, 57, 63, 82, 87}, {57, 63, 50, 54, 87, 82}, {6.0764, 11.1843, 6.0764, 11.1843, 6.89004, 6.89004}}, {1000, 2, {-6.59634, -7.13901, -6.13753, -6.58082, 5.19821, 2.04918, -2.96856, 8.16444, -2.76879, 7.51114, -6.82261, -6.61152, 5.02008, 2.58376, 5.55621, 2.31966, 4.86379, 3.33731, 5.84639, 1.15623, -2.17159, 8.60241, -4.97844, -6.94077, -2.31014, 8.41407, 5.5582, 0.402669, 5.25265, 0.919754, 5.85298, 2.11489, -3.29245, 8.69222, -1.9621, 8.81209, -1.53408, 8.86723, -2.18227, 8.79519, 4.60519, 2.20738, -6.4759, -6.9043, -7.18766, -6.10045, -9.00148, -7.48793, 4.01674, 1.41769, -2.45347, 10.1085, -3.20892, 9.22827, -3.18612, 9.62596, 4.81977, 3.36517, 4.90693, 2.8628, -6.44269, -5.68946, -8.30144, -5.37878, 4.61485, 2.79094, -1.98726, 9.31127, -3.66019, 9.38998, -6.58607, -8.23669, -7.46015, -6.29153, 4.08468, 3.85433, -6.36842, -5.50645, -6.83602, -5.18506, -0.627173, 10.3597, 3.98846, 1.48928, -2.9968, 8.58173, -7.2144, -7.28376, -0.660242, 10.1409, -4.23528, -8.38308, -3.15984, 8.52716, -2.40987, 9.76567, -8.7548, -6.76508, 4.56971, 0.312209, -7.5487, -5.8402, -1.6096, 9.32159, 5.04813, 0.270586, -7.6525, -6.47306, -1.79758, 7.88964, -9.0153, -3.74236, -3.5715, 9.48788, -1.65154, 8.85435, 
-3.47412, 9.70034, 6.31245, 2.39219, 4.03851, 2.29295, -3.17098, 9.86672, -6.90693, -7.81338, -6.22373, -6.68537, -3.22204, 9.12072, -0.365254, 9.6482, -7.76712, -7.31757, 4.15669, 3.54716, 4.1937, 0.083629, -3.03896, 9.52755, -6.29293, -7.35501, -2.95926, 9.63714, 4.02709, 1.58547, 4.56828, 1.93595, 5.6242, 1.75918, -7.36237, -7.83344, 5.32177, 3.81988, -2.43183, 8.153, -1.97939, 10.4559, -3.49492, 9.51833, 3.39602, 1.28026, -2.42215, 8.71528, -3.57682, 8.87191, -2.77385, 11.7345, 5.71351, 0.946654, -6.50253, -6.90937, 4.08239, 0.603367, -5.64134, -6.85884, -2.76177, 7.7665, -2.25165, 8.93984, -3.49071, 9.47639, -1.06792, 7.57842, 5.15754, 1.24743, 3.63574, 1.20537, -6.07969, -8.49642, 4.12227, 2.19696, -7.17144, -8.4433, -1.92234, 11.2047, 3.23237, 1.19535, 3.85389, 0.641937, 4.82665, 1.21779, -7.68923, -6.45605, -7.00816, -8.76196, -5.12894, 9.83619, -5.66247, -5.35879, 3.05598, 2.73358, 6.06038, 1.40242, -1.69568, 7.78342, 5.13391, 2.23384, -2.96984, 10.0714, -5.36618, -6.2493, 5.55896, 1.6829, 3.55882, 2.58911, 5.36155, 0.844118, -0.0634456, 9.14351, 4.88368, 1.40909, -7.04675, -6.59753, -7.78333, -6.55575, 5.39881, 2.25436, -2.85189, 8.64285, -2.22821, 8.39159, 3.88591, 1.69249, -7.55481, -7.02463, 4.60032, 2.65467, -6.90615, -7.76198, -6.76005, -7.85318, 4.15044, 3.01733, -7.18884, -7.63227, 4.68874, 2.01376, 3.51716, 2.35558, -3.81367, 9.68396, 4.42644, 3.4639, 4.81758, 0.637825, -6.20705, -4.98023, -1.68603, 9.0876, -4.99504, -5.33687, -1.77073, 9.18565, 4.86433, 3.02027, 4.20538, 1.664, 4.59042, 2.64799, -3.09856, 9.86389, -3.02306, 7.95507, -6.32402, -6.79053, -7.67205, -7.18807, -8.10918, -6.38341, -1.67979, 6.80315, 4.00249, 3.16219, -2.54391, 7.84561, -3.22764, 8.80084, -2.63712, 8.05875, -2.41744, 7.02672, -6.71117, -5.56251, 5.18348, 1.60256, -7.40824, -6.29375, -4.22233, 10.3682, 4.8509, 1.87646, -2.99456, 9.09616, 5.1332, 2.15801, -2.27358, 9.78515, -6.73874, -8.64855, 4.96124, 2.39509, -3.70949, 8.67978, -4.13674, 9.06237, 2.80367, 2.48116, 
-0.876786, 7.58414, -3.7005, 9.67084, 6.48652, 0.903085, 6.28189, 2.98299, -6.07922, -6.12582, -5.67921, -7.537, 4.55014, 3.41329, -1.63688, 9.19763, -4.02439, 10.3812, 5.23053, 3.08187, -2.2951, 7.76855, -6.24491, -5.77041, 6.02415, 2.53708, -6.91286, -7.08823, 4.83193, 1.66405, -7.07454, -5.74634, -2.09576, 10.8911, 3.29543, 1.05452, -3.49973, 8.44799, 5.2922, 0.396778, -2.54502, 10.5789, -6.38865, -6.14523, -1.75221, 8.09212, -9.30387, -5.99606, -2.98113, 10.1032, -6.2017, -7.36802, 4.63628, 0.814805, -1.81905, 8.61307, 4.88926, 3.55062, 3.08325, 2.57918, -2.51717, 10.4942, -5.75358, -6.9315, 6.36742, 2.40949, 5.74806, 0.933264, 4.74408, 1.91058, -7.41496, -6.97064, -2.98414, 8.36096, 6.72825, 1.83358, -2.95349, 9.39159, -3.35599, 7.49944, 6.18738, 3.76905, -3.17182, 9.58488, 5.17863, 1.0525, -3.0397, 8.43847, -2.23874, 8.96405, 3.04689, 2.41364, 6.14064, 2.82339, -6.33334, -6.87369, -7.92444, -8.84647, 3.65129, 0.86958, 5.29842, 3.98337, -2.06538, 9.78892, -6.89494, -6.30082, -2.52144, 8.11703, -8.11398, -7.47257, 5.3381, 2.36666, -6.93452, -6.59456, -7.50634, -6.01772, 6.23438, 1.12621, -2.15218, 8.32138, -7.04777, -7.3522, -2.52771, 8.72563, -2.77907, 8.03552, 4.29123, 1.62391, -8.07551, -6.43551, -3.28202, 8.77747, -2.21308, 9.27534, -8.25153, -8.49367, -3.54644, 8.82395, -8.05867, -5.69243, 4.46681, 1.98875, 3.8362, 3.61229, -6.96231, -7.00186, 5.18993, 1.00483, -5.35116, -6.37227, 5.23298, 1.66362, -5.68306, -7.03864, -9.03144, -7.59926, -6.10127, -7.4313, 4.83572, 0.994797, -7.32695, -5.59909, 0.569683, 10.1339, 3.35957, 2.84563, -2.4122, 9.60944, 5.00855, 1.57983, -2.57528, 7.80327, 3.96349, 3.77411, 4.59429, 2.21651, -6.54765, -6.68961, 4.76798, 1.29212, -1.67351, 7.88458, 5.63615, 1.47941, -2.5301, 9.13161, 4.26075, 1.76959, 4.67788, 2.0932, 4.39955, 1.59835, 3.91274, 1.72565, -4.1786, 9.55765, -7.34566, -8.47481, 4.8364, 2.68217, -7.36848, -7.99973, -5.84708, -5.7534, 5.37252, 1.89245, -2.1707, 8.599, -1.3299, 9.0818, -6.79122, -5.40258, 5.56391, 
1.78827, -0.194539, 7.14702, 4.60489, 3.74397, 5.50995, 2.46885, -3.98772, 8.29444, -5.21837, -7.33721, -1.63959, 10.3699, -5.92932, -5.1695, -5.88358, -7.6369, 4.11716, 3.02218, -6.54114, -7.17551, 3.97179, 2.96521, -6.75325, -4.94118, 5.26169, 0.402945, 3.25031, 0.327771, -0.44845, 10.7696, -2.15141, 9.57507, 7.04329, 1.91555, -3.74615, 7.69383, -7.52318, -5.85015, -6.80419, -8.48208, -4.57664, 8.92517, 4.57574, 2.30193, 4.84098, 3.02382, -9.43355, -5.94579, -3.52203, 9.32853, 3.43018, 2.5731, -6.15725, -7.25294, -6.69861, -8.17694, -2.40955, 8.51081, -4.82342, -7.98332, -7.10611, -6.51274, 5.86755, 0.763529, -6.56045, -5.53966, -3.61553, 7.81808, 4.3825, 0.304586, -6.52818, -5.80996, 4.59972, 0.542395, -6.90603, -6.59995, -6.3585, -6.23489, -6.01915, -7.46319, -5.38694, -7.15123, -7.83475, -6.45651, 5.89564, 1.07856, -5.15266, -7.27975, -6.97978, -7.08378, 5.83493, 0.449983, -2.62374, 10.2521, -7.34494, -6.98606, -6.79719, -8.33766, 3.54757, 1.65676, -8.40528, -5.61753, -5.85556, -6.28758, 4.66862, 3.25162, -6.26047, -4.82261, 4.61552, 4.11544, -1.36637, 9.76622, 4.2517, 2.14359, -2.45099, 7.87132, -0.376164, 7.0622, 4.34493, 3.22091, 6.95921, 2.36649, -6.70319, -7.24714, -5.56932, -5.48443, -7.43149, -4.32191, -3.23956, 9.23074, -5.77255, -7.00049, 4.96601, 0.722056, -7.88617, -5.74023, 4.18757, -0.45071, -7.12569, -7.72336, 5.27366, 2.38697, 3.93487, 1.9174, 3.19186, -0.225636, -3.41722, 7.60198, -3.08286, 8.46743, -5.87905, -7.55073, -5.26425, -7.20243, -2.97867, 9.55685, -1.23153, 8.42272, -2.33602, 9.3996, -3.33819, 8.45411, -3.58009, 9.49676, 3.78152, 2.67348, -1.54582, 9.42707, -4.04331, 10.292, 3.3452, 3.134, -2.75494, 8.74156, -3.26555, 7.59203, -7.27139, -7.80252, 3.5293, 3.72544, 6.11642, 3.35326, 4.01611, 3.8872, 4.89591, 2.95586, -7.06677, -5.89438, 4.19438, 3.42655, -6.11355, -5.65318, -7.59645, -8.74665, -5.80362, -6.8588, 3.80453, 4.11832, 5.70655, 3.14247, -4.98084, 8.21739, -1.87642, 11.285, 4.39864, 2.32523, -3.48388, 9.80137, 4.02836, 
0.566509, -2.41212, 9.98293, -5.40846, -7.08943, 4.01506, 1.99926, -3.43613, 8.95476, -7.24458, -7.71932, 6.02204, 2.62188, -6.29999, -6.55431, 6.19038, 0.974816, 3.55882, 3.02632, -7.06011, -3.687, -1.55877, 8.43738, -5.14711, -4.64881, 4.7167, 0.690177, -7.90381, -5.02602, 4.17218, 2.31967, -0.643423, 9.48812, -7.95237, -6.64086, -4.05986, 9.08285, -6.24158, -6.37927, -6.6105, -7.2233, -6.21675, -5.70664, -3.29967, 9.48575, 3.41775, 2.68617, -2.24948, 8.10997, -2.24931, 9.79611, -9.0523, -6.03269, -2.2587, 9.36073, 5.20965, 2.42088, -3.10159, 8.1503, -6.67906, -5.73147, 4.0687, 2.54575, -1.24229, 8.30662, -2.09627, 8.45056, -7.87801, -6.57832, 4.72216, 3.03865, -0.929985, 9.78172, -8.56307, -7.68598, -7.05257, -5.1684, -7.09076, -7.86729, 4.61432, 3.1459, -6.34133, -5.8076, -3.82943, 10.8457, -8.46082, -5.98507, 5.34763, 1.4107, -1.68714, 10.9111, -1.67886, 8.1582, -0.623012, 9.18886, -4.21258, 8.95874, -2.16744, 10.8905, -6.57158, -7.27176, 2.14047, 4.26411, -8.44217, -7.40916, 5.29008, 1.87399, 4.31824, 4.04992, -3.77008, 9.93215, -2.72688, 10.1131, -6.14278, -7.16144, -3.92457, 8.59364, -5.92649, -6.59299, 4.68369, 1.82617, -6.89905, -7.18329, 3.95173, 4.22561, -7.66453, -6.23183, -2.44167, 7.58954, -6.36603, -7.41281, -6.45081, -6.187, -6.6125, -6.37138, 5.46036, 2.48044, -2.14756, 8.36917, -2.3889, 9.52872, 3.80752, 2.44459, -3.98778, 10.158, -6.63887, -4.27843, -8.65266, -5.61819, -7.97003, -5.46918, -5.9604, -7.54825, -0.916011, 8.50307, -3.69246, 6.97505, -7.98533, -7.09503, -2.30033, 7.05462, 4.76218, 2.51647, -7.04981, -7.33334, 3.66401, 3.02681, -2.50408, 8.7797, 7.19996, 1.87711, 4.01291, 3.78562, -0.356015, 8.24694, -0.958046, 9.12996, 4.60675, 3.76773, 6.21945, 1.45031, 4.27744, 0.8535, -4.72232, -7.48582, 6.03923, 2.8978, -3.26833, 9.16468, -7.97059, -7.29092, -2.3998, 9.74005, -2.66721, 8.58741, -7.36269, -6.73332, -7.87893, -7.38488, 4.65023, 0.661333, -4.8171, -7.94764, -4.11564, 9.21775, 4.80633, 2.46562, -2.72887, 9.3714, -5.26735, -5.5652, 
4.9826, 2.42992, -6.17018, -7.3156, 4.38084, 1.77682, 5.35084, 2.41743, -2.61796, 9.416, 5.27229, 2.94572, -7.52315, -5.95227, -1.45077, 7.25555, -3.79916, 7.71921, -2.23251, 9.84147, 3.70054, 1.82908, -1.93831, 10.1499, -6.18324, -5.9248, -3.33142, 9.25797, -6.08536, -8.1344, 5.95727, 2.17077, 4.87366, 0.417274, -6.529, -6.39092, -9.24256, -7.88984, -6.36652, -7.13966, -3.90777, 9.57726, -7.06252, -5.50523, -2.26423, 8.50734, -2.84498, 10.6833, 5.0391, 2.62037, -2.74815, 8.10672, 3.35945, 3.72796, -4.11668, 9.19892, 5.66903, 2.44577, -1.63807, 8.68826, -7.42587, -6.48831, 6.17063, 3.19193, -2.28511, 9.02688, -7.10088, -7.15692, 4.46293, 1.17487, -5.91017, -6.45292, -2.26724, 7.10101, -2.43339, 8.33712, -4.63309, 8.48853, -3.31769, 8.51253, -2.49078, 10.6907, -1.30798, 8.60621, 6.30535, 2.98754, -5.79384, -6.78213, -1.93213, 8.81124, 4.55773, 3.09047, 6.37584, 2.17108, 4.3927, 1.29119, -3.2245, 9.69388, -1.69634, 9.64392, 2.799, 0.693593, -2.1426, 8.07441, -8.4505, -8.00688, 4.736, 1.51089, -2.5863, 9.35544, -2.94924, 9.14503, 6.2054, 1.90742, 5.67172, 0.487609, -5.69071, -6.17181, -8.24651, -7.10488, -7.34424, -6.67895, -6.71977, -7.90778, -1.82294, 7.40157, -9.40991, -7.16611, -4.37999, 8.66277, -1.42615, 10.0681, -2.00828, 8.03673, -7.50228, -6.6855, -5.65859, -6.29801, -8.02335, -6.77155, -3.40761, 9.50621, -2.82447, 9.77326, -1.5938, 9.34304, -3.5213, 7.35943, -3.36961, 8.62973, -7.01708, -5.92724, 5.20886, 3.60157, -1.71817, 8.1049, -2.46363, 8.36269, -2.77809, 7.90776, -2.75459, 8.26055, -2.03596, 8.94146, -4.53434, 9.20074, -7.44387, -6.69556, -6.90099, -7.62732, 3.29169, 2.71643, 6.08686, 2.16972, -2.31111, 8.86993, -5.75046, 7.9899, 4.69951, 1.32623, 4.71851, -0.025031, -6.42374, -4.71511, -8.04974, -8.68209, -3.16103, 9.06168, -6.18267, -7.21393, -7.94202, -6.4518, -7.07697, -7.03138, 3.93554, 0.564708, -1.20372, 9.03529, -7.10611, -7.83955, -7.47529, -5.50567, -6.15453, -6.36393, -2.98024, 9.24634, -7.75761, -7.70699, -3.08597, 9.76968, -8.04954, 
-9.75237, 5.2534, 0.950377, 5.63789, -0.923086, -5.7065, -6.51047, -8.02132, -7.07377, -8.28594, -6.96322, -7.70722, -6.79397, -2.4962, 10.4678, 5.02846, 4.46617, 4.02648, 1.6707, -0.319395, 8.20599, 4.74525, 0.639144, -1.0313, 8.49602, 4.08766, 2.6061, 3.63826, 1.69207, 2.55795, 3.66963, 5.2826, 3.30232, -1.04355, 8.78851, -6.84762, -7.63353, -4.70868, -7.056, 3.53651, -0.179721, -3.38482, 7.63149, -5.9265, -6.36702, -0.986074, 9.5532, -2.42261, 8.85861, -7.42835, -6.78726, -4.02857, 8.53005, -8.22675, -7.85172, -5.57529, -8.5426, 6.03009, 2.53098, -7.10448, -7.53011, -3.4988, 8.8885, -2.62485, 8.71318, -6.39489, -7.72647, 3.93789, 1.31027, 4.27627, 1.91622, -0.923181, 7.77647, -5.16017, 10.1058, -6.44307, -5.97617, -7.24495, -6.69543, 6.27331, 0.826824, -6.55655, -7.13246, 5.66245, 4.41292, -2.13805, 8.4103, 5.23463, 2.82659, -4.86624, -6.74357, -6.14082, -6.26474, -2.67048, 9.41834, -1.26311, 6.9409, -7.20231, -7.13094, -1.35109, 9.80595, 3.9906, 0.749229, -6.75696, -5.25543, 4.84826, -0.0685652, -7.4914, -6.91715, 4.46725, 2.85683, -2.95571, 9.87068, 6.32381, 1.51429, -6.81177, -6.02734, -2.57188, 9.96943, -4.28792, 10.5103, 3.65025, 2.91394, -7.11856, -7.24693, -6.98693, -6.43239, 4.7651, 1.54376, 4.00092, 0.65008, -7.14816, -7.7713, -7.58803, -8.39382, 4.3321, 2.19232, -7.89545, -6.81843, -2.11475, 8.5933, -0.743743, 9.41927, 3.64849, -0.18022, -1.68665, 7.79344, 4.00214, 1.44217, -6.96799, -7.25012, -1.58302, 10.9237, -6.68524, -7.23328, 4.65831, 2.32075, 4.62024, 2.52566, -4.23412, 8.452, -0.822056, 9.89593, -7.19868, -7.67614, -3.32742, 11.1067, 5.27861, 0.830165, 4.48982, 2.09875, -6.58087, -7.6319, -0.880582, 7.63418, -7.01088, -6.80326, -7.31601, -6.98972, -6.85883, -7.60811, 6.14328, 2.85053, -7.49206, -6.51861, -2.28174, 10.3214, 4.81074, 1.78919, -5.58987, -6.20693, 4.08096, 2.35038, -1.5029, 8.43739, 4.11536, 2.46254, -3.28299, 7.76963, 4.31953, 2.39734, 4.91146, 0.696421, -1.4782, 9.94557, -3.34842, 8.70507, -6.97822, -6.86126, 4.10012, 1.19486, 
-2.50395, 9.06127, 4.41891, 2.00006, -2.73266, 9.72829, 3.5436, 0.533119, 5.78864, 0.233456, -6.62589, -6.41242, -2.21942, 11.0897, -6.76636, -8.31839, -2.71732, 8.52129, -5.20972, -6.48544, 3.26056, 1.24224, 3.45228, 2.28299, 4.72171, 1.87428, -7.52585, -5.1048, 5.0695, 2.18086, -6.55646, -7.02771, 3.23727, 3.72275, 3.41411, 0.508795, -7.80698, -6.64174, -5.90443, -6.37902, -0.387041, 10.0468, -1.3506, 8.1936, -6.08614, -8.62864, -5.91478, -5.26453, -2.61623, 7.97904, 4.45459, 1.84335, -6.66643, -7.63208, 3.6729, 1.92546, -1.32976, 8.54511, 6.31758, 1.41958, 4.63381, 2.81166, -7.01394, -6.0693, -2.7786, 9.73183, -2.90131, 7.55077, -7.13842, -5.28146, 6.71514, 1.28398, -6.98408, -7.04893, -3.03946, 8.22141, -2.76417, 10.5183, -7.35347, -6.89456, 4.19345, 2.16726, -2.02819, 9.23817, 4.97076, 2.8067, -0.544473, 9.04955, 4.90727, 2.29487, -6.31871, -7.17559, 3.71665, 0.621485, 4.7903, 2.33813, -6.47994, -7.53147, -6.80958, -5.71823, -8.07326, -5.96096, 4.77342, 1.8207, 5.71856, 1.93466, -2.70156, 9.31583, -2.1478, 10.5523, 4.78855, 1.63608, 5.53507, 2.60834, -7.00058, -6.46058, 5.4738, 2.43235, -1.34603, 9.02452, -7.5337, -8.71074, -7.30893, -7.57253, -5.33752, -4.87402, -7.01364, -6.86542, -7.93331, -7.94791, -5.69392, -6.16116, -7.32291, -7.76491, -6.41965, -7.55783, -7.87996, -7.55785, -6.69005, -5.87906, 3.92147, 2.86809, -1.5552, 9.66568, 5.07989, 1.47112, -7.48524, -5.0541, -1.82724, 8.70402, -2.00421, 9.88004, -2.62153, 8.79332, -7.52111, -6.44819, 4.06424, 2.09518, -6.65494, -5.94752, 6.93878, 1.61033, -3.95728, 7.60682, 5.67016, 2.21196, -7.81507, -5.79413, -2.41152, 8.24128, -3.83738, 9.21115, 4.5516, 4.55288, -5.75551, -5.93258, 4.56545, 2.59384, -7.45614, -9.47115, -2.39568, 9.67642, 5.57816, 1.45712, -7.48184, -6.41134, -1.99415, 12.867, -8.35854, -6.69675, -7.52559, -7.6793, 5.7454, 3.1602, 2.94692, 1.87483, -8.77324, -6.66682, -3.21125, 8.68662, -6.25806, -7.24972, 5.17639, 1.0747, -2.44897, 11.4775, -3.30172, 8.89955, -2.85191, 8.21201, -8.85893, 
-6.1322, 4.08957, 1.30155, -5.88132, -7.31173, -7.10309, -7.22943, -2.46068, 8.18334, -7.01226, -7.85464, 4.75411, 2.12347, -3.42862, 10.5642, 7.16681, 1.4423, 5.42568, 2.39863, -6.00833, -8.22609, -1.7619, 9.62466, -2.49527, 8.99016, -2.98837, 8.82863, -2.97262, 8.54856, -1.34142, 9.26871, -5.99652, -6.95795, -1.87061, 7.35277, -8.68277, -8.46425, -7.01808, -8.10441, -7.04269, -7.62501, -7.69783, -6.88348, -2.19829, 10.4896, 4.67396, 1.2032, -5.58263, -6.90298, -5.69224, -4.29055, 4.77285, 1.27305, -3.33469, 8.6929, -2.54195, 8.47086, 4.46492, 1.21742, 5.41158, -0.875373, -8.68069, -7.42278, -3.88687, 8.07646, 4.6682, 2.00293, -8.29799, -8.64092, -1.86382, 10.3829, -6.51234, -5.04193, 4.54458, 2.25219, -1.93264, 9.32554, -3.06285, 7.81641, -6.90714, -5.10786, 4.69653, 2.50286, 6.43757, 2.61401, -1.85483, 8.9587, 4.60224, 3.07647, 4.4492, 2.1906, 5.02181, 2.40321, -2.22923, 7.8888, 5.68943, 1.43793, -6.71097, -6.43817, -5.00633, -5.80006, -2.43763, 8.53663, 5.72577, 2.44787, -6.57079, -5.17789, -5.77867, -4.92176, -6.57222, -6.06437, 3.96639, 2.25216, -7.95177, -9.80146, 4.92574, 2.30763, -7.6221, -8.20013, -6.4132, -6.91575, 4.01432, 2.36897, 3.0833, 1.54505, -1.99416, 9.52807, -7.85128, -8.25973, -0.86423, 8.76525, -6.31412, -8.64087, -8.07355, -6.73717, -2.52821, 8.01176, -5.82357, -6.65687, -7.08865, -7.73063, -5.56251, -6.99818, -2.12513, 8.98159, -6.89834, -7.26863, -7.92654, -6.34346, 4.86201, 1.49442, 4.92905, 4.42847, -5.57789, -5.3186, 4.34232, 3.34888, 2.64614, 2.34723, -4.10363, 8.41491, -2.18648, 8.18706, -3.39871, 8.19848, -2.66098, 9.6026, -6.95927, -6.42774, -5.61392, -7.74628, 5.60376, 4.18369, 5.28536, 4.13642, 4.8428, 0.457426, -6.33816, -6.12095, -2.4394, 8.62897, 4.56938, 2.45967, 4.0582, 0.958413, 5.62164, 1.64834, 5.73119, 2.58231, 4.66806, 1.96405, -6.71905, -6.87706, -2.18503, 8.88414, -6.03901, -6.33338, -8.38435, -6.12005, 0.0641622, 9.0735, 5.19967, 3.05395, -5.48716, -7.13016, -6.85541, -5.46789, -1.88353, 8.15713, 4.27891, 3.1325, 
-2.75816, 9.98586, -2.03022, 9.34795, -7.66741, -7.50096, -3.39305, 9.16801, -8.49476, -5.71537, -1.68378, 9.8278, -7.41559, -6.07205, -3.15577, 7.93274, 5.22381, 1.61388, 3.65739, 1.74854, 4.94251, 1.21889, -7.12832, -5.27276, -9.58286, -6.20223, -2.21613, 8.29993, 5.34799, 2.92987, 4.09496, 2.37231, -7.25183, -5.79136, -6.46981, -7.12137, -6.28607, -9.8205, 4.52865, 1.06926, -3.10984, 8.72259, 3.61865, 2.68153, -5.96604, -7.68329, 3.11435, 1.28126, -1.1064, 7.61243, -2.17688, 8.2658, -3.27246, 7.2094, -5.55143, -6.32388, -1.69667, 10.3705, -2.16558, 7.25125, -6.36572, -6.70053, 4.12259, 3.38252, -4.80554, -7.79949, -5.23966, -6.13798, 4.21969, 1.69139, -1.98985, 10.547, -2.52269, 7.95658, -6.75642, -6.32862, -3.51521, 7.8001, 4.70435, -0.00229688, 6.25359, 2.4267, 5.82935, 0.745562, 5.24778, 2.15978, 5.48052, 1.32055, -3.05358, 9.12521, -3.18922, 9.24654, 4.47276, 2.11988, 5.36751, 2.02512, -2.18511, 8.6292, -2.48469, 9.51228, 5.57556, 3.24472, -2.58121, 10.0178, -6.12629, -6.49895, -4.54732, 8.0062, -4.20166, 10.5438, -7.61422, -7.69036, -4.42797, 8.98777, 4.45301, 1.53344, 4.59296, 2.45021, -6.81264, -6.36417, 4.62346, 3.16156, -5.93007, -8.36501, -2.78425, 6.71237, -6.17141, -6.64689, -5.20608, 8.95999, -7.30598, -5.73166, 4.39572, 2.93726, -1.89503, 9.77179, -5.683, -7.48989, 4.80924, 0.559455, -2.17793, 9.98983, 5.23728, 2.67434, -7.03976, -6.20877, 3.90435, 3.20926, -7.78536, -7.53388, -1.00684, 9.08838, -5.26741, -5.98327, 3.28002, 2.71942, -1.47166, 8.50427, -2.32733, 9.26251, 5.16271, 1.39947, -6.59093, -6.61979, -2.44492, 7.93654, -1.05805, 9.97356, -3.1109, 10.8666, 3.38834, 3.41693, 4.83098, 2.01961, -2.74013, 9.71049, -3.34892, 8.41489, 4.94768, 0.263001, 3.57477, 1.66795, 5.78915, 1.26999, -4.81812, -5.67174, -1.88508, 9.64263, 3.69048, 4.60555, 4.03037, 1.7862, -7.4418, -7.08933}, {0.127717, 0.211407, 0.195547, 0.21633, 0.39671, 0.229008, 0.20839, 0.169236, 0.314314, 0.322473, 0.169506, 0.45499, 0.147819, 0.296502, 0.15198, 0.356444, 0.0992833, 
0.220833, 0.296206, 0.178067, 0.135359, 0.189725, 0.243099, 0.519986, 0.168105, 0.273465, 0.126033, 0.18045, 0.282832, 0.193901, 0.213704, 0.425046, 0.203191, 0.228674, 0.209267, 0.355039, 0.212918, 0.315495, 0.294112, 0.257576, 0.5786, 0.186019, 0.171919, 0.171919, 0.449151, 1.34947, 0.171919, 0.16341, 0.641387, 0.342115, 0.267343, 0.246125, 0.277612, 0.181462, 0.22944, 1.95598, 0.164897, 0.235803, 0.228273, 0.314629, 0.127403, 0.241241, 0.189362, 0.151691, 0.130085, 0.526707, 0.217069, 0.282306, 0.531523, 0.177035, 0.169776, 0.20395, 0.177165, 0.146628, 0.280013, 0.223033, 0.50947, 0.184133, 0.295329, 0.183219, 0.28166, 0.179348, 0.276462, 1.00283, 0.248147, 0.214453, 0.231732, 0.170672, 0.256893, 0.133271, 0.151137, 0.500823, 0.23678, 0.376983, 0.362061, 0.140013, 0.388863, 0.398552, 0.38015, 0.190081, 0.167115, 0.206884, 0.473849, 1.05117, 0.435665, 0.323618, 0.326201, 0.32226, 0.201787, 0.246496, 0.28325, 0.226596, 0.238153, 0.277268, 0.674629, 0.179433, 0.175651, 0.154778, 0.178195, 0.192796, 0.103571, 0.227621, 0.201124, 0.160525, 0.160964, 0.240099, 0.258027, 0.134127, 0.127717, 0.341378, 0.311595, 0.282306, 0.168988, 0.40775, 0.246125, 0.583131, 0.236804, 0.238633, 0.194824, 0.169315, 0.244227, 0.249511, 0.189725, 0.305662, 0.301415, 0.658641, 0.250944, 0.151792, 0.141383, 0.143843, 0.563347, 0.184216, 0.204155, 0.221764, 0.314908, 0.144518, 0.228808, 0.255785, 0.163457, 0.424705, 0.170202, 0.312598, 0.300629, 0.532614, 0.661392, 0.228273, 0.543432, 0.257175, 0.258994, 0.281413, 0.273897, 0.246837, 0.293489, 0.25533, 0.260492, 0.213704, 0.3091, 0.17103, 0.172285, 0.241399, 0.35999, 0.372243, 0.269191, 0.390239, 0.31761, 0.200593, 0.22197, 0.752914, 0.266571, 0.13102, 0.268659, 0.293723, 0.356294, 0.296258, 0.264531, 0.15468, 0.358535, 0.243711, 0.112147, 0.121659, 0.197101, 0.515292, 0.245628, 0.279863, 0.789807, 0.195156, 0.196073, 0.149564, 0.118675, 0.389373, 0.233821, 0.176128, 0.481088, 0.360027, 0.553152, 0.208207, 0.171608, 0.160489, 0.334298, 
0.139426, 0.168603, 0.266199, 0.326458, 0.103571, 0.171208, 0.130961, 0.190887, 0.177229, 0.241651, 0.115152, 0.196753, 0.481088, 0.230965, 0.354631, 0.14591, 0.328543, 0.141544, 0.195888, 0.290379, 0.245954, 0.184547, 0.575214, 0.186929, 0.28527, 0.292213, 1.20033, 0.281528, 0.15625, 0.211524, 0.186398, 0.298061, 0.147393, 0.245349, 0.164527, 0.224771, 0.222382, 0.251643, 0.148835, 0.135359, 0.204967, 0.193024, 0.486309, 0.389686, 0.211921, 0.307405, 0.38666, 0.26802, 0.16605, 0.323134, 0.268397, 0.217894, 0.974118, 0.371618, 0.156201, 0.305787, 0.339305, 0.371032, 0.381765, 0.22747, 0.24906, 0.100884, 0.253192, 0.314253, 0.388289, 0.580947, 1.00267, 0.241998, 0.489101, 0.341501, 0.247423, 0.328311, 0.440281, 0.14927, 0.244469, 0.846828, 0.191725, 0.217429, 0.123403, 0.322875, 0.145373, 0.757259, 0.190086, 0.316286, 0.268397, 0.296721, 0.440472, 0.186848, 0.232134, 0.180239, 0.219724, 0.205886, 0.250975, 0.145636, 0.312476, 0.366418, 0.128135, 0.315235, 0.264531, 0.161815, 0.31631, 0.296489, 0.37171, 0.197217, 0.195625, 0.479579, 0.443037, 0.323347, 0.193616, 0.160251, 0.8952, 0.256291, 0.593345, 0.177165, 0.409514, 0.847863, 0.111448, 0.210031, 0.251347, 0.351953, 0.705204, 0.117901, 0.182343, 0.230179, 0.83632, 0.22104, 0.145163, 0.200326, 0.23431, 0.21868, 0.253575, 0.186562, 0.192757, 0.172716, 0.27396, 0.258581, 0.327892, 0.376138, 0.223477, 0.302375, 0.145845, 0.436902, 0.421794, 0.328543, 0.19246, 0.238889, 0.254866, 0.284674, 0.457849, 0.202937, 0.392568, 0.453083, 0.782713, 0.465401, 0.178623, 0.304863, 0.190081, 0.228641, 0.255135, 0.245037, 0.217526, 0.109584, 0.276462, 0.182301, 0.38582, 0.349942, 1.3889, 0.30235, 0.796353, 0.160168, 0.643204, 0.153752, 0.410268, 0.186439, 0.256834, 0.185783, 0.0957629, 0.226596, 0.197951, 0.17123, 0.192836, 0.18405, 0.575784, 0.228874, 0.201787, 0.241209, 0.217386, 0.195751, 0.291585, 0.144531, 0.14176, 0.157635, 0.410268, 0.476338, 0.308148, 0.148077, 0.152093, 0.196791, 0.568087, 0.414026, 0.250587, 0.473463, 
0.293645, 0.396768, 0.2766, 0.38664, 0.135034, 1.50827, 0.472527, 0.268418, 0.40383, 0.375914, 0.246496, 0.176474, 0.340405, 0.220833, 0.138782, 0.159009, 0.444219, 0.259582, 0.33638, 0.195586, 0.210974, 0.200288, 0.148129, 0.0974216, 0.211588, 0.280081, 0.44113, 0.773921, 0.553848, 0.448079, 0.183136, 0.380854, 0.685021, 0.308767, 0.553276, 0.181578, 0.164759, 0.313889, 0.137886, 0.545387, 0.278449, 0.736895, 0.360054, 0.358929, 0.457315, 0.343278, 0.507662, 0.280829, 0.113886, 0.23146, 0.160584, 0.192796, 0.147561, 0.241272, 0.168988, 0.730511, 0.27836, 0.179847, 0.22555, 0.418069, 0.158348, 0.128965, 0.179454, 0.126366, 0.164434, 0.273633, 0.309556, 0.500823, 0.367852, 0.192875, 0.230262, 0.32724, 0.249969, 0.142618, 0.494229, 0.36108, 0.227931, 0.23113, 0.742825, 0.190126, 0.33741, 0.280598, 0.145268, 0.378423, 0.211921, 0.183594, 0.59201, 0.279563, 0.195683, 0.248101, 0.199754, 0.342494, 0.174343, 0.14149, 0.28085, 0.175781, 0.518738, 0.17223, 0.489904, 0.181167, 0.354286, 0.297824, 0.280829, 0.219412, 0.22814, 0.195625, 0.313949, 0.294708, 0.211551, 0.236255, 0.666933, 0.204808, 0.52591, 0.180725, 0.186889, 0.246589, 0.410575, 0.338348, 0.206219, 0.361766, 0.158143, 0.280816, 0.4149, 0.773082, 0.340046, 0.369672, 0.256923, 0.167195, 0.197217, 0.252339, 0.172716, 0.191526, 0.263085, 0.345698, 0.168286, 0.243099, 0.434631, 0.22944, 0.161862, 0.206589, 0.23457, 0.181924, 0.419063, 0.183427, 0.186152, 0.236352, 0.306336, 0.149002, 1.50086, 0.188231, 0.442757, 0.485602, 0.466662, 0.17329, 0.141329, 0.180619, 0.160061, 0.192569, 0.270999, 0.117901, 0.362693, 0.217561, 0.208975, 0.233658, 0.175173, 1.10307, 0.14625, 1.31124, 0.237608, 0.286784, 0.325112, 0.2485, 0.259641, 0.553152, 0.179039, 0.780781, 0.174758, 0.297824, 0.2558, 0.235949, 0.952186, 0.356744, 0.312646, 0.189362, 0.574524, 0.705204, 0.213168, 0.225956, 0.424165, 0.169506, 0.137109, 0.352451, 0.454554, 0.653302, 0.31261, 0.194412, 0.23719, 0.137886, 0.31498, 0.199085, 0.203875, 0.597248, 1.10036, 
0.196869, 0.22104, 0.451345, 0.105613, 0.683928, 0.135204, 0.25533, 0.607871, 0.219724, 0.184464, 0.725001, 0.160061, 0.333407, 0.192569, 0.234147, 0.47178, 0.161815, 0.242455, 0.215305, 0.410575, 0.242376, 0.211335, 0.462804, 0.275065, 0.126878, 0.170404, 0.179433, 0.147244, 0.109584, 0.352905, 0.158215, 0.197604, 0.172407, 0.407506, 0.645446, 0.313061, 0.165602, 0.136663, 0.55444, 0.15527, 0.133128, 0.125912, 0.340405, 0.44521, 0.122783, 0.814526, 0.243773, 0.15743, 0.266743, 0.684458, 0.22221, 0.181294, 0.193901, 0.258802, 0.167195, 0.292056, 0.132309, 0.227671, 0.117334, 0.271758, 0.146185, 0.225042, 0.225964, 0.194863, 0.290274, 0.138438, 0.196714, 0.266012, 0.267771, 0.162544, 0.244258, 0.358038, 0.522617, 0.192875, 0.45066, 0.330396, 0.223477, 0.42967, 0.350884, 0.404655, 0.123155, 0.431583, 0.191675, 0.147354, 0.609034, 0.459487, 0.187337, 0.215128, 0.604169, 0.330165, 0.494229, 0.40775, 0.167377, 0.192648, 0.234635, 0.275578, 0.253094, 0.420063, 0.228299, 0.206478, 0.20395, 0.377656, 0.317393, 0.478623, 0.159009, 0.217034, 0.300933, 0.139754, 0.153901, 0.261077, 0.22834, 0.449609, 0.157672, 0.176474, 0.285704, 0.180186, 0.212738, 0.266428, 0.388313, 0.0954637, 0.298093, 0.251643, 0.330696, 0.159572, 0.210666, 0.149411, 0.139618, 0.338472, 0.450304, 0.208793, 0.583609, 0.185865, 0.400576, 0.21626, 0.174867, 0.239144, 0.249113, 0.200402, 0.275065, 0.238793, 0.205784, 0.4475, 0.231262, 0.259082, 0.20934, 0.16806, 0.193616, 0.213811, 0.395632, 0.482465, 0.274649, 0.307405, 0.165866, 0.334275, 0.683337, 0.368825, 0.14625, 0.780742, 0.163457, 0.226596, 0.138713, 1.79155, 0.400443, 0.233658, 0.426399, 0.623024, 0.670955, 0.123588, 0.110899, 0.173751, 0.651068, 0.199983, 0.190887, 0.541435, 0.21324, 0.266571, 0.134638, 0.179348, 0.145636, 0.170929, 0.623252, 0.587738, 0.109688, 0.515314, 0.217666, 0.213311, 0.249144, 0.187947, 0.270999, 0.268311, 0.469782, 0.763609, 0.32124, 0.146315, 0.265223, 0.298694, 0.197623, 0.21349, 0.845778, 0.175466, 0.123588, 0.17223, 
0.258603, 1.17119, 0.538142, 0.407675, 0.120288, 0.587238, 0.244664, 0.333956, 0.132812, 0.21399, 0.302375, 0.275882, 0.134284, 0.377555, 0.228541, 0.187307, 0.143804, 0.180545, 0.222451, 0.239638, 0.188028, 0.46334, 0.175868, 0.242392, 0.314762, 0.44473, 0.21962, 0.175966, 1.12364, 0.138837, 0.400576, 0.18184, 0.137706, 0.409763, 0.216894, 0.466662, 0.376604, 0.487155, 0.283143, 0.118547, 0.221591, 0.122783, 0.179007, 0.16628, 0.180999, 0.239845, 0.169607, 0.578402, 0.396537, 0.222288, 0.563237, 0.371238, 0.138658, 0.324336, 0.191526, 0.168603, 0.357715, 0.640905, 0.460706, 0.220902, 0.240797, 0.164062, 0.157853, 0.34457, 0.196092, 0.289353, 0.104597, 0.259641, 0.126878, 0.175781, 0.441458, 0.820108, 0.261864, 0.23431, 0.254506, 0.271955, 0.227529, 0.22834, 0.196753, 0.224906, 0.193783, 0.419481, 0.236933, 0.229706, 0.29785, 0.222947, 0.177606, 0.216911, 0.305188, 0.933438, 0.116666, 0.278483, 0.0973824, 0.271224, 0.127717, 1.28139, 0.276283, 0.180704, 0.234554, 0.285984, 0.290172, 0.49594, 0.135879, 0.436784, 0.206219, 0.342215, 0.374165, 0.182217, 0.274864, 0.625, 0.356925, 0.194324, 0.342215, 0.113012, 0.155123, 0.254207, 0.438919, 0.262548, 0.302299, 0.179528, 0.312744, 0.168513, 0.142618, 0.150543, 0.231361, 0.166004, 0.186725, 0.38848, 0.179857, 0.182301, 0.629476, 0.44113, 0.289669, 0.328543, 0.279938, 0.14625, 0.187174, 0.157635, 0.396749, 0.798931, 0.201541, 0.778619, 0.265883, 0.258027, 0.218576, 0.266571, 0.160168, 0.230303, 0.273633, 0.233298, 0.30175, 0.217069, 0.345145, 0.397901, 0.224499, 0.248101, 0.241335, 0.222947, 0.237094, 0.176518, 0.380032, 0.634775, 0.426193, 0.16362, 0.231097, 0.219898, 0.343789, 0.275578, 0.282022, 0.628542, 0.232184, 0.848367, 0.200754, 0.179177}, {0, 0, 2, 3, 3, 0, 2, 2, 2, 2, 3, 0, 3, 2, 2, 2, 3, 3, 3, 3, 2, 0, 0, 0, 2, 3, 3, 3, 2, 2, 0, 0, 2, 3, 3, 0, 0, 2, 0, 0, 3, 2, 3, 0, 3, 0, 3, 3, 0, 2, 0, 3, 2, 0, 3, 0, 3, 3, 3, 2, 2, 3, 0, 0, 3, 3, 0, 2, 2, 3, 0, 3, 2, 2, 2, 0, 2, 3, 3, 3, 2, 3, 3, 3, 2, 0, 2, 0, 3, 3, 3, 3, 2, 
2, 0, 2, 0, 3, 2, 2, 2, 0, 0, 3, 0, 2, 2, 3, 2, 3, 0, 2, 2, 2, 3, 2, 0, 0, 2, 3, 3, 2, 0, 2, 0, 0, 2, 0, 2, 2, 3, 2, 2, 0, 3, 0, 3, 2, 2, 2, 3, 3, 0, 0, 0, 3, 2, 3, 3, 3, 3, 0, 2, 0, 3, 2, 3, 2, 3, 0, 2, 3, 3, 2, 3, 3, 2, 2, 0, 0, 2, 3, 3, 2, 3, 0, 2, 0, 2, 0, 3, 2, 3, 2, 3, 0, 3, 0, 3, 0, 2, 3, 2, 2, 3, 0, 2, 2, 2, 0, 3, 2, 3, 3, 2, 3, 2, 3, 3, 2, 2, 0, 0, 2, 2, 3, 0, 3, 0, 2, 0, 0, 2, 3, 0, 3, 3, 2, 0, 3, 3, 0, 3, 0, 2, 2, 0, 2, 0, 2, 0, 0, 0, 2, 0, 3, 2, 3, 2, 3, 2, 2, 0, 2, 3, 2, 3, 2, 2, 2, 2, 3, 0, 2, 0, 0, 2, 3, 3, 0, 2, 3, 2, 2, 3, 0, 3, 0, 0, 2, 0, 2, 0, 2, 2, 3, 3, 2, 3, 0, 0, 3, 2, 2, 0, 3, 2, 0, 0, 3, 0, 0, 2, 0, 3, 2, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 2, 3, 0, 0, 2, 0, 0, 2, 0, 2, 3, 2, 3, 3, 2, 2, 0, 0, 0, 3, 0, 2, 0, 2, 0, 2, 2, 2, 3, 3, 0, 0, 3, 3, 3, 3, 3, 2, 3, 3, 2, 3, 3, 0, 2, 2, 2, 2, 0, 2, 0, 0, 0, 2, 2, 3, 3, 2, 3, 2, 3, 0, 2, 3, 0, 2, 0, 2, 2, 0, 3, 0, 2, 0, 2, 3, 0, 3, 0, 0, 0, 3, 2, 3, 3, 0, 3, 2, 3, 0, 2, 3, 3, 0, 2, 3, 0, 0, 0, 2, 0, 3, 0, 2, 3, 3, 3, 3, 3, 0, 2, 0, 2, 2, 3, 3, 0, 3, 0, 2, 0, 2, 0, 3, 0, 0, 0, 2, 3, 3, 2, 3, 0, 0, 0, 0, 3, 3, 0, 3, 2, 0, 2, 3, 2, 2, 3, 3, 2, 2, 2, 0, 2, 3, 0, 3, 3, 0, 0, 2, 0, 3, 2, 3, 0, 2, 0, 2, 2, 3, 2, 0, 3, 3, 3, 2, 3, 0, 3, 0, 2, 2, 0, 0, 0, 3, 0, 3, 3, 2, 3, 2, 3, 2, 3, 0, 2, 3, 0, 2, 0, 3, 3, 3, 3, 3, 3, 2, 0, 3, 2, 2, 2, 3, 3, 2, 3, 0, 2, 3, 3, 2, 2, 0, 0, 0, 0, 3, 0, 3, 3, 3, 0, 0, 0, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 3, 3, 0, 0, 2, 2, 3, 3, 2, 2, 0, 0, 3, 0, 0, 0, 2, 3, 0, 0, 0, 3, 0, 3, 0, 2, 2, 0, 0, 0, 0, 3, 2, 2, 3, 2, 3, 2, 2, 2, 2, 3, 0, 0, 2, 3, 0, 3, 3, 0, 3, 0, 0, 2, 0, 3, 3, 0, 2, 2, 3, 3, 0, 0, 2, 0, 2, 3, 2, 0, 0, 3, 3, 0, 3, 2, 0, 2, 0, 2, 3, 2, 0, 3, 3, 2, 0, 0, 2, 2, 0, 0, 2, 0, 3, 3, 2, 3, 2, 0, 3, 0, 2, 2, 3, 3, 0, 3, 2, 2, 0, 3, 0, 0, 0, 2, 0, 3, 2, 0, 2, 3, 2, 3, 2, 2, 3, 3, 0, 2, 3, 2, 3, 2, 2, 0, 3, 0, 3, 0, 2, 2, 2, 0, 2, 0, 2, 2, 0, 0, 3, 3, 0, 0, 3, 2, 0, 2, 3, 2, 2, 0, 3, 3, 0, 2, 0, 3, 3, 0, 2, 3, 2, 3, 2, 0, 2, 2, 0, 0, 0, 2, 2, 3, 3, 2, 2, 0, 2, 3, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 2, 3, 2, 0, 3, 3, 3, 0, 2, 0, 2, 3, 2, 0, 3, 3, 2, 0, 2, 0, 3, 2, 0, 3, 0, 0, 2, 2, 0, 3, 0, 2, 3, 3, 3, 0, 2, 0, 0, 3, 0, 2, 3, 2, 2, 0, 3, 3, 3, 3, 3, 0, 3, 0, 0, 0, 0, 3, 2, 0, 0, 2, 3, 3, 2, 2, 0, 3, 2, 0, 3, 0, 2, 3, 3, 0, 2, 2, 3, 2, 2, 2, 3, 2, 0, 0, 3, 2, 0, 0, 0, 2, 0, 2, 0, 0, 2, 2, 3, 0, 3, 0, 0, 3, 0, 0, 0, 3, 0, 0, 2, 2, 0, 2, 2, 3, 3, 3, 3, 0, 0, 2, 2, 2, 0, 3, 2, 2, 2, 2, 2, 0, 3, 0, 0, 3, 2, 0, 0, 3, 2, 3, 3, 0, 3, 0, 3, 0, 3, 2, 2, 2, 0, 0, 3, 2, 2, 0, 0, 0, 2, 3, 2, 0, 2, 3, 3, 3, 0, 3, 3, 0, 2, 0, 0, 2, 3, 3, 0, 3, 2, 2, 2, 2, 2, 3, 3, 2, 2, 3, 3, 2, 3, 0, 3, 3, 0, 3, 2, 2, 0, 2, 0, 3, 0, 3, 0, 2, 3, 0, 2, 3, 2, 0, 2, 0, 3, 0, 2, 3, 3, 2, 0, 3, 3, 3, 2, 2, 3, 3, 2, 2, 2, 0, 3, 2, 2, 0}, {271, 271, 329, 343, 387, 426, 426, 601}, {426, 601, 426, 387, 343, 271, 329, 271}, {3.70991, 4.43491, 3.76334, 9.43944, 9.43944, 3.70991, 3.76334, 4.43491}}}; typedef ConnectComponentsEdgesTest<int, float> ConnectComponentsEdgesTestF_Int; TEST_P(ConnectComponentsEdgesTestF_Int, Result) { EXPECT_TRUE(true); } INSTANTIATE_TEST_CASE_P(ConnectComponentsEdgesTest, ConnectComponentsEdgesTestF_Int, ::testing::ValuesIn(mr_fix_conn_inputsf2)); }; // namespace sparse }; // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/random/make_regression.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <thrust/count.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>

#include "../test_utils.cuh"
#include <raft/core/resources.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/subtract.cuh>
#include <raft/linalg/transpose.cuh>
#include <raft/random/make_regression.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

namespace raft::random {

/**
 * Parameters for one make_regression test case: problem shape, generator
 * configuration, and the tolerance used when comparing the targets returned
 * by make_regression against a cuBLAS recomputation of X * coef + bias.
 */
template <typename T>
struct MakeRegressionInputs {
  T tolerance;
  int n_samples, n_features, n_informative, n_targets, effective_rank;
  T bias;
  bool shuffle;
  raft::random::GeneratorType gtype;
  uint64_t seed;
};

/**
 * Fixture for the raw-pointer make_regression API.
 *
 * SetUp generates a regression problem with zero noise, then independently
 * recomputes the target values as data * coef^T + bias via cuBLAS so the
 * TEST_P body can compare them, and counts the zero entries of the
 * coefficient matrix (the non-informative features must have zero weight).
 */
template <typename T>
class MakeRegressionTest : public ::testing::TestWithParam<MakeRegressionInputs<T>> {
 public:
  // Init-list kept in member-declaration order (handle is default-constructed
  // first, then stream, params, and the two vectors) to avoid -Wreorder.
  MakeRegressionTest()
    : stream(resource::get_cuda_stream(handle)),
      params(::testing::TestWithParam<MakeRegressionInputs<T>>::GetParam()),
      values_ret(params.n_samples * params.n_targets, stream),
      values_prod(params.n_samples * params.n_targets, stream)
  {
  }

 protected:
  void SetUp() override
  {
    // Noise must be zero to compare the actual and expected values
    T noise = (T)0.0, tail_strength = (T)0.5;

    rmm::device_uvector<T> data(params.n_samples * params.n_features, stream);
    rmm::device_uvector<T> values_cm(params.n_samples * params.n_targets, stream);
    rmm::device_uvector<T> coef(params.n_features * params.n_targets, stream);

    // Create the regression problem
    make_regression(handle,
                    data.data(),
                    values_ret.data(),
                    params.n_samples,
                    params.n_features,
                    params.n_informative,
                    stream,
                    coef.data(),
                    params.n_targets,
                    params.bias,
                    params.effective_rank,
                    tail_strength,
                    noise,
                    params.shuffle,
                    params.seed,
                    params.gtype);

    // FIXME (mfh 2022/09/07) This test passes even if it doesn't call
    // make_regression. Please see
    // https://github.com/rapidsai/raft/issues/814.

    // Calculate the values from the data and coefficients (column-major):
    // values_cm = 1 * data^T * coef^T + 0 * values_cm
    T alpha = (T)1.0, beta = (T)0.0;
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(resource::get_cublas_handle(handle),
                                                     CUBLAS_OP_T,
                                                     CUBLAS_OP_T,
                                                     params.n_samples,
                                                     params.n_targets,
                                                     params.n_features,
                                                     &alpha,
                                                     data.data(),
                                                     params.n_features,
                                                     coef.data(),
                                                     params.n_targets,
                                                     &beta,
                                                     values_cm.data(),
                                                     params.n_samples,
                                                     stream));

    // Transpose the values to row-major
    raft::linalg::transpose(
      handle, values_cm.data(), values_prod.data(), params.n_samples, params.n_targets, stream);

    // Add the bias
    raft::linalg::addScalar(values_prod.data(),
                            values_prod.data(),
                            params.bias,
                            params.n_samples * params.n_targets,
                            stream);

    // Count the number of zeroes in the coefficients
    thrust::device_ptr<T> __coef = thrust::device_pointer_cast(coef.data());
    zero_count = thrust::count(__coef, __coef + params.n_features * params.n_targets, (T)0.0);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream = 0;
  MakeRegressionInputs<T> params;
  rmm::device_uvector<T> values_ret, values_prod;  // generated vs. recomputed targets
  int zero_count;  // zeros in coef; expect n_targets * (n_features - n_informative)
};

typedef MakeRegressionTest<float> MakeRegressionTestF;
const std::vector<MakeRegressionInputs<float>> inputsf_t = {
  {0.01f, 256, 32, 16, 1, -1, 0.f, true, raft::random::GenPC, 1234ULL},
  {0.01f, 1000, 100, 47, 4, 65, 4.2f, true, raft::random::GenPC, 1234ULL},
  {0.01f, 20000, 500, 450, 13, -1, -3.f, false, raft::random::GenPC, 1234ULL},
  {0.01f, 256, 32, 16, 1, -1, 0.f, true, raft::random::GenPhilox, 1234ULL},
  {0.01f, 1000, 100, 47, 4, 65, 4.2f, true, raft::random::GenPhilox, 1234ULL},
  {0.01f, 20000, 500, 450, 13, -1, -3.f, false, raft::random::GenPhilox, 1234ULL}};

TEST_P(MakeRegressionTestF, Result)
{
  ASSERT_TRUE(match(
    params.n_targets * (params.n_features - params.n_informative), zero_count, raft::Compare<int>()));
  ASSERT_TRUE(devArrMatch(values_ret.data(),
                          values_prod.data(),
                          params.n_samples,
                          params.n_targets,
                          raft::CompareApprox<float>(params.tolerance),
                          stream));
}
INSTANTIATE_TEST_CASE_P(MakeRegressionTests, MakeRegressionTestF, ::testing::ValuesIn(inputsf_t));

typedef MakeRegressionTest<double> MakeRegressionTestD;
const std::vector<MakeRegressionInputs<double>> inputsd_t = {
  {0.01, 256, 32, 16, 1, -1, 0.0, true, raft::random::GenPC, 1234ULL},
  {0.01, 1000, 100, 47, 4, 65, 4.2, true, raft::random::GenPC, 1234ULL},
  {0.01, 20000, 500, 450, 13, -1, -3.0, false, raft::random::GenPC, 1234ULL},
  {0.01, 256, 32, 16, 1, -1, 0.0, true, raft::random::GenPhilox, 1234ULL},
  {0.01, 1000, 100, 47, 4, 65, 4.2, true, raft::random::GenPhilox, 1234ULL},
  {0.01, 20000, 500, 450, 13, -1, -3.0, false, raft::random::GenPhilox, 1234ULL}};

TEST_P(MakeRegressionTestD, Result)
{
  ASSERT_TRUE(match(
    params.n_targets * (params.n_features - params.n_informative), zero_count, raft::Compare<int>()));
  ASSERT_TRUE(devArrMatch(values_ret.data(),
                          values_prod.data(),
                          params.n_samples,
                          params.n_targets,
                          raft::CompareApprox<double>(params.tolerance),
                          stream));
}
INSTANTIATE_TEST_CASE_P(MakeRegressionTests, MakeRegressionTestD, ::testing::ValuesIn(inputsd_t));

/**
 * Fixture for the mdspan make_regression overload. Mirrors
 * MakeRegressionTest, but passes device_matrix_views instead of raw pointers.
 */
template <typename T>
class MakeRegressionMdspanTest : public ::testing::TestWithParam<MakeRegressionInputs<T>> {
 public:
  MakeRegressionMdspanTest() = default;

 protected:
  void SetUp() override
  {
    auto stream = resource::get_cuda_stream(handle);

    // Noise must be zero to compare the actual and expected values
    T noise = (T)0.0, tail_strength = (T)0.5;

    rmm::device_uvector<T> data(params.n_samples * params.n_features, stream);
    rmm::device_uvector<T> values_cm(params.n_samples * params.n_targets, stream);
    rmm::device_uvector<T> coef(params.n_features * params.n_targets, stream);

    using index_type = typename rmm::device_uvector<T>::index_type;
    using matrix_view =
      raft::device_matrix_view<T, raft::matrix_extent<index_type>, raft::row_major>;
    matrix_view out_mat(data.data(), params.n_samples, params.n_features);
    matrix_view values_mat(values_ret.data(), params.n_samples, params.n_targets);
    matrix_view coef_mat(coef.data(), params.n_features, params.n_targets);

    // Create the regression problem
    make_regression(handle,
                    out_mat,
                    values_mat,
                    params.n_informative,
                    coef_mat,
                    params.bias,
                    params.effective_rank,
                    tail_strength,
                    noise,
                    params.shuffle,
                    params.seed,
                    params.gtype);

    // FIXME (mfh 2022/09/07) This test passes even if it doesn't call
    // make_regression. Please see
    // https://github.com/rapidsai/raft/issues/814.

    // Calculate the values from the data and coefficients (column-major).
    // BUG FIX: alpha was value-initialized (T alpha{};), i.e. zero, which
    // makes GEMM compute 0*A*B + 0*C and would leave values_cm all zeros.
    // It must be 1, matching MakeRegressionTest::SetUp above.
    T alpha = (T)1.0;
    T beta{};
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(resource::get_cublas_handle(handle),
                                                     CUBLAS_OP_T,
                                                     CUBLAS_OP_T,
                                                     params.n_samples,
                                                     params.n_targets,
                                                     params.n_features,
                                                     &alpha,
                                                     data.data(),
                                                     params.n_features,
                                                     coef.data(),
                                                     params.n_targets,
                                                     &beta,
                                                     values_cm.data(),
                                                     params.n_samples,
                                                     stream));

    // Transpose the values to row-major
    raft::linalg::transpose(
      handle, values_cm.data(), values_prod.data(), params.n_samples, params.n_targets, stream);

    // Add the bias
    raft::linalg::addScalar(values_prod.data(),
                            values_prod.data(),
                            params.bias,
                            params.n_samples * params.n_targets,
                            stream);

    // Count the number of zeroes in the coefficients
    thrust::device_ptr<T> __coef = thrust::device_pointer_cast(coef.data());
    constexpr T ZERO{};
    zero_count = thrust::count(__coef, __coef + params.n_features * params.n_targets, ZERO);
  }

 protected:
  // NOTE(review): these were `private`, which would keep the TEST_P bodies
  // below from compiling once the fixture aliases actually use this class;
  // made `protected` like MakeRegressionTest's members.
  MakeRegressionInputs<T> params{::testing::TestWithParam<MakeRegressionInputs<T>>::GetParam()};
  raft::resources handle;
  rmm::device_uvector<T> values_ret{params.n_samples * params.n_targets,
                                    resource::get_cuda_stream(handle)};
  rmm::device_uvector<T> values_prod{params.n_samples * params.n_targets,
                                     resource::get_cuda_stream(handle)};
  int zero_count = -1;
};

// BUG FIX: these aliases previously pointed at MakeRegressionTest<...>, so the
// "Mdspan" tests re-ran the raw-pointer fixture and the mdspan overload was
// never exercised (see raft issue #814).
using MakeRegressionMdspanTestF = MakeRegressionMdspanTest<float>;
TEST_P(MakeRegressionMdspanTestF, Result)
{
  ASSERT_TRUE(match(
    params.n_targets * (params.n_features - params.n_informative), zero_count, raft::Compare<int>()));
  ASSERT_TRUE(devArrMatch(values_ret.data(),
                          values_prod.data(),
                          params.n_samples,
                          params.n_targets,
                          raft::CompareApprox<float>(params.tolerance),
                          resource::get_cuda_stream(handle)));
}
INSTANTIATE_TEST_CASE_P(MakeRegressionMdspanTests,
                        MakeRegressionMdspanTestF,
                        ::testing::ValuesIn(inputsf_t));

using MakeRegressionMdspanTestD = MakeRegressionMdspanTest<double>;
TEST_P(MakeRegressionMdspanTestD, Result)
{
  ASSERT_TRUE(match(
    params.n_targets * (params.n_features - params.n_informative), zero_count, raft::Compare<int>()));
  ASSERT_TRUE(devArrMatch(values_ret.data(),
                          values_prod.data(),
                          params.n_samples,
                          params.n_targets,
                          raft::CompareApprox<double>(params.tolerance),
                          resource::get_cuda_stream(handle)));
}
INSTANTIATE_TEST_CASE_P(MakeRegressionMdspanTests,
                        MakeRegressionMdspanTestD,
                        ::testing::ValuesIn(inputsd_t));

}  // end namespace raft::random
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/random/permute.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <algorithm> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/random/permute.cuh> #include <raft/random/rng.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <vector> namespace raft { namespace random { template <typename T> struct PermInputs { int N, D; bool needPerms, needShuffle, rowMajor; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const PermInputs<T>& dims) { return os; } template <typename T> class PermTest : public ::testing::TestWithParam<PermInputs<T>> { public: using test_data_type = T; protected: PermTest() : in(0, resource::get_cuda_stream(handle)), out(0, resource::get_cuda_stream(handle)), outPerms(0, resource::get_cuda_stream(handle)) { } void SetUp() override { auto stream = resource::get_cuda_stream(handle); params = ::testing::TestWithParam<PermInputs<T>>::GetParam(); // forcefully set needPerms, since we need it for unit-testing! 
if (params.needShuffle) { params.needPerms = true; } raft::random::RngState r(params.seed); int N = params.N; int D = params.D; int len = N * D; if (params.needPerms) { outPerms.resize(N, stream); outPerms_ptr = outPerms.data(); } if (params.needShuffle) { in.resize(len, stream); out.resize(len, stream); in_ptr = in.data(); out_ptr = out.data(); uniform(handle, r, in_ptr, len, T(-1.0), T(1.0)); } permute(outPerms_ptr, out_ptr, in_ptr, D, N, params.rowMajor, stream); resource::sync_stream(handle); } protected: raft::resources handle; PermInputs<T> params; rmm::device_uvector<T> in, out; T* in_ptr = nullptr; T* out_ptr = nullptr; rmm::device_uvector<int> outPerms; int* outPerms_ptr = nullptr; }; template <typename T> class PermMdspanTest : public ::testing::TestWithParam<PermInputs<T>> { public: using test_data_type = T; protected: PermMdspanTest() : in(0, resource::get_cuda_stream(handle)), out(0, resource::get_cuda_stream(handle)), outPerms(0, resource::get_cuda_stream(handle)) { } private: using index_type = int; template <class ElementType, class Layout> using matrix_view_t = raft::device_matrix_view<ElementType, index_type, Layout>; template <class ElementType> using vector_view_t = raft::device_vector_view<ElementType, index_type>; protected: void SetUp() override { auto stream = resource::get_cuda_stream(handle); params = ::testing::TestWithParam<PermInputs<T>>::GetParam(); // forcefully set needPerms, since we need it for unit-testing! 
if (params.needShuffle) { params.needPerms = true; } raft::random::RngState r(params.seed); int N = params.N; int D = params.D; int len = N * D; if (params.needPerms) { outPerms.resize(N, stream); outPerms_ptr = outPerms.data(); } if (params.needShuffle) { in.resize(len, stream); out.resize(len, stream); in_ptr = in.data(); out_ptr = out.data(); uniform(handle, r, in_ptr, len, T(-1.0), T(1.0)); } auto set_up_views_and_test = [&](auto layout) { using layout_type = std::decay_t<decltype(layout)>; matrix_view_t<const T, layout_type> in_view(in_ptr, N, D); std::optional<matrix_view_t<T, layout_type>> out_view; if (out_ptr != nullptr) { out_view.emplace(out_ptr, N, D); } std::optional<vector_view_t<index_type>> outPerms_view; if (outPerms_ptr != nullptr) { outPerms_view.emplace(outPerms_ptr, N); } permute(handle, in_view, outPerms_view, out_view); // None of these three permute calls should have an effect. // The point is to test whether the function can deduce the // element type of outPerms if given nullopt. 
std::optional<matrix_view_t<T, layout_type>> out_view_empty; std::optional<vector_view_t<index_type>> outPerms_view_empty; permute(handle, in_view, std::nullopt, out_view_empty); permute(handle, in_view, outPerms_view_empty, std::nullopt); permute(handle, in_view, std::nullopt, std::nullopt); }; if (params.rowMajor) { set_up_views_and_test(raft::row_major{}); } else { set_up_views_and_test(raft::col_major{}); } resource::sync_stream(handle); } protected: raft::resources handle; PermInputs<T> params; rmm::device_uvector<T> in, out; T* in_ptr = nullptr; T* out_ptr = nullptr; rmm::device_uvector<int> outPerms; int* outPerms_ptr = nullptr; }; template <typename T, typename L> ::testing::AssertionResult devArrMatchRange( const T* actual, size_t size, T start, L eq_compare, bool doSort = true, cudaStream_t stream = 0) { std::vector<T> act_h(size); raft::update_host<T>(&(act_h[0]), actual, size, stream); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); if (doSort) std::sort(act_h.begin(), act_h.end()); for (size_t i(0); i < size; ++i) { auto act = act_h[i]; auto expected = start + i; if (!eq_compare(expected, act)) { return ::testing::AssertionFailure() << "actual=" << act << " != expected=" << expected << " @" << i; } } return ::testing::AssertionSuccess(); } template <typename T, typename L> ::testing::AssertionResult devArrMatchShuffle(const int* perms, const T* out, const T* in, int D, int N, bool rowMajor, L eq_compare, cudaStream_t stream = 0) { std::vector<int> h_perms(N); raft::update_host<int>(&(h_perms[0]), perms, N, stream); std::vector<T> h_out(N * D), h_in(N * D); raft::update_host<T>(&(h_out[0]), out, N * D, stream); raft::update_host<T>(&(h_in[0]), in, N * D, stream); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); for (int i = 0; i < N; ++i) { for (int j = 0; j < D; ++j) { int outPos = rowMajor ? i * D + j : j * N + i; int inPos = rowMajor ? 
h_perms[i] * D + j : j * N + h_perms[i]; auto act = h_out[outPos]; auto expected = h_in[inPos]; if (!eq_compare(expected, act)) { return ::testing::AssertionFailure() << "actual=" << act << " != expected=" << expected << " @" << i << ", " << j; } } } return ::testing::AssertionSuccess(); } const std::vector<PermInputs<float>> inputsf = { // only generate permutations {32, 8, true, false, true, 1234ULL}, {32, 8, true, false, true, 1234567890ULL}, {1024, 32, true, false, true, 1234ULL}, {1024, 32, true, false, true, 1234567890ULL}, {2 * 1024, 32, true, false, true, 1234ULL}, {2 * 1024, 32, true, false, true, 1234567890ULL}, {2 * 1024 + 500, 32, true, false, true, 1234ULL}, {2 * 1024 + 500, 32, true, false, true, 1234567890ULL}, {100000, 32, true, false, true, 1234ULL}, {100000, 32, true, false, true, 1234567890ULL}, {100001, 33, true, false, true, 1234567890ULL}, // permute and shuffle the data row major {32, 8, true, true, true, 1234ULL}, {32, 8, true, true, true, 1234567890ULL}, {1024, 32, true, true, true, 1234ULL}, {1024, 32, true, true, true, 1234567890ULL}, {2 * 1024, 32, true, true, true, 1234ULL}, {2 * 1024, 32, true, true, true, 1234567890ULL}, {2 * 1024 + 500, 32, true, true, true, 1234ULL}, {2 * 1024 + 500, 32, true, true, true, 1234567890ULL}, {100000, 32, true, true, true, 1234ULL}, {100000, 32, true, true, true, 1234567890ULL}, {100001, 31, true, true, true, 1234567890ULL}, // permute and shuffle the data column major {32, 8, true, true, false, 1234ULL}, {32, 8, true, true, false, 1234567890ULL}, {1024, 32, true, true, false, 1234ULL}, {1024, 32, true, true, false, 1234567890ULL}, {2 * 1024, 32, true, true, false, 1234ULL}, {2 * 1024, 32, true, true, false, 1234567890ULL}, {2 * 1024 + 500, 32, true, true, false, 1234ULL}, {2 * 1024 + 500, 32, true, true, false, 1234567890ULL}, {100000, 32, true, true, false, 1234ULL}, {100000, 32, true, true, false, 1234567890ULL}, {100001, 33, true, true, false, 1234567890ULL}}; #define _PERMTEST_BODY(DATA_TYPE) \ do { 
\
    if (params.needPerms) {                                                                \
      ASSERT_TRUE(devArrMatchRange(outPerms_ptr, params.N, 0, raft::Compare<int>()));      \
    }                                                                                      \
    if (params.needShuffle) {                                                              \
      ASSERT_TRUE(devArrMatchShuffle(outPerms_ptr,                                         \
                                     out_ptr,                                              \
                                     in_ptr,                                               \
                                     params.D,                                             \
                                     params.N,                                             \
                                     params.rowMajor,                                      \
                                     raft::Compare<DATA_TYPE>()));                         \
    }                                                                                      \
  } while (false)

// NOTE: each TEST_P below must take test_data_type from a fixture of the *matching*
// precision so that devArrMatchShuffle uses the right raft::Compare<T>. The
// double-precision tests previously aliased PermTestF::test_data_type (float), which
// silently compared double data with a float comparator.
using PermTestF = PermTest<float>;
TEST_P(PermTestF, Result)
{
  using test_data_type = PermTestF::test_data_type;
  _PERMTEST_BODY(test_data_type);
}
INSTANTIATE_TEST_CASE_P(PermTests, PermTestF, ::testing::ValuesIn(inputsf));

using PermMdspanTestF = PermMdspanTest<float>;
TEST_P(PermMdspanTestF, Result)
{
  using test_data_type = PermTestF::test_data_type;
  _PERMTEST_BODY(test_data_type);
}
INSTANTIATE_TEST_CASE_P(PermMdspanTests, PermMdspanTestF, ::testing::ValuesIn(inputsf));

const std::vector<PermInputs<double>> inputsd = {
  // only generate permutations
  {32, 8, true, false, true, 1234ULL},
  {32, 8, true, false, true, 1234567890ULL},
  {1024, 32, true, false, true, 1234ULL},
  {1024, 32, true, false, true, 1234567890ULL},
  {2 * 1024, 32, true, false, true, 1234ULL},
  {2 * 1024, 32, true, false, true, 1234567890ULL},
  {2 * 1024 + 500, 32, true, false, true, 1234ULL},
  {2 * 1024 + 500, 32, true, false, true, 1234567890ULL},
  {100000, 32, true, false, true, 1234ULL},
  {100000, 32, true, false, true, 1234567890ULL},
  {100001, 33, true, false, true, 1234567890ULL},
  // permute and shuffle the data row major
  {32, 8, true, true, true, 1234ULL},
  {32, 8, true, true, true, 1234567890ULL},
  {1024, 32, true, true, true, 1234ULL},
  {1024, 32, true, true, true, 1234567890ULL},
  {2 * 1024, 32, true, true, true, 1234ULL},
  {2 * 1024, 32, true, true, true, 1234567890ULL},
  {2 * 1024 + 500, 32, true, true, true, 1234ULL},
  {2 * 1024 + 500, 32, true, true, true, 1234567890ULL},
  {100000, 32, true, true, true, 1234ULL},
  {100000, 32, true, true, true, 1234567890ULL},
  {100001, 31, true, true, true, 1234567890ULL},
  // permute and shuffle the data column major
  {32, 8, true, true, false, 1234ULL},
  {32, 8, true, true, false, 1234567890ULL},
  {1024, 32, true, true, false, 1234ULL},
  {1024, 32, true, true, false, 1234567890ULL},
  {2 * 1024, 32, true, true, false, 1234ULL},
  {2 * 1024, 32, true, true, false, 1234567890ULL},
  {2 * 1024 + 500, 32, true, true, false, 1234ULL},
  {2 * 1024 + 500, 32, true, true, false, 1234567890ULL},
  {100000, 32, true, true, false, 1234ULL},
  {100000, 32, true, true, false, 1234567890ULL},
  {100001, 33, true, true, false, 1234567890ULL}};

using PermTestD = PermTest<double>;
TEST_P(PermTestD, Result)
{
  // Fixed: was PermTestF::test_data_type (float), which made devArrMatchShuffle
  // compare double data with raft::Compare<float>.
  using test_data_type = PermTestD::test_data_type;
  _PERMTEST_BODY(test_data_type);
}
INSTANTIATE_TEST_CASE_P(PermTests, PermTestD, ::testing::ValuesIn(inputsd));

using PermMdspanTestD = PermMdspanTest<double>;
TEST_P(PermMdspanTestD, Result)
{
  // Fixed: was PermTestF::test_data_type (float); use the double fixture's type.
  using test_data_type = PermTestD::test_data_type;
  _PERMTEST_BODY(test_data_type);
}
INSTANTIATE_TEST_CASE_P(PermMdspanTests, PermMdspanTestD, ::testing::ValuesIn(inputsd));

}  // end namespace random
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/random/multi_variable_gaussian.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"

#include <cmath>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cusolver_dn_handle.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/multi_variable_gaussian.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
#include <rmm/device_uvector.hpp>

// mvg.h takes in column-major matrices (as in Fortran); IDX2C maps a (row i,
// col j) pair to the flat column-major offset for leading dimension ld.
#define IDX2C(i, j, ld) (j * ld + i)

namespace raft::random {

// helper kernels
/// @todo Duplicate called vctwiseAccumulate in utils.h (Kalman Filters,
// i think that is much better to use., more general)
// Sums each column of the nPoints x dim matrix X (row-major; idx = row * dim + col)
// into x[col] via atomicAdd. One thread per matrix element; note col = idx % dim is
// always < dim for non-negative idx, so row < nPoints is the effective bounds guard.
template <typename T>
RAFT_KERNEL En_KF_accumulate(const int nPoints, const int dim, const T* X, T* x)
{
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  int col = idx % dim;
  int row = idx / dim;
  if (col < dim && row < nPoints) raft::myAtomicAdd(x + col, X[idx]);
}

// Divides the first `dim` entries of x by `divider`. Callers below pass nPoints as
// the divider to turn the per-column sums from En_KF_accumulate into column means.
template <typename T>
RAFT_KERNEL En_KF_normalize(const int divider, const int dim, T* x)
{
  int xi = threadIdx.x + blockDim.x * blockIdx.x;
  if (xi < dim) x[xi] = x[xi] / divider;
}

// Subtracts the column mean x[col] from every element of the nPoints x dim matrix X,
// writing the centered result to X_diff (callers pass X_diff == X for in-place use).
template <typename T>
RAFT_KERNEL En_KF_dif(const int nPoints, const int dim, const T* X, const T* x, T* X_diff)
{
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  int col = idx % dim;
  int row = idx / dim;
  if (col < dim && row < nPoints) X_diff[idx] = X[idx] - x[col];
}

// for specialising tests
enum Correlation : unsigned char {
  CORRELATED,  // = 0
  UNCORRELATED
};

// Test parameters: comparison tolerance, the decomposition method under test,
// whether the generated covariance has off-diagonal terms, the problem size
// (dim x nPoints samples), and the RNG seed.
template <typename T>
struct MVGInputs {
  T tolerance;
  typename detail::multi_variable_gaussian<T>::Decomposer method;
  Correlation corr;
  int dim, nPoints;
  unsigned long long int seed;
};

// GTest requires a printable parameter type; intentionally prints nothing.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MVGInputs<T>& dims)
{
  return os;
}

// Draws nPoints samples from N(x, P) with the legacy detail:: API, then estimates
// the sample mean (Rand_mean) and sample covariance (Rand_cov) on the GPU so the
// TEST_P bodies can compare them against the requested x and P.
template <typename T>
class MVGTest : public ::testing::TestWithParam<MVGInputs<T>> {
 public:
  MVGTest()
    : params(::testing::TestWithParam<MVGInputs<T>>::GetParam()),
      workspace_d(0, resource::get_cuda_stream(handle)),
      P_d(0, resource::get_cuda_stream(handle)),
      x_d(0, resource::get_cuda_stream(handle)),
      X_d(0, resource::get_cuda_stream(handle)),
      Rand_cov(0, resource::get_cuda_stream(handle)),
      Rand_mean(0, resource::get_cuda_stream(handle))
  {
  }

 protected:
  void SetUp() override
  {
    // getting params
    params    = ::testing::TestWithParam<MVGInputs<T>>::GetParam();
    dim       = params.dim;
    nPoints   = params.nPoints;
    method    = params.method;
    corr      = params.corr;
    tolerance = params.tolerance;
    auto cublasH   = resource::get_cublas_handle(handle);
    auto cusolverH = resource::get_cusolver_dn_handle(handle);
    auto stream    = resource::get_cuda_stream(handle);
    // preparing to store stuff
    P.resize(dim * dim);
    x.resize(dim);
    X.resize(dim * nPoints);
    P_d.resize(dim * dim, stream);
    X_d.resize(nPoints * dim, stream);
    x_d.resize(dim, stream);
    Rand_cov.resize(dim * dim, stream);
    Rand_mean.resize(dim, stream);
    // generating random mean and cov. (mean entries land in [5, 104])
    srand(params.seed);
    for (int j = 0; j < dim; j++)
      x.data()[j] = rand() % 100 + 5.0f;
    // for random Cov. matrix
    std::default_random_engine generator(params.seed);
    std::uniform_real_distribution<T> distribution(0.0, 1.0);
    // P (developing a +ve definite symm matrix): symmetric with random
    // off-diagonals in [0, 1) (zeroed for UNCORRELATED) and `dim` added to the
    // diagonal to ensure diagonal dominance, hence positive definiteness.
    for (int j = 0; j < dim; j++) {
      for (int i = 0; i < j + 1; i++) {
        T k = distribution(generator);
        if (corr == UNCORRELATED) k = 0.0;
        P.data()[IDX2C(i, j, dim)] = k;
        P.data()[IDX2C(j, i, dim)] = k;
        if (i == j) P.data()[IDX2C(i, j, dim)] += dim;
      }
    }
    // porting inputs to gpu
    raft::update_device(P_d.data(), P.data(), dim * dim, stream);
    raft::update_device(x_d.data(), x.data(), dim, stream);
    // initializing the mvg
    // NOTE(review): raw new/delete pairing with TearDown; std::unique_ptr would be
    // leak-safe if an assertion fires mid-SetUp — left as-is (comment-only review).
    mvg           = new detail::multi_variable_gaussian<T>(handle, dim, method);
    std::size_t o = mvg->get_workspace_size();
    // give the workspace area to mvg
    workspace_d.resize(o, stream);
    mvg->set_workspace(workspace_d.data());
    // get gaussians in X_d | P_d is destroyed (give_gaussian factorizes in place).
    mvg->give_gaussian(nPoints, P_d.data(), X_d.data(), x_d.data());
    // saving the mean of the randoms in Rand_mean
    //@todo can be swapped with an API that calculates mean
    RAFT_CUDA_TRY(cudaMemset(Rand_mean.data(), 0, dim * sizeof(T)));
    dim3 block = (64);
    dim3 grid  = (raft::ceildiv(nPoints * dim, (int)block.x));
    En_KF_accumulate<<<grid, block, 0, stream>>>(nPoints, dim, X_d.data(), Rand_mean.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    grid = (raft::ceildiv(dim, (int)block.x));
    // divide the column sums by nPoints to get the sample mean
    En_KF_normalize<<<grid, block, 0, stream>>>(nPoints, dim, Rand_mean.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    // storing the error wrt random point mean in X_d (in-place centering)
    grid = (raft::ceildiv(dim * nPoints, (int)block.x));
    En_KF_dif<<<grid, block, 0, stream>>>(nPoints, dim, X_d.data(), Rand_mean.data(), X_d.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    // finding the cov matrix, placing in Rand_cov:
    // Rand_cov = (1 / (nPoints - 1)) * X_centered * X_centered^T  (dim x dim)
    T alfa = 1.0 / (nPoints - 1), beta = 0.0;
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublasH,
                                                     CUBLAS_OP_N,
                                                     CUBLAS_OP_T,
                                                     dim,
                                                     dim,
                                                     nPoints,
                                                     &alfa,
                                                     X_d.data(),
                                                     dim,
                                                     X_d.data(),
                                                     dim,
                                                     &beta,
                                                     Rand_cov.data(),
                                                     dim,
                                                     stream));
    // restoring cov provided into P_d (it was destroyed by give_gaussian above)
    raft::update_device(P_d.data(), P.data(), dim * dim, stream);
  }

  void TearDown() override
  {
    // deleting mvg
    delete mvg;
  }

 protected:
  raft::resources handle;
  MVGInputs<T> params;
  rmm::device_uvector<T> workspace_d, P_d, x_d, X_d, Rand_cov, Rand_mean;
  std::vector<T> P, x, X;
  int dim, nPoints;
  typename detail::multi_variable_gaussian<T>::Decomposer method;
  Correlation corr;
  detail::multi_variable_gaussian<T>* mvg = NULL;
  T tolerance;
};  // end of MVGTest class

// Same statistical check as MVGTest, but exercising the public mdspan-based
// raft::random::multi_variable_gaussian overload instead of the detail:: class.
template <typename T>
class MVGMdspanTest : public ::testing::TestWithParam<MVGInputs<T>> {
 private:
  // Maps the legacy detail:: Decomposer enum (used by the shared MVGInputs) to the
  // public decomposition-method enum accepted by the mdspan overload.
  static auto old_enum_to_new_enum(typename detail::multi_variable_gaussian<T>::Decomposer method)
  {
    if (method == detail::multi_variable_gaussian<T>::chol_decomp) {
      return multi_variable_gaussian_decomposition_method::CHOLESKY;
    } else if (method == detail::multi_variable_gaussian<T>::jacobi) {
      return multi_variable_gaussian_decomposition_method::JACOBI;
    } else {
      return multi_variable_gaussian_decomposition_method::QR;
    }
  }

 public:
  MVGMdspanTest()
    : workspace_d(0, resource::get_cuda_stream(handle)),
      P_d(0, resource::get_cuda_stream(handle)),
      x_d(0, resource::get_cuda_stream(handle)),
      X_d(0, resource::get_cuda_stream(handle)),
      Rand_cov(0, resource::get_cuda_stream(handle)),
      Rand_mean(0, resource::get_cuda_stream(handle))
  {
  }

  void SetUp() override
  {
    params    = ::testing::TestWithParam<MVGInputs<T>>::GetParam();
    dim       = params.dim;
    nPoints   = params.nPoints;
    auto method = old_enum_to_new_enum(params.method);
    corr      = params.corr;
    tolerance = params.tolerance;
    auto cublasH   = resource::get_cublas_handle(handle);
    auto cusolverH = resource::get_cusolver_dn_handle(handle);
    auto stream    = resource::get_cuda_stream(handle);
    P.resize(dim * dim);
    x.resize(dim);
    X.resize(dim * nPoints);
    P_d.resize(dim * dim, stream);
    X_d.resize(nPoints * dim, stream);
    x_d.resize(dim, stream);
    Rand_cov.resize(dim * dim, stream);
    Rand_mean.resize(dim, stream);
    // random mean in [5, 104], same construction as MVGTest
    srand(params.seed);
    for (int j = 0; j < dim; j++)
      x.data()[j] = rand() % 100 + 5.0f;
    std::default_random_engine generator(params.seed);
    std::uniform_real_distribution<T> distribution(0.0, 1.0);
    // P (symmetric positive definite matrix): diagonally dominant by adding dim.
    for (int j = 0; j < dim; j++) {
      for (int i = 0; i < j + 1; i++) {
        T k = distribution(generator);
        if (corr == UNCORRELATED) k = 0.0;
        P.data()[IDX2C(i, j, dim)] = k;
        P.data()[IDX2C(j, i, dim)] = k;
        if (i == j) P.data()[IDX2C(i, j, dim)] += dim;
      }
    }
    raft::update_device(P_d.data(), P.data(), dim * dim, stream);
    raft::update_device(x_d.data(), x.data(), dim, stream);
    // wrap device buffers in (column-major) mdspan views for the public API
    std::optional<raft::device_vector_view<const T, int>> x_view(std::in_place, x_d.data(), dim);
    raft::device_matrix_view<T, int, raft::col_major> P_view(P_d.data(), dim, dim);
    raft::device_matrix_view<T, int, raft::col_major> X_view(X_d.data(), dim, nPoints);
    rmm::mr::device_memory_resource* mem_resource_ptr = rmm::mr::get_current_device_resource();
    ASSERT_TRUE(mem_resource_ptr != nullptr);
    raft::random::multi_variable_gaussian(
      handle, *mem_resource_ptr, x_view, P_view, X_view, method);
    // saving the mean of the randoms in Rand_mean
    //@todo can be swapped with an API that calculates mean
    RAFT_CUDA_TRY(cudaMemset(Rand_mean.data(), 0, dim * sizeof(T)));
    dim3 block = (64);
    dim3 grid  = (raft::ceildiv(nPoints * dim, (int)block.x));
    En_KF_accumulate<<<grid, block, 0, stream>>>(nPoints, dim, X_d.data(), Rand_mean.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    grid = (raft::ceildiv(dim, (int)block.x));
    En_KF_normalize<<<grid, block, 0, stream>>>(nPoints, dim, Rand_mean.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    // storing the error wrt random point mean in X_d
    grid = (raft::ceildiv(dim * nPoints, (int)block.x));
    En_KF_dif<<<grid, block, 0, stream>>>(nPoints, dim, X_d.data(), Rand_mean.data(), X_d.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    // finding the cov matrix, placing in Rand_cov
    T alfa = 1.0 / (nPoints - 1), beta = 0.0;
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublasH,
                                                     CUBLAS_OP_N,
                                                     CUBLAS_OP_T,
                                                     dim,
                                                     dim,
                                                     nPoints,
                                                     &alfa,
                                                     X_d.data(),
                                                     dim,
                                                     X_d.data(),
                                                     dim,
                                                     &beta,
                                                     Rand_cov.data(),
                                                     dim,
                                                     stream));
    // restoring cov provided into P_d
    raft::update_device(P_d.data(), P.data(), dim * dim, stream);
  }

 protected:
  raft::resources handle;
  MVGInputs<T> params;
  std::vector<T> P, x, X;
  rmm::device_uvector<T> workspace_d, P_d, x_d, X_d, Rand_cov, Rand_mean;
  int dim, nPoints;
  Correlation corr;
  T tolerance;
};  // end of MVGMdspanTest class

///@todo find out the reason that Un-correlated covs are giving problems (in qr)
// Declare your inputs
const std::vector<MVGInputs<float>> inputsf = {
  {0.3f,
   detail::multi_variable_gaussian<float>::Decomposer::chol_decomp,
   Correlation::CORRELATED,
   5,
   30000,
   6ULL},
  {0.1f,
   detail::multi_variable_gaussian<float>::Decomposer::chol_decomp,
   Correlation::UNCORRELATED,
   5,
   30000,
   6ULL},
  {0.25f,
   detail::multi_variable_gaussian<float>::Decomposer::jacobi,
   Correlation::CORRELATED,
   5,
   30000,
   6ULL},
  {0.1f,
   detail::multi_variable_gaussian<float>::Decomposer::jacobi,
   Correlation::UNCORRELATED,
   5,
   30000,
   6ULL},
  {0.2f,
   detail::multi_variable_gaussian<float>::Decomposer::qr,
   Correlation::CORRELATED,
   5,
   30000,
   6ULL},
  // { 0.2f, multi_variable_gaussian<float>::Decomposer::qr,
  // Correlation::UNCORRELATED, 5, 30000, 6ULL}
};
const std::vector<MVGInputs<double>> inputsd = {
  {0.25,
   detail::multi_variable_gaussian<double>::Decomposer::chol_decomp,
   Correlation::CORRELATED,
   10,
   3000000,
   6ULL},
  {0.1,
   detail::multi_variable_gaussian<double>::Decomposer::chol_decomp,
   Correlation::UNCORRELATED,
   10,
   3000000,
   6ULL},
  {0.25,
   detail::multi_variable_gaussian<double>::Decomposer::jacobi,
   Correlation::CORRELATED,
   10,
   3000000,
   6ULL},
  {0.1,
   detail::multi_variable_gaussian<double>::Decomposer::jacobi,
   Correlation::UNCORRELATED,
   10,
   3000000,
   6ULL},
  {0.2,
   detail::multi_variable_gaussian<double>::Decomposer::qr,
   Correlation::CORRELATED,
   10,
   3000000,
   6ULL},
  // { 0.2, multi_variable_gaussian<double>::Decomposer::qr,
  // Correlation::UNCORRELATED, 10, 3000000, 6ULL}
};

// make the tests
using MVGTestF = MVGTest<float>;
using MVGTestD = MVGTest<double>;
TEST_P(MVGTestF, MeanIsCorrectF)
{
  EXPECT_TRUE(raft::devArrMatch(x_d.data(),
                                Rand_mean.data(),
                                dim,
                                raft::CompareApprox<float>(tolerance),
                                resource::get_cuda_stream(handle)))
    << " in MeanIsCorrect";
}
TEST_P(MVGTestF, CovIsCorrectF)
{
  EXPECT_TRUE(raft::devArrMatch(P_d.data(),
                                Rand_cov.data(),
                                dim,
                                dim,
                                raft::CompareApprox<float>(tolerance),
                                resource::get_cuda_stream(handle)))
    << " in CovIsCorrect";
}
TEST_P(MVGTestD, MeanIsCorrectD)
{
  EXPECT_TRUE(raft::devArrMatch(x_d.data(),
                                Rand_mean.data(),
                                dim,
                                raft::CompareApprox<double>(tolerance),
                                resource::get_cuda_stream(handle)))
    << " in MeanIsCorrect";
}
TEST_P(MVGTestD, CovIsCorrectD)
{
  EXPECT_TRUE(raft::devArrMatch(P_d.data(),
                                Rand_cov.data(),
                                dim,
                                dim,
                                raft::CompareApprox<double>(tolerance),
                                resource::get_cuda_stream(handle)))
    << " in CovIsCorrect";
}

using MVGMdspanTestF = MVGMdspanTest<float>;
using MVGMdspanTestD = MVGMdspanTest<double>;
TEST_P(MVGMdspanTestF, MeanIsCorrectF)
{
  EXPECT_TRUE(raft::devArrMatch(x_d.data(),
                                Rand_mean.data(),
                                dim,
                                raft::CompareApprox<float>(tolerance),
                                resource::get_cuda_stream(handle)))
    << " in MeanIsCorrect";
}
TEST_P(MVGMdspanTestF, CovIsCorrectF)
{
  EXPECT_TRUE(raft::devArrMatch(P_d.data(),
                                Rand_cov.data(),
                                dim,
                                dim,
                                raft::CompareApprox<float>(tolerance),
                                resource::get_cuda_stream(handle)))
    << " in CovIsCorrect";
}
TEST_P(MVGMdspanTestD, MeanIsCorrectD)
{
  EXPECT_TRUE(raft::devArrMatch(x_d.data(),
                                Rand_mean.data(),
                                dim,
                                raft::CompareApprox<double>(tolerance),
                                resource::get_cuda_stream(handle)))
    << " in MeanIsCorrect";
}
TEST_P(MVGMdspanTestD, CovIsCorrectD)
{
  EXPECT_TRUE(raft::devArrMatch(P_d.data(),
                                Rand_cov.data(),
                                dim,
                                dim,
                                raft::CompareApprox<double>(tolerance),
                                resource::get_cuda_stream(handle)))
    << " in CovIsCorrect";
}

// call the tests
INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestD, ::testing::ValuesIn(inputsd));

// call the tests
INSTANTIATE_TEST_CASE_P(MVGMdspanTests, MVGMdspanTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MVGMdspanTests, MVGMdspanTestD, ::testing::ValuesIn(inputsd));

};  // end of namespace raft::random
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/random/rng_discrete.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"

#include <algorithm>
#include <cmath>
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/add.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <vector>

namespace raft {
namespace random {

/* In this test we generate pseudo-random integers following a probability distribution defined by
 * an array of weights, such that the probability of the integer i is p_i=w_i/sum(w). A histogram of
 * the generated integers is compared to the expected probabilities. The histogram is normalized,
 * i.e divided by the number of drawn integers n=sampled_len*n_repeat. The expected value for the
 * index i of the histogram is E_i=p_i, the standard deviation sigma_i=sqrt(p_i*(1-p_i)/n).
 *
 * Weights are constructed as a sparse vector containing mostly zeros and a small number of non-zero
 * values. The test tolerance used to compare the actual and expected histograms is
 * eps=max(sigma_i). For the test to be relevant, the tolerance must be small w.r.t the non-zero
 * probabilities. Hence, n_repeat, sampled_len and nnz must be chosen accordingly. The test
 * automatically computes the tolerance and will fail if it is estimated too high for the test to be
 * relevant.
 */

// Test parameters: n_repeat draws of sampled_len integers each, over a weight
// vector of length len with nnz non-zero weights, using generator gtype/seed.
template <typename IdxT>
struct RngDiscreteInputs {
  IdxT n_repeat;
  IdxT sampled_len;
  IdxT len;
  IdxT nnz;
  GeneratorType gtype;
  unsigned long long int seed;
};

// NOTE(review): the leading WeightT parameter is never deducible from the argument
// list, so this overload cannot be selected by a plain `os << d` — confirm whether
// it is ever called explicitly; GTest falls back to its default printer otherwise.
template <typename WeightT, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const RngDiscreteInputs<IdxT>& d)
{
  return os << "{" << d.n_repeat << ", " << d.sampled_len << ", " << d.len << ", " << d.nnz << "}";
}

// Histograms `labels` (values expected in [0, len)) with CUB's even-binned
// histogram (one unit-width bin per value) and accumulates the per-bin counts
// into `count` on `stream`.
template <typename LabelT, typename IdxT>
void update_count(
  const LabelT* labels, IdxT* count, IdxT sampled_len, IdxT len, const cudaStream_t& stream)
{
  IdxT num_levels  = len + 1;
  IdxT lower_level = 0;
  IdxT upper_level = len;

  rmm::device_uvector<IdxT> temp_count(len, stream);

  // First call with a null workspace only queries the temp-storage size.
  size_t temp_storage_bytes = 0;
  RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(nullptr,
                                                    temp_storage_bytes,
                                                    labels,
                                                    temp_count.data(),
                                                    num_levels,
                                                    lower_level,
                                                    upper_level,
                                                    sampled_len,
                                                    stream));

  rmm::device_uvector<char> workspace(temp_storage_bytes, stream);

  RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(workspace.data(),
                                                    temp_storage_bytes,
                                                    labels,
                                                    temp_count.data(),
                                                    num_levels,
                                                    lower_level,
                                                    upper_level,
                                                    sampled_len,
                                                    stream));

  raft::linalg.add(count, count, temp_count.data(), len, stream);
}

// Converts raw counts to a normalized histogram: histogram[i] = count[i] / scale.
template <typename IdxT>
void normalize_count(
  float* histogram, const IdxT* count, float scale, IdxT len, const cudaStream_t& stream)
{
  raft::linalg::unaryOp(
    histogram,
    count,
    len,
    [scale] __device__(const IdxT& cnt) { return static_cast<float>(cnt) / scale; },
    stream);
}

// Fixture: builds a sparse weight vector, derives a statistical tolerance
// (4 sigma of the worst bin; fails fast if that tolerance is not discriminating),
// draws n_repeat batches with raft::random::discrete, and accumulates a normalized
// histogram for the TEST_P body to compare against the expected probabilities.
template <typename OutT, typename WeightT, typename IdxT>
class RngDiscreteTest : public ::testing::TestWithParam<RngDiscreteInputs<IdxT>> {
 public:
  RngDiscreteTest()
    : params(::testing::TestWithParam<RngDiscreteInputs<IdxT>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      out(params.sampled_len, stream),
      weights(params.len, stream),
      histogram(params.len, stream),
      exp_histogram(params.len)
  {
  }

 protected:
  void SetUp() override
  {
    tolerance = 0.0f;
    std::vector<WeightT> h_weights(params.len, WeightT{0});
    std::mt19937 gen(params.seed);
    std::uniform_real_distribution dis(WeightT{0.2}, WeightT{2.0});
    // Fill the first nnz entries with random weights (shuffled below).
    WeightT total_weight = WeightT{0};
    for (int i = 0; i < params.nnz; i++) {
      h_weights[i] = dis(gen);
      total_weight += h_weights[i];
    }
    // tolerance = 4 * max(sigma_i); reject the configuration if it is not small
    // relative to the smallest non-zero probability.
    float min_p = 1.f;
    for (int i = 0; i < params.nnz; i++) {
      float p     = static_cast<float>(h_weights[i] / total_weight);
      float n     = static_cast<float>(params.n_repeat * params.sampled_len);
      float sigma = std::sqrt(p * (1.f - p) / n);
      tolerance   = std::max(tolerance, 4.f * sigma);
      min_p       = std::min(min_p, p);
    }
    EXPECT_TRUE(tolerance < 0.5f * min_p)
      << "Test tolerance (" << tolerance
      << ") is too high. Use more samples, more "
         "repetitions or less non-zero weights.";
    // Shuffle BEFORE copying to device and before computing exp_histogram, so
    // host and device agree on the weight layout.
    std::shuffle(h_weights.begin(), h_weights.end(), gen);
    raft::copy(weights.data(), h_weights.data(), params.len, stream);

    RngState r(params.seed, params.gtype);

    raft::device_vector_view<OutT, IdxT> out_view(out.data(), out.size());
    auto weights_view =
      raft::make_device_vector_view<const WeightT, IdxT>(weights.data(), weights.size());

    rmm::device_uvector<IdxT> count(params.len, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(count.data(), 0, params.len * sizeof(IdxT), stream));

    // Note: r carries its state across iterations, so each batch is distinct.
    for (int iter = 0; iter < params.n_repeat; iter++) {
      discrete(handle, r, out_view, weights_view);

      update_count(out.data(), count.data(), params.sampled_len, params.len, stream);
    }
    float scale = static_cast<float>(params.sampled_len * params.n_repeat);
    normalize_count(histogram.data(), count.data(), scale, params.len, stream);

    // Compute the expected normalized histogram
    for (IdxT i = 0; i < params.len; i++) {
      exp_histogram[i] = h_weights[i] / total_weight;
    }
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  RngDiscreteInputs<IdxT> params;
  float tolerance;
  rmm::device_uvector<OutT> out;
  rmm::device_uvector<WeightT> weights;
  rmm::device_uvector<float> histogram;
  std::vector<float> exp_histogram;
};

const std::vector<RngDiscreteInputs<int>> inputs_i32 = {
  {1, 10000, 5, 5, GenPC, 123ULL},
  {1, 10000, 10, 7, GenPC, 456ULL},
  {1000, 100, 10000, 20, GenPC, 123ULL},
  {1, 10000, 5, 5, GenPhilox, 1234ULL},
};
const std::vector<RngDiscreteInputs<int64_t>> inputs_i64 = {
  {1, 10000, 5, 5, GenPC, 123ULL},
  {1, 10000, 10, 7, GenPC, 456ULL},
  {1000, 100, 10000, 20, GenPC, 123ULL},
  {1, 10000, 5, 5, GenPhilox, 1234ULL},
};

// Declares the fixture typedef, the histogram-comparison TEST_P body, and the
// instantiation for one (OutT, WeightT, IdxT) combination.
#define RNG_DISCRETE_TEST(test_type, test_name, test_inputs)  \
  typedef RAFT_DEPAREN(test_type) test_name;                  \
  TEST_P(test_name, Result)                                   \
  {                                                           \
    ASSERT_TRUE(devArrMatchHost(exp_histogram.data(),         \
                                histogram.data(),             \
                                exp_histogram.size(),         \
                                CompareApprox<float>(tolerance), \
                                stream));                     \
  }                                                           \
  INSTANTIATE_TEST_CASE_P(ReduceTests, test_name, ::testing::ValuesIn(test_inputs))

RNG_DISCRETE_TEST((RngDiscreteTest<int, float, int>), RngDiscreteTestI32FI32, inputs_i32);
RNG_DISCRETE_TEST((RngDiscreteTest<uint32_t, float, int>), RngDiscreteTestU32FI32, inputs_i32);
RNG_DISCRETE_TEST((RngDiscreteTest<int64_t, float, int>), RngDiscreteTestI64FI32, inputs_i32);
RNG_DISCRETE_TEST((RngDiscreteTest<int, double, int>), RngDiscreteTestI32DI32, inputs_i32);

// Disable IdxT=int64_t test due to CUB error: https://github.com/NVIDIA/cub/issues/192
// RNG_DISCRETE_TEST((RngDiscreteTest<int, float, int64_t>), RngDiscreteTestI32FI64, inputs_i64);

}  // namespace random
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/random/sample_without_replacement.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/random/rng.cuh> #include <raft/random/sample_without_replacement.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <set> #include <vector> namespace raft { namespace random { using namespace raft::random; // Terminology: // SWoR - Sample Without Replacement template <typename T> struct SWoRInputs { int len, sampledLen; int largeWeightIndex; T largeWeight; GeneratorType gtype; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const SWoRInputs<T>& dims) { return os; } template <typename T> class SWoRTest : public ::testing::TestWithParam<SWoRInputs<T>> { public: SWoRTest() : params(::testing::TestWithParam<SWoRInputs<T>>::GetParam()), stream(resource::get_cuda_stream(handle)), in(params.len, stream), wts(params.len, stream), out(params.sampledLen, stream), outIdx(params.sampledLen, stream) { } protected: void SetUp() override { RngState r(params.seed, params.gtype); h_outIdx.resize(params.sampledLen); uniform(handle, r, in.data(), params.len, T(-1.0), T(1.0)); uniform(handle, r, wts.data(), params.len, T(1.0), T(2.0)); if (params.largeWeightIndex >= 0) { update_device(wts.data() + params.largeWeightIndex, &params.largeWeight, 1, stream); } 
sampleWithoutReplacement( handle, r, out.data(), outIdx.data(), in.data(), wts.data(), params.sampledLen, params.len); update_host(h_outIdx.data(), outIdx.data(), params.sampledLen, stream); resource::sync_stream(handle, stream); } protected: raft::resources handle; cudaStream_t stream; SWoRInputs<T> params; rmm::device_uvector<T> in, out, wts; rmm::device_uvector<int> outIdx; std::vector<int> h_outIdx; }; template <typename T> class SWoRMdspanTest : public ::testing::TestWithParam<SWoRInputs<T>> { public: SWoRMdspanTest() : params(::testing::TestWithParam<SWoRInputs<T>>::GetParam()), stream(resource::get_cuda_stream(handle)), in(params.len, stream), wts(params.len, stream), out(params.sampledLen, stream), out2(params.sampledLen, stream), outIdx(params.sampledLen, stream), outIdx2(params.sampledLen, stream) { } protected: void SetUp() override { RngState r(params.seed, params.gtype); h_outIdx.resize(params.sampledLen); uniform(handle, r, in.data(), params.len, T(-1.0), T(1.0)); uniform(handle, r, wts.data(), params.len, T(1.0), T(2.0)); if (params.largeWeightIndex >= 0) { update_device(wts.data() + params.largeWeightIndex, &params.largeWeight, 1, stream); } { using index_type = int; using output_view = raft::device_vector_view<T, index_type>; output_view out_view{out.data(), out.size()}; ASSERT_TRUE(out_view.extent(0) == params.sampledLen); using output_idxs_view = raft::device_vector_view<index_type, index_type>; std::optional<output_idxs_view> outIdx_view{std::in_place, outIdx.data(), outIdx.size()}; ASSERT_TRUE(outIdx_view.value().extent(0) == params.sampledLen); using input_view = raft::device_vector_view<const T, index_type>; input_view in_view{in.data(), in.size()}; ASSERT_TRUE(in_view.extent(0) == params.len); using weights_view = raft::device_vector_view<const T, index_type>; std::optional<weights_view> wts_view{std::in_place, wts.data(), wts.size()}; ASSERT_TRUE(wts_view.value().extent(0) == params.len); sample_without_replacement(handle, r, in_view, 
wts_view, out_view, outIdx_view); output_view out2_view{out2.data(), out2.size()}; ASSERT_TRUE(out2_view.extent(0) == params.sampledLen); std::optional<output_idxs_view> outIdx2_view{std::in_place, outIdx2.data(), outIdx2.size()}; ASSERT_TRUE(outIdx2_view.value().extent(0) == params.sampledLen); // For now, just test that these calls compile. sample_without_replacement(handle, r, in_view, wts_view, out2_view, std::nullopt); sample_without_replacement(handle, r, in_view, std::nullopt, out2_view, outIdx2_view); sample_without_replacement(handle, r, in_view, std::nullopt, out2_view, std::nullopt); } update_host(h_outIdx.data(), outIdx.data(), params.sampledLen, stream); resource::sync_stream(handle, stream); } protected: raft::resources handle; cudaStream_t stream; SWoRInputs<T> params; rmm::device_uvector<T> in, out, wts, out2; rmm::device_uvector<int> outIdx, outIdx2; std::vector<int> h_outIdx; }; const std::vector<SWoRInputs<float>> inputsf = {{1024, 512, -1, 0.f, GenPhilox, 1234ULL}, {1024, 1024, -1, 0.f, GenPhilox, 1234ULL}, {1024, 512 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024, 1024 - 1, -1, 0.f, GenPhilox, 1234ULL}, {1024, 512 + 2, -1, 0.f, GenPhilox, 1234ULL}, {1024, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 512, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 1024, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 512, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 1024, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.f, GenPhilox, 1234ULL}, {1024, 512, 10, 100000.f, GenPhilox, 1234ULL}, {1024, 512, -1, 0.f, GenPC, 1234ULL}, {1024, 1024, -1, 0.f, GenPC, 1234ULL}, {1024, 512 + 1, -1, 0.f, GenPC, 1234ULL}, {1024, 1024 - 
1, -1, 0.f, GenPC, 1234ULL}, {1024, 512 + 2, -1, 0.f, GenPC, 1234ULL}, {1024, 1024 - 2, -1, 0.f, GenPC, 1234ULL}, {1024 + 1, 512, -1, 0.f, GenPC, 1234ULL}, {1024 + 1, 1024, -1, 0.f, GenPC, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.f, GenPC, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.f, GenPC, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.f, GenPC, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.f, GenPC, 1234ULL}, {1024 + 2, 512, -1, 0.f, GenPC, 1234ULL}, {1024 + 2, 1024, -1, 0.f, GenPC, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.f, GenPC, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.f, GenPC, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.f, GenPC, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.f, GenPC, 1234ULL}, {1024, 512, 10, 100000.f, GenPC, 1234ULL}}; // This needs to be a macro because it has to live in the scope // of the class whose name is the first parameter of TEST_P. // // We test the following. // // 1. Output indices are in the given range. // 2. Output indices do not repeat. // 3. If there's a skewed distribution, the top index should // correspond to the particular item with a large weight. 
// Shared validation body for all sample-without-replacement (SWoR) fixtures.
// It is a macro (not a helper function) so that the gtest ASSERT_* statements
// run directly inside the TEST_P body where the fixture members `params` and
// `h_outIdx` are in scope.
#define _RAFT_SWOR_TEST_CONTENTS()                                                                 \
  do {                                                                                             \
    std::set<int> occurrence;                                                                      \
    for (int i = 0; i < params.sampledLen; ++i) {                                                  \
      auto val = h_outIdx[i];                                                                      \
      ASSERT_TRUE(0 <= val && val < params.len)                                                    \
        << "out-of-range index @i=" << i << " val=" << val << " sampledLen=" << params.sampledLen; \
      ASSERT_TRUE(occurrence.find(val) == occurrence.end())                                        \
        << "repeated index @i=" << i << " idx=" << val;                                            \
      occurrence.insert(val);                                                                      \
    }                                                                                              \
    if (params.largeWeightIndex >= 0) {                                                            \
      ASSERT_TRUE((h_outIdx[0] == params.largeWeightIndex) ||                                      \
                  (h_outIdx[1] == params.largeWeightIndex) ||                                      \
                  (h_outIdx[2] == params.largeWeightIndex));                                       \
    }                                                                                              \
  } while (false)

// float: pointer-based API fixture.
using SWoRTestF = SWoRTest<float>;
TEST_P(SWoRTestF, Result) { _RAFT_SWOR_TEST_CONTENTS(); }
INSTANTIATE_TEST_SUITE_P(SWoRTests, SWoRTestF, ::testing::ValuesIn(inputsf));

// float: mdspan-based API fixture, driven by the same parameter set.
using SWoRMdspanTestF = SWoRMdspanTest<float>;
TEST_P(SWoRMdspanTestF, Result) { _RAFT_SWOR_TEST_CONTENTS(); }
INSTANTIATE_TEST_SUITE_P(SWoRTests2, SWoRMdspanTestF, ::testing::ValuesIn(inputsf));

// Parameter sets: {len, sampledLen, largeWeightIndex, largeWeight, gtype, seed}.
// Sizes straddle 1024/512 boundaries (+/- 1, 2) to exercise grid-tail edges;
// the last entry of each generator group uses a skewed weight distribution.
const std::vector<SWoRInputs<double>> inputsd = {{1024, 512, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024, 1024, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024, 1024 - 1, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024 + 1, 512, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024 + 1, 1024, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024 + 1, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024 + 1, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024 + 1, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024 + 1, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024 + 2, 512, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024 + 2, 1024, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024 + 2, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024 + 2, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024 + 2, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024 + 2, 1024 + 2, -1, 0.0, GenPhilox, 1234ULL},
                                                 {1024, 512, 10, 100000.0, GenPhilox, 1234ULL},
                                                 {1024, 512, -1, 0.0, GenPC, 1234ULL},
                                                 {1024, 1024, -1, 0.0, GenPC, 1234ULL},
                                                 {1024, 512 + 1, -1, 0.0, GenPC, 1234ULL},
                                                 {1024, 1024 - 1, -1, 0.0, GenPC, 1234ULL},
                                                 {1024, 512 + 2, -1, 0.0, GenPC, 1234ULL},
                                                 {1024, 1024 - 2, -1, 0.0, GenPC, 1234ULL},
                                                 {1024 + 1, 512, -1, 0.0, GenPC, 1234ULL},
                                                 {1024 + 1, 1024, -1, 0.0, GenPC, 1234ULL},
                                                 {1024 + 1, 512 + 1, -1, 0.0, GenPC, 1234ULL},
                                                 {1024 + 1, 1024 + 1, -1, 0.0, GenPC, 1234ULL},
                                                 {1024 + 1, 512 + 2, -1, 0.0, GenPC, 1234ULL},
                                                 {1024 + 1, 1024 - 2, -1, 0.0, GenPC, 1234ULL},
                                                 {1024 + 2, 512, -1, 0.0, GenPC, 1234ULL},
                                                 {1024 + 2, 1024, -1, 0.0, GenPC, 1234ULL},
                                                 {1024 + 2, 512 + 1, -1, 0.0, GenPC, 1234ULL},
                                                 {1024 + 2, 1024 + 1, -1, 0.0, GenPC, 1234ULL},
                                                 {1024 + 2, 512 + 2, -1, 0.0, GenPC, 1234ULL},
                                                 {1024 + 2, 1024 + 2, -1, 0.0, GenPC, 1234ULL},
                                                 {1024, 512, 10, 100000.0, GenPC, 1234ULL}};

// double: same pairing of pointer-based and mdspan-based fixtures.
using SWoRTestD = SWoRTest<double>;
TEST_P(SWoRTestD, Result) { _RAFT_SWOR_TEST_CONTENTS(); }
INSTANTIATE_TEST_SUITE_P(SWoRTests, SWoRTestD, ::testing::ValuesIn(inputsd));
using SWoRMdspanTestD = SWoRMdspanTest<double>;
TEST_P(SWoRMdspanTestD, Result) { _RAFT_SWOR_TEST_CONTENTS(); }
INSTANTIATE_TEST_SUITE_P(SWoRTests2, SWoRMdspanTestD, ::testing::ValuesIn(inputsd));

}  // namespace random
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/random/make_blobs.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"

#include <cub/cub.cuh>
#include <gtest/gtest.h>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/make_blobs.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

namespace raft {
namespace random {

/**
 * @brief Accumulate per-cluster sum and sum-of-squares of `data`, plus
 * per-cluster row counts.
 *
 * `out` holds interleaved {sum, sum_sq} pairs, one pair per
 * (cluster, column) cell; `lens` holds the number of rows per cluster.
 * Expects a 1D launch with one thread per element of `data`.
 */
template <typename T>
RAFT_KERNEL meanKernel(T* out,
                       int* lens,
                       const T* data,
                       const int* labels,
                       int nrows,
                       int ncols,
                       int nclusters,
                       bool row_major)
{
  int tid   = threadIdx.x + blockIdx.x * blockDim.x;
  int rowid = row_major ? tid / ncols : tid % nrows;
  int colid = row_major ? tid % ncols : tid / nrows;
  if (rowid < nrows && colid < ncols) {
    T val     = data[tid];
    int label = labels[rowid];
    // Cell index in the (nclusters x ncols) statistics matrix, using the
    // same layout as the input data.
    int idx = row_major ? label * ncols + colid : colid * nclusters + label;
    raft::myAtomicAdd(out + idx * 2, val);
    raft::myAtomicAdd(out + idx * 2 + 1, val * val);
    // Count each row exactly once (from its first column).
    if (colid == 0) { raft::myAtomicAdd(lens + label, 1); }
  }
}

/**
 * @brief Convert the interleaved {sum, sum_sq} pairs produced by meanKernel
 * into mean (first nrows*ncols entries of `out`) and variance (next
 * nrows*ncols entries). Here `nrows` is the number of clusters.
 */
template <typename T>
RAFT_KERNEL compute_mean_var(
  T* out, const T* stats, int* lens, int nrows, int ncols, bool row_major)
{
  int tid    = threadIdx.x + blockIdx.x * blockDim.x;
  int rowid  = row_major ? tid / ncols : tid % nrows;
  int colid  = row_major ? tid % ncols : tid / nrows;
  int stride = nrows * ncols;
  if (rowid < nrows && colid < ncols) {
    int len          = lens[rowid];
    auto mean        = stats[tid * 2] / len;
    out[tid]         = mean;
    out[tid + stride] = (stats[tid * 2 + 1] / len) - (mean * mean);
  }
}

/** Test parameters for make_blobs. */
template <typename T>
struct MakeBlobsInputs {
  T tolerance;                         // per-sigma comparison tolerance
  int rows, cols, n_clusters;          // data shape and cluster count
  T std;                               // requested cluster standard deviation
  bool shuffle;                        // whether make_blobs shuffles rows
  raft::random::GeneratorType gtype;   // RNG backend under test
  uint64_t seed;
};

/**
 * @brief Generates blobs with known per-cluster means, then recomputes the
 * empirical per-cluster mean/variance on device and checks them against the
 * requested values.
 */
template <typename T, typename layout>
class MakeBlobsTest : public ::testing::TestWithParam<MakeBlobsInputs<T>> {
 public:
  MakeBlobsTest()
    : params(::testing::TestWithParam<MakeBlobsInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      mu_vec(make_device_matrix<T, int, layout>(handle, params.n_clusters, params.cols)),
      mean_var(make_device_vector<T, int>(handle, 2 * params.n_clusters * params.cols))
  {
  }

 protected:
  void SetUp() override
  {
    // Tests are configured with their expected test-values sigma. For example,
    // 4 x sigma indicates the test shouldn't fail 99.9% of the time.
    num_sigma = 50;
    auto len  = params.rows * params.cols;
    raft::random::RngState r(params.seed, params.gtype);

    auto data   = make_device_matrix<T, int, layout>(handle, params.rows, params.cols);
    auto labels = make_device_vector<int, int>(handle, params.rows);
    auto stats  = make_device_vector<T, int>(handle, 2 * params.n_clusters * params.cols);
    auto lens   = make_device_vector<int, int>(handle, params.n_clusters);

    RAFT_CUDA_TRY(cudaMemsetAsync(stats.data_handle(), 0, stats.extent(0) * sizeof(T), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(lens.data_handle(), 0, lens.extent(0) * sizeof(int), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(mean_var.data_handle(), 0, mean_var.size() * sizeof(T), stream));

    // Random cluster centers in [-10, 10]; these are the expected means.
    uniform(handle, r, mu_vec.data_handle(), params.cols * params.n_clusters, T(-10.0), T(10.0));

    make_blobs<T, int, layout>(handle,
                               data.view(),
                               labels.view(),
                               params.n_clusters,
                               std::make_optional(mu_vec.view()),
                               std::nullopt,
                               params.std,
                               params.shuffle,
                               T(-10.0),
                               T(10.0),
                               params.seed,
                               params.gtype);

    bool row_major           = std::is_same<layout, raft::layout_c_contiguous>::value;
    static const int threads = 128;
    meanKernel<T><<<raft::ceildiv(len, threads), threads, 0, stream>>>(stats.data_handle(),
                                                                       lens.data_handle(),
                                                                       data.data_handle(),
                                                                       labels.data_handle(),
                                                                       params.rows,
                                                                       params.cols,
                                                                       params.n_clusters,
                                                                       row_major);
    RAFT_CUDA_TRY(cudaGetLastError());  // catch launch-configuration errors
    int len1 = params.n_clusters * params.cols;
    compute_mean_var<T>
      <<<raft::ceildiv(len1, threads), threads, 0, stream>>>(mean_var.data_handle(),
                                                             stats.data_handle(),
                                                             lens.data_handle(),
                                                             params.n_clusters,
                                                             params.cols,
                                                             row_major);
    RAFT_CUDA_TRY(cudaGetLastError());
  }

  void check()
  {
    int len      = params.n_clusters * params.cols;
    auto compare = raft::CompareApprox<T>(num_sigma * params.tolerance);
    // Empirical means must match the requested centers, and empirical
    // variances must match std^2 (devArrMatch with a scalar reference).
    ASSERT_TRUE(raft::devArrMatch(mu_vec.data_handle(), mean_var.data_handle(), len, compare));
    ASSERT_TRUE(raft::devArrMatch(params.std, mean_var.data_handle() + len, len, compare));
  }

 protected:
  raft::resources handle;
  MakeBlobsInputs<T> params;
  cudaStream_t stream = 0;
  // NOTE: declaration order matches the constructor initializer list
  // (mu_vec before mean_var); members initialize in declaration order, so a
  // mismatch triggers -Wreorder and obscures the true initialization order.
  device_matrix<T, int, layout> mu_vec;
  device_vector<T, int> mean_var;
  int num_sigma;
};

typedef MakeBlobsTest<float, raft::layout_c_contiguous> MakeBlobsTestF_RowMajor;
typedef MakeBlobsTest<float, raft::layout_f_contiguous> MakeBlobsTestF_ColMajor;
const std::vector<MakeBlobsInputs<float>> inputsf_t = {
  {0.0055, 1024, 32, 3, 1.f, false, raft::random::GenPhilox, 1234ULL},
  {0.011, 1024, 8, 3, 1.f, false, raft::random::GenPhilox, 1234ULL},
  {0.0055, 1024, 32, 3, 1.f, false, raft::random::GenPC, 1234ULL},
  {0.011, 1024, 8, 3, 1.f, false, raft::random::GenPC, 1234ULL},
  {0.0055, 1024, 32, 3, 1.f, true, raft::random::GenPhilox, 1234ULL},
  {0.011, 1024, 8, 3, 1.f, true, raft::random::GenPhilox, 1234ULL},
  {0.0055, 1024, 32, 3, 1.f, true, raft::random::GenPC, 1234ULL},
  {0.011, 1024, 8, 3, 1.f, true, raft::random::GenPC, 1234ULL},
  {0.0055, 5003, 32, 5, 1.f, false, raft::random::GenPhilox, 1234ULL},
  {0.011, 5003, 8, 5, 1.f, false, raft::random::GenPhilox, 1234ULL},
  {0.0055, 5003, 32, 5, 1.f, false, raft::random::GenPC, 1234ULL},
  {0.011, 5003, 8, 5, 1.f, false, raft::random::GenPC, 1234ULL},
  {0.0055, 5003, 32, 5, 1.f, true, raft::random::GenPhilox, 1234ULL},
  {0.011, 5003, 8, 5, 1.f, true, raft::random::GenPhilox, 1234ULL},
  {0.0055, 5003, 32, 5, 1.f, true, raft::random::GenPC, 1234ULL},
  {0.011, 5003, 8, 5, 1.f, true, raft::random::GenPC, 1234ULL},
};

TEST_P(MakeBlobsTestF_RowMajor, Result) { check(); }
INSTANTIATE_TEST_CASE_P(MakeBlobsTests, MakeBlobsTestF_RowMajor, ::testing::ValuesIn(inputsf_t));
TEST_P(MakeBlobsTestF_ColMajor, Result) { check(); }
INSTANTIATE_TEST_CASE_P(MakeBlobsTests, MakeBlobsTestF_ColMajor, ::testing::ValuesIn(inputsf_t));

typedef MakeBlobsTest<double, raft::layout_c_contiguous> MakeBlobsTestD_RowMajor;
typedef MakeBlobsTest<double, raft::layout_f_contiguous> MakeBlobsTestD_ColMajor;
const std::vector<MakeBlobsInputs<double>> inputsd_t = {
  {0.0055, 1024, 32, 3, 1.0, false, raft::random::GenPhilox, 1234ULL},
  {0.011, 1024, 8, 3, 1.0, false, raft::random::GenPhilox, 1234ULL},
  {0.0055, 1024, 32, 3, 1.0, false, raft::random::GenPC, 1234ULL},
  {0.011, 1024, 8, 3, 1.0, false, raft::random::GenPC, 1234ULL},
  {0.0055, 1024, 32, 3, 1.0, true, raft::random::GenPhilox, 1234ULL},
  {0.011, 1024, 8, 3, 1.0, true, raft::random::GenPhilox, 1234ULL},
  {0.0055, 1024, 32, 3, 1.0, true, raft::random::GenPC, 1234ULL},
  {0.011, 1024, 8, 3, 1.0, true, raft::random::GenPC, 1234ULL},
  {0.0055, 5003, 32, 5, 1.0, false, raft::random::GenPhilox, 1234ULL},
  {0.011, 5003, 8, 5, 1.0, false, raft::random::GenPhilox, 1234ULL},
  {0.0055, 5003, 32, 5, 1.0, false, raft::random::GenPC, 1234ULL},
  {0.011, 5003, 8, 5, 1.0, false, raft::random::GenPC, 1234ULL},
  {0.0055, 5003, 32, 5, 1.0, true, raft::random::GenPhilox, 1234ULL},
  {0.011, 5003, 8, 5, 1.0, true, raft::random::GenPhilox, 1234ULL},
  {0.0055, 5003, 32, 5, 1.0, true, raft::random::GenPC, 1234ULL},
  {0.011, 5003, 8, 5, 1.0, true, raft::random::GenPC, 1234ULL},
};

TEST_P(MakeBlobsTestD_RowMajor, Result) { check(); }
INSTANTIATE_TEST_CASE_P(MakeBlobsTests, MakeBlobsTestD_RowMajor, ::testing::ValuesIn(inputsd_t));
TEST_P(MakeBlobsTestD_ColMajor, Result) { check(); }
INSTANTIATE_TEST_CASE_P(MakeBlobsTests, MakeBlobsTestD_ColMajor, ::testing::ValuesIn(inputsd_t));

}  // end namespace random
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/random/rng_pcg_host_api.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"

namespace raft {
namespace random {

// CPT - Calls Per Thread, How many calls to custom_next is made by a single thread
// IPC - Items Per Call, How many items are returned by a single call to custom_next (usually IPC =
// 1 or 2)

/**
 * @brief Fill the CPT * IPC slots owned by logical thread `tid`.
 *
 * Compiled for both host and device so the two executions can be compared:
 * a given (seed, tid) pair must produce identical values on either side.
 */
template <typename DType, typename ParamType, int CPT, int IPC>
__host__ __device__ void single_thread_fill(DType* buffer,
                                            DeviceState<PCGenerator> r,
                                            ParamType params,
                                            const size_t total_threads,
                                            const size_t len,
                                            const size_t tid)
{
  PCGenerator gen(r, tid);

  for (size_t i = 0; i < CPT; i++) {
    DType val[IPC];
    size_t index = (tid * CPT * IPC) + i * IPC;
    custom_next(gen, val, params, index, total_threads);
    for (int j = 0; j < IPC; j++) {
      // Guard the tail: the last thread's final call may run past `len`.
      if (index + j < len) { buffer[index + j] = val[j]; }
    }
  }
}

/** One device thread per logical thread; expects a 1D launch. */
template <typename DType, typename ParamType, int CPT, int IPC>
RAFT_KERNEL pcg_device_kernel(DType* buffer,
                              DeviceState<PCGenerator> r,
                              ParamType params,
                              const size_t total_threads,
                              const size_t len)
{
  int tid = int(blockIdx.x) * blockDim.x + threadIdx.x;
  single_thread_fill<DType, ParamType, CPT, IPC>(buffer, r, params, total_threads, len, tid);
}

/**
 * @brief Generates the same PCG random sequence on device and on host and
 * checks that the two buffers match element-wise.
 */
template <typename ParamType, typename DataType, int CPT, int IPC>
class HostApiTest {
 public:
  HostApiTest() : stream(resource::get_cuda_stream(handle)), d_buffer(0, stream)
  {
    len = total_threads * CPT * IPC;
    d_buffer.resize(len, stream);
    h_buffer.resize(len);
  }

  void FillBuffers(uint64_t seed)
  {
    RngState r(seed, GenPC);
    DeviceState<PCGenerator> d_state(r);

    pcg_device_kernel<DataType, ParamType, CPT, IPC><<<n_blocks, n_threads, 0, stream>>>(
      d_buffer.data(), d_state, dist_params, total_threads, len);
    RAFT_CUDA_TRY(cudaGetLastError());  // surface launch-configuration errors
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));

    // Replay the identical computation on the host, one logical thread at a
    // time. Loop over thread ids (not buffer elements): each call writes that
    // thread's CPT * IPC slots, covering the whole buffer.
    for (size_t tid = 0; tid < total_threads; tid++) {
      single_thread_fill<DataType, ParamType, CPT, IPC>(
        h_buffer.data(), d_state, dist_params, total_threads, len, tid);
    }
  }

  void SetParams(ParamType _dist_params) { dist_params = _dist_params; }

  void test()
  {
    ASSERT_TRUE(devArrMatchHost(
      h_buffer.data(), d_buffer.data(), len, raft::CompareApprox<double>(1e-5), stream));
  }

  ParamType dist_params;
  raft::resources handle;
  cudaStream_t stream;

  static const int n_blocks         = 128;
  static const int n_threads        = 64;
  static const size_t total_threads = size_t(n_blocks) * n_threads;

  size_t len;
  rmm::device_uvector<DataType> d_buffer;
  std::vector<DataType> h_buffer;
};

// This Wrapper class is needed because gtest typed test allows single type per class
template <typename T>
class TestW : public testing::Test {
 protected:
  void SetUp() override
  {
    test_obj.SetParams(p);
    test_obj.FillBuffers(seed);
  }

 public:
  void TestFillBuffer() { test_obj.test(); }
  T test_obj;
  using ParamType = decltype(T::dist_params);
  static ParamType p;
  const uint64_t seed = 42;
};

TYPED_TEST_SUITE_P(TestW);
TYPED_TEST_P(TestW, host_api_test) { this->TestFillBuffer(); }
REGISTER_TYPED_TEST_SUITE_P(TestW, host_api_test);

// One alias + parameter specialization per distribution under test.
using InvariantT = HostApiTest<InvariantDistParams<int>, int, 16, 1>;
template <>
InvariantDistParams<int> TestW<InvariantT>::p = {.const_val = 123456};

using UniformT = HostApiTest<UniformDistParams<double>, double, 16, 1>;
template <>
UniformDistParams<double> TestW<UniformT>::p = {.start = 0.0, .end = 1.0};

using UniformInt32T = HostApiTest<UniformIntDistParams<uint32_t, uint32_t>, uint32_t, 16, 1>;
template <>
UniformIntDistParams<uint32_t, uint32_t> TestW<UniformInt32T>::p = {
  .start = 0, .end = 100000, .diff = 100000};

using UniformInt64T = HostApiTest<UniformIntDistParams<uint64_t, uint64_t>, uint64_t, 16, 1>;
template <>
UniformIntDistParams<uint64_t, uint64_t> TestW<UniformInt64T>::p = {
  .start = 0, .end = 100000, .diff = 100000};

using NormalT = HostApiTest<NormalDistParams<double>, double, 16, 2>;
template <>
NormalDistParams<double> TestW<NormalT>::p = {.mu = 0.5, .sigma = 0.5};

using NormalIntT = HostApiTest<NormalIntDistParams<uint32_t>, uint32_t, 16, 2>;
template <>
NormalIntDistParams<uint32_t> TestW<NormalIntT>::p = {.mu = 10000000, .sigma = 10000};

using BernoulliT = HostApiTest<BernoulliDistParams<double>, double, 16, 1>;
template <>
BernoulliDistParams<double> TestW<BernoulliT>::p = {.prob = 0.7};

using ScaledBernoulliT = HostApiTest<ScaledBernoulliDistParams<double>, double, 16, 1>;
template <>
ScaledBernoulliDistParams<double> TestW<ScaledBernoulliT>::p = {.prob = 0.7, .scale = 0.5};

using GumbelT = HostApiTest<GumbelDistParams<double>, double, 16, 1>;
template <>
GumbelDistParams<double> TestW<GumbelT>::p = {.mu = 0.7, .beta = 0.5};

using LogNormalT = HostApiTest<LogNormalDistParams<double>, double, 16, 2>;
template <>
LogNormalDistParams<double> TestW<LogNormalT>::p = {.mu = 0.5, .sigma = 0.5};

using LogisticT = HostApiTest<LogisticDistParams<double>, double, 16, 1>;
template <>
LogisticDistParams<double> TestW<LogisticT>::p = {.mu = 0.2, .scale = 0.3};

using ExponentialT = HostApiTest<ExponentialDistParams<double>, double, 16, 1>;
template <>
ExponentialDistParams<double> TestW<ExponentialT>::p = {.lambda = 1.6};

using RayleighT = HostApiTest<RayleighDistParams<double>, double, 16, 1>;
template <>
RayleighDistParams<double> TestW<RayleighT>::p = {.sigma = 1.6};

using LaplaceT = HostApiTest<LaplaceDistParams<double>, double, 16, 1>;
template <>
LaplaceDistParams<double> TestW<LaplaceT>::p = {.mu = 0.2, .scale = 0.3};

// LogNormalT was previously defined (with its parameter specialization above)
// but omitted from this list, so its test never ran; it is now included.
using TypeList = testing::Types<InvariantT,
                                UniformT,
                                UniformInt32T,
                                UniformInt64T,
                                NormalT,
                                NormalIntT,
                                BernoulliT,
                                ScaledBernoulliT,
                                GumbelT,
                                LogNormalT,
                                LogisticT,
                                ExponentialT,
                                RayleighT,
                                LaplaceT>;

INSTANTIATE_TYPED_TEST_SUITE_P(Rng, TestW, TypeList);

}  // namespace random
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/random/rmat_rectangular_generator.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cub/cub.cuh>
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <sys/timeb.h>
#include <vector>

#include "../test_utils.cuh"
#include <raft/core/resources.hpp>
#include <raft/random/rmat_rectangular_generator.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

namespace raft {
namespace random {

// Courtesy: cuGraph unit-tests

struct RmatInputs {
  size_t r_scale;      // log2 of the number of source vertices
  size_t c_scale;      // log2 of the number of destination vertices
  size_t n_edges;      // number of edges to generate
  bool theta_array;    // true: per-depth theta array; false: single (a,b,c)
  uint64_t seed;
  float eps;           // tolerance for comparing theta distributions
};

/**
 * @brief Normalize each depth's 4 quadrant values so they sum to 1.
 * One thread per depth; a single 256-thread block suffices for any scale.
 */
template <typename OutT, typename InT>
RAFT_KERNEL normalize_kernel(
  OutT* theta, const InT* in_vals, size_t max_scale, size_t r_scale, size_t c_scale)
{
  size_t idx = threadIdx.x;
  if (idx < max_scale) {
    auto a   = OutT(in_vals[4 * idx]);
    auto b   = OutT(in_vals[4 * idx + 1]);
    auto c   = OutT(in_vals[4 * idx + 2]);
    auto d   = OutT(in_vals[4 * idx + 3]);
    auto sum = a + b + c + d;
    a /= sum;
    b /= sum;
    c /= sum;
    d /= sum;
    theta[4 * idx]     = a;
    theta[4 * idx + 1] = b;
    theta[4 * idx + 2] = c;
    theta[4 * idx + 3] = d;
  }
}

// handle rectangular cases correctly: depths beyond a dimension's scale
// collapse probability mass onto the remaining quadrants.
template <typename OutT>
RAFT_KERNEL handle_rect_kernel(OutT* theta, size_t max_scale, size_t r_scale, size_t c_scale)
{
  size_t idx = threadIdx.x;
  if (idx < max_scale) {
    auto a = theta[4 * idx];
    auto b = theta[4 * idx + 1];
    auto c = theta[4 * idx + 2];
    auto d = theta[4 * idx + 3];
    if (idx >= r_scale) {
      a += c;
      c = OutT(0);
      b += d;
      d = OutT(0);
    }
    if (idx >= c_scale) {
      a += b;
      b = OutT(0);
      c += d;
      d = OutT(0);
    }
    theta[4 * idx]     = a;
    theta[4 * idx + 1] = b;
    theta[4 * idx + 2] = c;
    theta[4 * idx + 3] = d;
  }
}

// for a single probability distribution across depths, just replicate the theta's!
// this will keep the test code simpler
template <typename OutT>
RAFT_KERNEL theta_kernel(OutT* theta, size_t max_scale, size_t r_scale, size_t c_scale)
{
  size_t idx = threadIdx.x;
  if (idx != 0 && idx < max_scale) {
    auto a = theta[0];
    auto b = theta[1];
    auto c = theta[2];
    auto d = theta[3];
    if (idx >= r_scale) {
      a += c;
      c = OutT(0);
      b += d;
      d = OutT(0);
    }
    if (idx >= c_scale) {
      a += b;
      b = OutT(0);
      c += d;
      d = OutT(0);
    }
    theta[4 * idx]     = a;
    theta[4 * idx + 1] = b;
    theta[4 * idx + 2] = c;
    theta[4 * idx + 3] = d;
  }
}

/**
 * @brief Host wrapper: normalize raw quadrant values into a per-depth theta
 * distribution, optionally fixing up rectangular scales and/or replicating a
 * single distribution across all depths.
 */
template <typename OutT, typename InT>
void normalize(OutT* theta,
               const InT* in_vals,
               size_t max_scale,
               size_t r_scale,
               size_t c_scale,
               bool handle_rect,
               bool theta_array,
               cudaStream_t stream)
{
  // one threadblock with 256 threads is more than enough as the 'scale' parameters
  // won't be that large!
  normalize_kernel<OutT, InT><<<1, 256, 0, stream>>>(theta, in_vals, max_scale, r_scale, c_scale);
  RAFT_CUDA_TRY(cudaGetLastError());
  if (handle_rect) {
    handle_rect_kernel<<<1, 256, 0, stream>>>(theta, max_scale, r_scale, c_scale);
    RAFT_CUDA_TRY(cudaGetLastError());
  }
  if (!theta_array) {
    theta_kernel<<<1, 256, 0, stream>>>(theta, max_scale, r_scale, c_scale);
    RAFT_CUDA_TRY(cudaGetLastError());
  }
}

/**
 * @brief Build the empirical quadrant histogram from generated edges: for
 * each (src, dst) pair and each depth, count which of the 4 quadrants the
 * bit pair falls into.
 */
RAFT_KERNEL compute_hist(
  int* hist, const size_t* out, size_t len, size_t max_scale, size_t r_scale, size_t c_scale)
{
  size_t idx = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
  if (idx + 1 < len) {
    auto src = out[idx], dst = out[idx + 1];
    for (size_t j = 0; j < max_scale; ++j) {
      bool src_bit = j < r_scale ? src & (1 << (r_scale - j - 1)) : 0;
      bool dst_bit = j < c_scale ? dst & (1 << (c_scale - j - 1)) : 0;
      // renamed from `idx` to avoid shadowing the element index above
      auto bin = j * 4 + src_bit * 2 + dst_bit;
      atomicAdd(hist + bin, 1);
    }
  }
}

/**
 * @brief Pointer-API fixture: generates RMAT edges, histograms them, and
 * compares the empirical per-depth quadrant distribution with theta.
 */
class RmatGenTest : public ::testing::TestWithParam<RmatInputs> {
 public:
  RmatGenTest()
    : handle{},
      stream{resource::get_cuda_stream(handle)},
      params{::testing::TestWithParam<RmatInputs>::GetParam()},
      out{params.n_edges * 2, stream},
      out_src{params.n_edges, stream},
      out_dst{params.n_edges, stream},
      theta{0, stream},
      h_theta{},
      state{params.seed, GeneratorType::GenPC},
      max_scale{std::max(params.r_scale, params.c_scale)}
  {
    theta.resize(4 * max_scale, stream);
    // Random raw quadrant values, normalized to a proper distribution.
    uniform<float>(handle, state, theta.data(), theta.size(), 0.0f, 1.0f);
    normalize<float, float>(theta.data(),
                            theta.data(),
                            max_scale,
                            params.r_scale,
                            params.c_scale,
                            params.r_scale != params.c_scale,
                            params.theta_array,
                            stream);
    h_theta.resize(theta.size());
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    raft::update_host(h_theta.data(), theta.data(), theta.size(), stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

 protected:
  void SetUp() override
  {
    if (params.theta_array) {
      rmat_rectangular_gen(out.data(),
                           out_src.data(),
                           out_dst.data(),
                           theta.data(),
                           params.r_scale,
                           params.c_scale,
                           params.n_edges,
                           stream,
                           state);
    } else {
      rmat_rectangular_gen(out.data(),
                           out_src.data(),
                           out_dst.data(),
                           h_theta[0],
                           h_theta[1],
                           h_theta[2],
                           params.r_scale,
                           params.c_scale,
                           params.n_edges,
                           stream,
                           state);
    }
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

  void validate()
  {
    rmm::device_uvector<int> hist{theta.size(), stream};
    RAFT_CUDA_TRY(cudaMemsetAsync(hist.data(), 0, hist.size() * sizeof(int), stream));
    compute_hist<<<raft::ceildiv<size_t>(out.size() / 2, 256), 256, 0, stream>>>(
      hist.data(), out.data(), out.size(), max_scale, params.r_scale, params.c_scale);
    RAFT_CUDA_TRY(cudaGetLastError());
    rmm::device_uvector<float> computed_theta{theta.size(), stream};
    normalize<float, int>(computed_theta.data(),
                          hist.data(),
                          max_scale,
                          params.r_scale,
                          params.c_scale,
                          false,
                          true,
                          stream);
    RAFT_CUDA_TRY(cudaGetLastError());
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    ASSERT_TRUE(devArrMatchHost(
      h_theta.data(), computed_theta.data(), theta.size(), CompareApprox<float>(params.eps)));
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  RmatInputs params;
  rmm::device_uvector<size_t> out, out_src, out_dst;
  rmm::device_uvector<float> theta;
  std::vector<float> h_theta;
  RngState state;
  size_t max_scale;
};

/**
 * @brief Mdspan-API fixture; identical validation to RmatGenTest but drives
 * the mdspan overloads of rmat_rectangular_gen.
 */
class RmatGenMdspanTest : public ::testing::TestWithParam<RmatInputs> {
 public:
  RmatGenMdspanTest()
    : handle{},
      stream{resource::get_cuda_stream(handle)},
      params{::testing::TestWithParam<RmatInputs>::GetParam()},
      out{params.n_edges * 2, stream},
      out_src{params.n_edges, stream},
      out_dst{params.n_edges, stream},
      theta{0, stream},
      h_theta{},
      state{params.seed, GeneratorType::GenPC},
      max_scale{std::max(params.r_scale, params.c_scale)}
  {
    theta.resize(4 * max_scale, stream);
    uniform<float>(handle, state, theta.data(), theta.size(), 0.0f, 1.0f);
    normalize<float, float>(theta.data(),
                            theta.data(),
                            max_scale,
                            params.r_scale,
                            params.c_scale,
                            params.r_scale != params.c_scale,
                            params.theta_array,
                            stream);
    h_theta.resize(theta.size());
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    raft::update_host(h_theta.data(), theta.data(), theta.size(), stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

 protected:
  void SetUp() override
  {
    using index_type = size_t;
    using out_view_type =
      raft::device_mdspan<index_type,
                          raft::extents<index_type, raft::dynamic_extent, 2>,
                          raft::row_major>;
    // NOTE(review): the dynamic extent is set to out.size() (= 2 * n_edges),
    // not n_edges — confirm this matches the extent convention expected by
    // the mdspan overload of rmat_rectangular_gen.
    out_view_type out_view(out.data(), out.size());

    using out_src_view_type = raft::device_vector_view<index_type, index_type>;
    out_src_view_type out_src_view(out_src.data(), out_src.size());

    using out_dst_view_type = raft::device_vector_view<index_type, index_type>;
    out_dst_view_type out_dst_view(out_dst.data(), out_dst.size());

    if (params.theta_array) {
      raft::device_vector_view<const float, index_type> theta_view(theta.data(), theta.size());
      rmat_rectangular_gen(handle,
                           state,
                           theta_view,
                           out_view,
                           out_src_view,
                           out_dst_view,
                           params.r_scale,
                           params.c_scale);
    } else {
      rmat_rectangular_gen(handle,
                           state,
                           out_view,
                           out_src_view,
                           out_dst_view,
                           h_theta[0],
                           h_theta[1],
                           h_theta[2],
                           params.r_scale,
                           params.c_scale);
    }
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

  void validate()
  {
    rmm::device_uvector<int> hist{theta.size(), stream};
    RAFT_CUDA_TRY(cudaMemsetAsync(hist.data(), 0, hist.size() * sizeof(int), stream));
    compute_hist<<<raft::ceildiv<size_t>(out.size() / 2, 256), 256, 0, stream>>>(
      hist.data(), out.data(), out.size(), max_scale, params.r_scale, params.c_scale);
    RAFT_CUDA_TRY(cudaGetLastError());
    rmm::device_uvector<float> computed_theta{theta.size(), stream};
    normalize<float, int>(computed_theta.data(),
                          hist.data(),
                          max_scale,
                          params.r_scale,
                          params.c_scale,
                          false,
                          true,
                          stream);
    RAFT_CUDA_TRY(cudaGetLastError());
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    ASSERT_TRUE(devArrMatchHost(
      h_theta.data(), computed_theta.data(), theta.size(), CompareApprox<float>(params.eps)));
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  RmatInputs params;
  rmm::device_uvector<size_t> out, out_src, out_dst;
  rmm::device_uvector<float> theta;
  std::vector<float> h_theta;
  RngState state;
  size_t max_scale;
};

static const float TOLERANCE = 0.01f;

const std::vector<RmatInputs> inputs = {
  // square adjacency
  {16, 16, 100000, false, 123456ULL, TOLERANCE},
  {16, 16, 100000, true, 123456ULL, TOLERANCE},
  {16, 16, 200000, false, 123456ULL, TOLERANCE},
  {16, 16, 200000, true, 123456ULL, TOLERANCE},
  {18, 18, 100000, false, 123456ULL, TOLERANCE},
  {18, 18, 100000, true, 123456ULL, TOLERANCE},
  {18, 18, 200000, false, 123456ULL, TOLERANCE},
  {18, 18, 200000, true, 123456ULL, TOLERANCE},
  {16, 16, 100000, false, 456789ULL, TOLERANCE},
  {16, 16, 100000, true, 456789ULL, TOLERANCE},
  {16, 16, 200000, false, 456789ULL, TOLERANCE},
  {16, 16, 200000, true, 456789ULL, TOLERANCE},
  {18, 18, 100000, false, 456789ULL, TOLERANCE},
  {18, 18, 100000, true, 456789ULL, TOLERANCE},
  {18, 18, 200000, false, 456789ULL, TOLERANCE},
  {18, 18, 200000, true, 456789ULL, TOLERANCE},
  // rectangular adjacency
  {16, 18, 200000, false, 123456ULL, TOLERANCE},
  {16, 18, 200000, true, 123456ULL, TOLERANCE},
  {18, 16, 200000, false, 123456ULL, TOLERANCE},
  {18, 16, 200000, true, 123456ULL, TOLERANCE},
  {16, 18, 200000, false, 456789ULL, TOLERANCE},
  {16, 18, 200000, true, 456789ULL, TOLERANCE},
  {18, 16, 200000, false, 456789ULL, TOLERANCE},
  {18, 16, 200000, true, 456789ULL, TOLERANCE}};

TEST_P(RmatGenTest, Result) { validate(); }
INSTANTIATE_TEST_SUITE_P(RmatGenTests, RmatGenTest, ::testing::ValuesIn(inputs));

TEST_P(RmatGenMdspanTest, Result) { validate(); }
INSTANTIATE_TEST_SUITE_P(RmatGenMdspanTests, RmatGenMdspanTest, ::testing::ValuesIn(inputs));

}  // namespace random
}  // namespace raft
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/random/rng.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <memory>
#include <raft/core/resource/cuda_stream.hpp>
#include <sys/timeb.h>

#include "../test_utils.cuh"
#include <cub/cub.cuh>
#include <gtest/gtest.h>
#include <raft/core/resources.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/mean.cuh>
#include <raft/stats/stddev.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

namespace raft {
namespace random {

using namespace raft::random;

// Selects which probability distribution a test case draws from.
enum RandomType {
  RNG_Normal,
  RNG_LogNormal,
  RNG_Uniform,
  RNG_Gumbel,
  RNG_Logistic,
  RNG_Exp,
  RNG_Rayleigh,
  RNG_Laplace
};

// Accumulates sum(x) into out[0] and sum(x^2) into out[1] over the whole
// array: each block computes a partial sum with cub::BlockReduce, then
// thread 0 of the block adds it atomically. The same TempStorage is reused
// for both reductions, separated by __syncthreads().
template <typename T, int TPB>
RAFT_KERNEL meanKernel(T* out, const T* data, int len)
{
  typedef cub::BlockReduce<T, TPB> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  // Out-of-range threads contribute zero so the reduction stays full-block.
  T val = tid < len ? data[tid] : T(0);
  T x   = BlockReduce(temp_storage).Sum(val);
  __syncthreads();
  T xx = BlockReduce(temp_storage).Sum(val * val);
  __syncthreads();
  if (threadIdx.x == 0) {
    raft::myAtomicAdd(out, x);
    raft::myAtomicAdd(out + 1, xx);
  }
}

// Test-case parameters; the roles of 'start'/'end' depend on the distribution
// (see table below).
template <typename T>
struct RngInputs {
  int len;
  // Meaning of 'start' and 'end' parameter for various distributions
  //
  //         Uniform   Normal/Log-Normal   Gumbel   Logistic   Laplace   Exponential   Rayleigh
  // start    start          mean           mean      mean       mean       lambda       sigma
  // end       end           sigma          beta      scale      scale      Unused       Unused
  T start, end;
  RandomType type;
  GeneratorType gtype;
  uint64_t seed;
};

// In this test we generate pseudo-random values that follow various probability distributions such
// as Normal, Laplace etc. To check the correctness of generated random variates we compute two
// measures, mean and variance from the generated data. The computed values are matched against
// their theoretically expected values for the corresponding distribution. The computed mean and
// variance are statistical variables themselves and follow a Normal distribution. Which means,
// there is 99+% chance that the computed values fall in the 3-sigma (standard deviation) interval
// [theoretical_value - 3*sigma, theoretical_value + 3*sigma]. The values are practically
// guaranteed to fall in the 4-sigma interval. Reference standard deviation of the computed
// mean/variance distribution is calculated here
// https://gist.github.com/vinaydes/cee04f50ff7e3365759603d39b7e079b Maximum standard deviation
// observed here is ~1.5e-2, thus we use this as sigma in our test.
// N O T E: Before adding any new test case below, make sure to calculate standard deviation for the
// test parameters using above notebook.
// Statistical tolerance used by _RAFT_RNG_TEST_BODY below: measured mean and
// variance must match the analytical values within NUM_SIGMA * MAX_SIGMA.
constexpr int NUM_SIGMA    = 4;
constexpr double MAX_SIGMA = 1.5e-2;

// Parameterized fixture for the raw-pointer RNG API: draws params.len samples
// from the distribution selected by params.type, reduces sum(x) and sum(x*x)
// on the device, and exposes the measured (mean, variance) in h_stats.
template <typename T>
class RngTest : public ::testing::TestWithParam<RngInputs<T>> {
 public:
  RngTest()
    : params(::testing::TestWithParam<RngInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(0, stream),
      stats(2, stream)
  {
    data.resize(params.len, stream);
    // stats[0] accumulates sum(x), stats[1] accumulates sum(x*x).
    RAFT_CUDA_TRY(cudaMemsetAsync(stats.data(), 0, 2 * sizeof(T), stream));
  }

 protected:
  void SetUp() override
  {
    RngState r(params.seed, params.gtype);
    // Generate params.len samples with the requested distribution; params.start
    // and params.end carry distribution-specific meanings (see RngInputs).
    switch (params.type) {
      case RNG_Normal: normal(handle, r, data.data(), params.len, params.start, params.end); break;
      case RNG_LogNormal:
        lognormal(handle, r, data.data(), params.len, params.start, params.end);
        break;
      case RNG_Uniform: uniform(handle, r, data.data(), params.len, params.start, params.end); break;
      case RNG_Gumbel: gumbel(handle, r, data.data(), params.len, params.start, params.end); break;
      case RNG_Logistic:
        logistic(handle, r, data.data(), params.len, params.start, params.end);
        break;
      case RNG_Exp: exponential(handle, r, data.data(), params.len, params.start); break;
      case RNG_Rayleigh: rayleigh(handle, r, data.data(), params.len, params.start); break;
      case RNG_Laplace: laplace(handle, r, data.data(), params.len, params.start, params.end); break;
    };
    static const int threads = 128;
    // Device-side reduction of sum(x) and sum(x*x) into stats[0..1].
    meanKernel<T, threads><<<raft::ceildiv(params.len, threads), threads, 0, stream>>>(
      stats.data(), data.data(), params.len);
    update_host<T>(h_stats, stats.data(), 2, stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    // Convert raw sums into sample mean and (biased) sample variance.
    h_stats[0] /= params.len;
    h_stats[1] = (h_stats[1] / params.len) - (h_stats[0] * h_stats[0]);
    // NOTE(review): this second synchronize is redundant — only host-side
    // arithmetic happens after the first one.
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

  // Fills meanvar[0] with the analytical mean and meanvar[1] with the
  // analytical variance of the configured distribution.
  void getExpectedMeanVar(T meanvar[2])
  {
    switch (params.type) {
      case RNG_Normal:
        meanvar[0] = params.start;
        meanvar[1] = params.end * params.end;
        break;
      case RNG_LogNormal: {
        // start = mu, end = sigma of the underlying normal distribution.
        auto var   = params.end * params.end;
        auto mu    = params.start;
        meanvar[0] = raft::exp(mu + var * T(0.5));
        meanvar[1] = (raft::exp(var) - T(1.0)) * raft::exp(T(2.0) * mu + var);
        break;
      }
      case RNG_Uniform:
        meanvar[0] = (params.start + params.end) * T(0.5);
        meanvar[1] = params.end - params.start;
        meanvar[1] = meanvar[1] * meanvar[1] / T(12.0);
        break;
      case RNG_Gumbel: {
        // Euler-Mascheroni constant.
        auto gamma = T(0.577215664901532);
        meanvar[0] = params.start + params.end * gamma;
        meanvar[1] = T(3.1415) * T(3.1415) * params.end * params.end / T(6.0);
        break;
      }
      case RNG_Logistic:
        meanvar[0] = params.start;
        meanvar[1] = T(3.1415) * T(3.1415) * params.end * params.end / T(3.0);
        break;
      case RNG_Exp:
        meanvar[0] = T(1.0) / params.start;
        meanvar[1] = meanvar[0] * meanvar[0];
        break;
      case RNG_Rayleigh:
        meanvar[0] = params.start * raft::sqrt(T(3.1415 / 2.0));
        meanvar[1] = ((T(4.0) - T(3.1415)) / T(2.0)) * params.start * params.start;
        break;
      case RNG_Laplace:
        meanvar[0] = params.start;
        meanvar[1] = T(2.0) * params.end * params.end;
        break;
    };
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  RngInputs<T> params;
  rmm::device_uvector<T> data, stats;
  T h_stats[2];  // mean, var
};

// Same statistical test as RngTest, but driven through the mdspan-based RNG
// API overloads (device_vector_view instead of raw pointer + length).
template <typename T>
class RngMdspanTest : public ::testing::TestWithParam<RngInputs<T>> {
 public:
  RngMdspanTest()
    : params(::testing::TestWithParam<RngInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(0, stream),
      stats(2, stream)
  {
    data.resize(params.len, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(stats.data(), 0, 2 * sizeof(T), stream));
  }

 protected:
  void SetUp() override
  {
    RngState r(params.seed, params.gtype);
    raft::device_vector_view<T> data_view(data.data(), data.size());
    // NOTE(review): `len` is computed but never used below; params.len is used
    // instead (the two are equal after the resize in the constructor).
    const auto len = data_view.extent(0);
    switch (params.type) {
      case RNG_Normal: normal(handle, r, data_view, params.start, params.end); break;
      case RNG_LogNormal: lognormal(handle, r, data_view, params.start, params.end); break;
      case RNG_Uniform: uniform(handle, r, data_view, params.start, params.end); break;
      case RNG_Gumbel: gumbel(handle, r, data_view, params.start, params.end); break;
      case RNG_Logistic: logistic(handle, r, data_view, params.start, params.end); break;
      case RNG_Exp: exponential(handle, r, data_view, params.start); break;
      case RNG_Rayleigh: rayleigh(handle, r, data_view, params.start); break;
      case RNG_Laplace: laplace(handle, r, data_view, params.start, params.end); break;
    };
    static const int threads = 128;
    meanKernel<T, threads><<<raft::ceildiv(params.len, threads), threads, 0, stream>>>(
      stats.data(), data.data(), params.len);
    update_host<T>(h_stats, stats.data(), 2, stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    h_stats[0] /= params.len;
    h_stats[1] = (h_stats[1] / params.len) - (h_stats[0] * h_stats[0]);
    // NOTE(review): redundant second synchronize, as in RngTest::SetUp.
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

  // Analytical mean/variance of the configured distribution (same table as
  // RngTest::getExpectedMeanVar).
  void getExpectedMeanVar(T meanvar[2])
  {
    switch (params.type) {
      case RNG_Normal:
        meanvar[0] = params.start;
        meanvar[1] = params.end * params.end;
        break;
      case RNG_LogNormal: {
        auto var   = params.end * params.end;
        auto mu    = params.start;
        meanvar[0] = raft::exp(mu + var * T(0.5));
        meanvar[1] = (raft::exp(var) - T(1.0)) * raft::exp(T(2.0) * mu + var);
        break;
      }
      case RNG_Uniform:
        meanvar[0] = (params.start + params.end) * T(0.5);
        meanvar[1] = params.end - params.start;
        meanvar[1] = meanvar[1] * meanvar[1] / T(12.0);
        break;
      case RNG_Gumbel: {
        auto gamma = T(0.577215664901532);
        meanvar[0] = params.start + params.end * gamma;
        meanvar[1] = T(3.1415) * T(3.1415) * params.end * params.end / T(6.0);
        break;
      }
      case RNG_Logistic:
        meanvar[0] = params.start;
        meanvar[1] = T(3.1415) * T(3.1415) * params.end * params.end / T(3.0);
        break;
      case RNG_Exp:
        meanvar[0] = T(1.0) / params.start;
        meanvar[1] = meanvar[0] * meanvar[0];
        break;
      case RNG_Rayleigh:
        meanvar[0] = params.start * raft::sqrt(T(3.1415 / 2.0));
        meanvar[1] = ((T(4.0) - T(3.1415)) / T(2.0)) * params.start * params.start;
        break;
      case RNG_Laplace:
        meanvar[0] = params.start;
        meanvar[1] = T(2.0) * params.end * params.end;
        break;
    };
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  RngInputs<T> params;
  rmm::device_uvector<T> data, stats;
  T h_stats[2];  // mean, var
};

// NOTE(review): RNG_Logistic has no entry in either input list below, so the
// logistic branch of the fixtures is never exercised.
const std::vector<RngInputs<float>> inputsf = {
  // Test with Philox
  {1024 * 1024, 3.0f, 1.3f, RNG_Normal, GenPhilox, 1234ULL},
  {1024 * 1024, 1.2f, 0.1f, RNG_LogNormal, GenPhilox, 1234ULL},
  {1024 * 1024, 1.2f, 5.5f, RNG_Uniform, GenPhilox, 1234ULL},
  {1024 * 1024, 0.1f, 1.3f, RNG_Gumbel, GenPhilox, 1234ULL},
  {1024 * 1024, 1.6f, 0.0f, RNG_Exp, GenPhilox, 1234ULL},
  {1024 * 1024, 1.6f, 0.0f, RNG_Rayleigh, GenPhilox, 1234ULL},
  {1024 * 1024, 2.6f, 1.3f, RNG_Laplace, GenPhilox, 1234ULL},
  // Test with PCG
  {1024 * 1024, 3.0f, 1.3f, RNG_Normal, GenPC, 1234ULL},
  {1024 * 1024, 1.2f, 0.1f, RNG_LogNormal, GenPC, 1234ULL},
  {1024 * 1024, 1.2f, 5.5f, RNG_Uniform, GenPC, 1234ULL},
  {1024 * 1024, 0.1f, 1.3f, RNG_Gumbel, GenPC, 1234ULL},
  {1024 * 1024, 1.6f, 0.0f, RNG_Exp, GenPC, 1234ULL},
  {1024 * 1024, 1.6f, 0.0f, RNG_Rayleigh, GenPC, 1234ULL},
  {1024 * 1024, 2.6f, 1.3f, RNG_Laplace, GenPC, 1234ULL}};

// Shared assertion body: measured mean/variance must match the analytical
// values within NUM_SIGMA * MAX_SIGMA.
#define _RAFT_RNG_TEST_BODY(VALUE_TYPE)                                                           \
  do {                                                                                            \
    VALUE_TYPE meanvar[2];                                                                        \
    getExpectedMeanVar(meanvar);                                                                  \
    ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<VALUE_TYPE>(NUM_SIGMA * MAX_SIGMA))); \
    ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<VALUE_TYPE>(NUM_SIGMA * MAX_SIGMA))); \
  } while (false)

using RngTestF = RngTest<float>;
TEST_P(RngTestF, Result) { _RAFT_RNG_TEST_BODY(float); }
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestF, ::testing::ValuesIn(inputsf));

using RngMdspanTestF = RngMdspanTest<float>;
TEST_P(RngMdspanTestF, Result) { _RAFT_RNG_TEST_BODY(float); }
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestF, ::testing::ValuesIn(inputsf));

// NOTE(review): the double-precision inputs reuse float ('f'-suffixed)
// literals; they convert implicitly to double.
const std::vector<RngInputs<double>> inputsd = {
  // Test with Philox
  {1024 * 1024, 3.0f, 1.3f, RNG_Normal, GenPhilox, 1234ULL},
  {1024 * 1024, 1.2f, 0.1f, RNG_LogNormal, GenPhilox, 1234ULL},
  {1024 * 1024, 1.2f, 5.5f, RNG_Uniform, GenPhilox, 1234ULL},
  {1024 * 1024, 0.1f, 1.3f, RNG_Gumbel, GenPhilox, 1234ULL},
  {1024 * 1024, 1.6f, 0.0f, RNG_Exp, GenPhilox, 1234ULL},
  {1024 * 1024, 1.6f, 0.0f, RNG_Rayleigh, GenPhilox, 1234ULL},
  {1024 * 1024, 2.6f, 1.3f, RNG_Laplace, GenPhilox, 1234ULL},
  // Test with PCG
  {1024 * 1024, 3.0f, 1.3f, RNG_Normal, GenPC, 1234ULL},
  {1024 * 1024, 1.2f, 0.1f, RNG_LogNormal, GenPC, 1234ULL},
  {1024 * 1024, 1.2f, 5.5f, RNG_Uniform, GenPC, 1234ULL},
  {1024 * 1024, 0.1f, 1.3f, RNG_Gumbel, GenPC, 1234ULL},
  {1024 * 1024, 1.6f, 0.0f, RNG_Exp, GenPC, 1234ULL},
  {1024 * 1024, 1.6f, 0.0f, RNG_Rayleigh, GenPC, 1234ULL},
  {1024 * 1024, 2.6f, 1.3f, RNG_Laplace, GenPC, 1234ULL}};

using RngTestD = RngTest<double>;
TEST_P(RngTestD, Result) { _RAFT_RNG_TEST_BODY(double); }
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestD, ::testing::ValuesIn(inputsd));

using RngMdspanTestD = RngMdspanTest<double>;
TEST_P(RngMdspanTestD, Result) { _RAFT_RNG_TEST_BODY(double); }
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestD, ::testing::ValuesIn(inputsd));

// ---------------------------------------------------------------------- //
// Test for expected variance in mean calculations

// Arithmetic mean of a host vector.
template <typename T>
T quick_mean(const std::vector<T>& d)
{
  T acc = T(0);
  for (const auto& di : d) {
    acc += di;
  }
  return acc / d.size();
}

// Sample standard deviation (n-1 denominator) of a host vector.
template <typename T>
T quick_std(const std::vector<T>& d)
{
  T acc    = T(0);
  T d_mean = quick_mean(d);
  for (const auto& di : d) {
    acc += ((di - d_mean) * (di - d_mean));
  }
  return std::sqrt(acc / (d.size() - 1));
}

// Debug helper: prints a vector as "[a, b, c]".
template <typename T>
std::ostream& operator<<(std::ostream& out, const std::vector<T>& v)
{
  if (!v.empty()) {
    out << '[';
    std::copy(v.begin(), v.end(), std::ostream_iterator<T>(out, ", "));
    out << "\b\b]";
  }
  return out;
}

// The following tests the two random number generators by checking that the measured mean error is
// close to the well-known analytical result (sigma/sqrt(n_samples)). To compute the mean error, we
// run a number of experiments computing the mean, giving us a distribution of the mean itself. The
// mean error is simply the standard deviation of this distribution (the standard deviation of the
// mean).
// Checks that the empirically measured error of the mean (std-dev of per-experiment
// means) agrees with the analytical value sigma/sqrt(n_samples) for both generators.
TEST(Rng, MeanError)
{
  timeb time_struct;
  ftime(&time_struct);
  int seed            = time_struct.millitm;  // time-based seed; reported on failure
  int num_samples     = 1024;
  int num_experiments = 1024;
  int len             = num_samples * num_experiments;
  raft::resources handle;
  auto stream = resource::get_cuda_stream(handle);
  rmm::device_uvector<float> data(len, stream);
  rmm::device_uvector<float> mean_result(num_experiments, stream);
  rmm::device_uvector<float> std_result(num_experiments, stream);

  for (auto rtype : {GenPhilox, GenPC}) {
    RngState r(seed, rtype);
    normal(handle, r, data.data(), len, 3.3f, 0.23f);
    // uniform(r, data, len, -1.0, 2.0);
    // Per-experiment (column-wise) mean and std-dev on the device.
    raft::stats::mean(
      mean_result.data(), data.data(), num_samples, num_experiments, false, false, stream);
    raft::stats::stddev(std_result.data(),
                        data.data(),
                        mean_result.data(),
                        num_samples,
                        num_experiments,
                        false,
                        false,
                        stream);
    std::vector<float> h_mean_result(num_experiments);
    std::vector<float> h_std_result(num_experiments);
    update_host(h_mean_result.data(), mean_result.data(), num_experiments, stream);
    update_host(h_std_result.data(), std_result.data(), num_experiments, stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    auto d_mean = quick_mean(h_mean_result);

    // std-dev of mean; also known as mean error
    auto d_std_of_mean            = quick_std(h_mean_result);
    auto d_std                    = quick_mean(h_std_result);
    auto d_std_of_mean_analytical = d_std / std::sqrt(num_samples);

    // std::cout << "measured mean error: " << d_std_of_mean << "\n";
    // std::cout << "expected mean error: " << d_std/std::sqrt(num_samples) << "\n";

    auto diff_expected_vs_measured_mean_error =
      std::abs(d_std_of_mean - d_std / std::sqrt(num_samples));

    ASSERT_TRUE((diff_expected_vs_measured_mean_error / d_std_of_mean_analytical < 0.5))
      << "Failed with seed: " << seed << "\nrtype: " << rtype;
  }
  // std::cout << "mean_res:" << h_mean_result << "\n";
}

// Fixture for scaled_bernoulli (raw-pointer API): all generated values must
// lie in [-scale, scale].
template <typename T, int len, int scale>
class ScaledBernoulliTest : public ::testing::Test {
 public:
  ScaledBernoulliTest() : stream(resource::get_cuda_stream(handle)), data(len, stream) {}

 protected:
  void SetUp() override
  {
    // FIX: previously this called cudaStreamCreate(&stream), overwriting the
    // handle-owned stream `data` was allocated on (leaking a stream and putting
    // the RNG work and the later update_host on different streams).
    RngState r(42);
    scaled_bernoulli(handle, r, data.data(), len, T(0.5), T(scale));
  }

  void rangeCheck()
  {
    auto h_data = std::make_unique<T[]>(len);
    update_host(h_data.get(), data.data(), len, stream);
    // FIX: synchronize before reading the host buffer — update_host is async.
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    ASSERT_TRUE(std::none_of(
      h_data.get(), h_data.get() + len, [](const T& a) { return a < -scale || a > scale; }));
  }

  raft::resources handle;
  cudaStream_t stream;
  rmm::device_uvector<T> data;
};

// Same range check via the mdspan-based scaled_bernoulli overload.
template <typename T, int len, int scale>
class ScaledBernoulliMdspanTest : public ::testing::Test {
 public:
  ScaledBernoulliMdspanTest() : stream(resource::get_cuda_stream(handle)), data(len, stream) {}

 protected:
  void SetUp() override
  {
    // FIX: removed the stray cudaStreamCreate(&stream) (see ScaledBernoulliTest).
    RngState r(42);
    raft::device_vector_view<T, int> data_view(data.data(), data.size());
    scaled_bernoulli(handle, r, data_view, T(0.5), T(scale));
  }

  void rangeCheck()
  {
    auto h_data = std::make_unique<T[]>(len);
    update_host(h_data.get(), data.data(), len, stream);
    // FIX: synchronize before reading the host buffer.
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    ASSERT_TRUE(std::none_of(
      h_data.get(), h_data.get() + len, [](const T& a) { return a < -scale || a > scale; }));
  }

  raft::resources handle;
  cudaStream_t stream;
  rmm::device_uvector<T> data;
};

using ScaledBernoulliTest1 = ScaledBernoulliTest<float, 500, 35>;
TEST_F(ScaledBernoulliTest1, RangeCheck) { rangeCheck(); }

using ScaledBernoulliMdspanTest1 = ScaledBernoulliMdspanTest<float, 500, 35>;
TEST_F(ScaledBernoulliMdspanTest1, RangeCheck) { rangeCheck(); }

using ScaledBernoulliTest2 = ScaledBernoulliTest<double, 100, 220>;
TEST_F(ScaledBernoulliTest2, RangeCheck) { rangeCheck(); }

using ScaledBernoulliMdspanTest2 = ScaledBernoulliMdspanTest<double, 100, 220>;
TEST_F(ScaledBernoulliMdspanTest2, RangeCheck) { rangeCheck(); }

// Fixture for bernoulli (raw-pointer API): the output must contain both true
// and false values.
template <typename T, int len>
class BernoulliTest : public ::testing::Test {
 public:
  BernoulliTest() : stream(resource::get_cuda_stream(handle)), data(len, stream) {}

 protected:
  void SetUp() override
  {
    RngState r(42);
    bernoulli(handle, r, data.data(), len, T(0.5));
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

  void trueFalseCheck()
  {
    // both true and false values must be present
    // FIX: use make_unique instead of raw new/delete — the raw buffer leaked
    // whenever an ASSERT fired before delete[]; also matches the Mdspan variant.
    auto h_data = std::make_unique<bool[]>(len);
    update_host(h_data.get(), data.data(), len, stream);
    // FIX: synchronize before reading the host buffer.
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    ASSERT_TRUE(std::any_of(h_data.get(), h_data.get() + len, [](bool a) { return a; }));
    ASSERT_TRUE(std::any_of(h_data.get(), h_data.get() + len, [](bool a) { return !a; }));
  }

  raft::resources handle;
  cudaStream_t stream;
  rmm::device_uvector<bool> data;
};

// Same true/false presence check via the mdspan-based bernoulli overload.
template <typename T, int len>
class BernoulliMdspanTest : public ::testing::Test {
 public:
  BernoulliMdspanTest() : stream(resource::get_cuda_stream(handle)), data(len, stream) {}

 protected:
  void SetUp() override
  {
    RngState r(42);
    raft::device_vector_view<bool, int> data_view(data.data(), data.size());
    bernoulli(handle, r, data_view, T(0.5));
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

  void trueFalseCheck()
  {
    // both true and false values must be present
    auto h_data = std::make_unique<bool[]>(len);
    update_host(h_data.get(), data.data(), len, stream);
    // FIX: synchronize before reading the host buffer.
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    ASSERT_TRUE(std::any_of(h_data.get(), h_data.get() + len, [](bool a) { return a; }));
    ASSERT_TRUE(std::any_of(h_data.get(), h_data.get() + len, [](bool a) { return !a; }));
  }

  raft::resources handle;
  cudaStream_t stream;
  rmm::device_uvector<bool> data;
};

using BernoulliTest1 = BernoulliTest<float, 1000>;
TEST_F(BernoulliTest1, TrueFalseCheck) { trueFalseCheck(); }

using BernoulliMdspanTest1 = BernoulliMdspanTest<float, 1000>;
TEST_F(BernoulliMdspanTest1, TrueFalseCheck) { trueFalseCheck(); }

using BernoulliTest2 = BernoulliTest<double, 1000>;
TEST_F(BernoulliTest2, TrueFalseCheck) { trueFalseCheck(); }

using BernoulliMdspanTest2 = BernoulliMdspanTest<double, 1000>;
TEST_F(BernoulliMdspanTest2, TrueFalseCheck) { trueFalseCheck(); }

/** Rng::normalTable tests */
template <typename T>
struct RngNormalTableInputs {
  T tolerance;          // per-test tolerance, scaled by num_sigma
  int rows, cols;       // table shape
  T mu, sigma;          // common mean / scalar sigma of the table
  GeneratorType gtype;  // generator under test
  unsigned long long int seed;
};

template <typename T>
::std::ostream& operator<<(::std::ostream& os, const RngNormalTableInputs<T>& dims)
{
  return os;
}

// Fixture for normalTable (raw-pointer API): generates a rows x cols table of
// normal samples with a per-column mu vector and scalar sigma, then checks the
// measured mean/variance in h_stats against (mu, sigma^2).
template <typename T>
class RngNormalTableTest : public ::testing::TestWithParam<RngNormalTableInputs<T>> {
 public:
  RngNormalTableTest()
    : params(::testing::TestWithParam<RngNormalTableInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream),
      stats(2, stream),
      mu_vec(params.cols, stream)
  {
    RAFT_CUDA_TRY(cudaMemsetAsync(stats.data(), 0, 2 * sizeof(T), stream));
  }

 protected:
  void SetUp() override
  {
    // Tests are configured with their expected test-values sigma. For example,
    // 4 x sigma indicates the test shouldn't fail 99.9% of the time.
    num_sigma = 10;
    int len   = params.rows * params.cols;
    RngState r(params.seed, params.gtype);
    fill(handle, r, mu_vec.data(), params.cols, params.mu);
    T* sigma_vec = nullptr;  // nullptr selects the scalar params.sigma
    normalTable(
      handle, r, data.data(), params.rows, params.cols, mu_vec.data(), sigma_vec, params.sigma);
    static const int threads = 128;
    meanKernel<T, threads>
      <<<raft::ceildiv(len, threads), threads, 0, stream>>>(stats.data(), data.data(), len);
    update_host<T>(h_stats, stats.data(), 2, stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    h_stats[0] /= len;
    h_stats[1] = (h_stats[1] / len) - (h_stats[0] * h_stats[0]);
    // FIX: removed a second, redundant cudaStreamSynchronize — only host-side
    // arithmetic happens after the sync above.
  }

  void getExpectedMeanVar(T meanvar[2])
  {
    meanvar[0] = params.mu;
    meanvar[1] = params.sigma * params.sigma;
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  RngNormalTableInputs<T> params;
  rmm::device_uvector<T> data, stats, mu_vec;
  T h_stats[2];  // mean, var
  int num_sigma;
};

// Same normalTable statistical check via the mdspan-based overloads.
template <typename T>
class RngNormalTableMdspanTest : public ::testing::TestWithParam<RngNormalTableInputs<T>> {
 public:
  RngNormalTableMdspanTest()
    : params(::testing::TestWithParam<RngNormalTableInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream),
      stats(2, stream),
      mu_vec(params.cols, stream)
  {
    RAFT_CUDA_TRY(cudaMemsetAsync(stats.data(), 0, 2 * sizeof(T), stream));
  }

 protected:
  void SetUp() override
  {
    // Tests are configured with their expected test-values sigma. For example,
    // 4 x sigma indicates the test shouldn't fail 99.9% of the time.
    num_sigma = 10;
    int len   = params.rows * params.cols;
    RngState r(params.seed, params.gtype);
    raft::device_matrix_view<T, int, raft::row_major> data_view(
      data.data(), params.rows, params.cols);
    raft::device_vector_view<const T, int> mu_vec_view(mu_vec.data(), params.cols);
    raft::device_vector_view<T, int> mu_vec_nc_view(mu_vec.data(), params.cols);
    // Scalar alternative of the sigma variant selects the scalar params.sigma.
    std::variant<raft::device_vector_view<const T, int>, T> sigma_var(params.sigma);
    fill(handle, r, params.mu, mu_vec_nc_view);
    normalTable(handle, r, mu_vec_view, sigma_var, data_view);
    static const int threads = 128;
    meanKernel<T, threads>
      <<<raft::ceildiv(len, threads), threads, 0, stream>>>(stats.data(), data.data(), len);
    update_host<T>(h_stats, stats.data(), 2, stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
    h_stats[0] /= len;
    h_stats[1] = (h_stats[1] / len) - (h_stats[0] * h_stats[0]);
    // FIX: removed a second, redundant cudaStreamSynchronize (see above).
  }

  void getExpectedMeanVar(T meanvar[2])
  {
    meanvar[0] = params.mu;
    meanvar[1] = params.sigma * params.sigma;
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  RngNormalTableInputs<T> params;
  rmm::device_uvector<T> data, stats, mu_vec;
  T h_stats[2];  // mean, var
  int num_sigma;
};

const std::vector<RngNormalTableInputs<float>> inputsf_t = {
  {0.0055, 32, 1024, 1.f, 1.f, GenPhilox, 1234ULL},
  {0.011, 8, 1024, 1.f, 1.f, GenPhilox, 1234ULL},
  {0.0055, 32, 1024, 1.f, 1.f, GenPC, 1234ULL},
  {0.011, 8, 1024, 1.f, 1.f, GenPC, 1234ULL}};

using RngNormalTableTestF = RngNormalTableTest<float>;
TEST_P(RngNormalTableTestF, Result)
{
  float meanvar[2];
  getExpectedMeanVar(meanvar);
  ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(num_sigma * params.tolerance)));
  ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(num_sigma * params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngNormalTableTests, RngNormalTableTestF, ::testing::ValuesIn(inputsf_t));

using RngNormalTableMdspanTestF = RngNormalTableMdspanTest<float>;
TEST_P(RngNormalTableMdspanTestF, Result)
{
  float meanvar[2];
  getExpectedMeanVar(meanvar);
  ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(num_sigma * params.tolerance)));
  ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(num_sigma * params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngNormalTableMdspanTests,
                         RngNormalTableMdspanTestF,
                         ::testing::ValuesIn(inputsf_t));

const std::vector<RngNormalTableInputs<double>> inputsd_t = {
  {0.0055, 32, 1024, 1.0, 1.0, GenPhilox, 1234ULL},
  {0.011, 8, 1024, 1.0, 1.0, GenPhilox, 1234ULL},
  {0.0055, 32, 1024, 1.0, 1.0, GenPC, 1234ULL},
  {0.011, 8, 1024, 1.0, 1.0, GenPC, 1234ULL}};

using RngNormalTableTestD = RngNormalTableTest<double>;
TEST_P(RngNormalTableTestD, Result)
{
  double meanvar[2];
  getExpectedMeanVar(meanvar);
  ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<double>(num_sigma * params.tolerance)));
  ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<double>(num_sigma * params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngNormalTableTests, RngNormalTableTestD, ::testing::ValuesIn(inputsd_t));

using RngNormalTableMdspanTestD = RngNormalTableMdspanTest<double>;
TEST_P(RngNormalTableMdspanTestD, Result)
{
  double meanvar[2];
  getExpectedMeanVar(meanvar);
  ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<double>(num_sigma * params.tolerance)));
  ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<double>(num_sigma * params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngNormalTableMdspanTests,
                         RngNormalTableMdspanTestD,
                         ::testing::ValuesIn(inputsd_t));

struct RngAffineInputs {
  int n;
  unsigned long long seed;
};

// Checks that affine_transform_params produces a valid affine permutation:
// `a` coprime with n and `b` in [0, n).
class RngAffineTest : public ::testing::TestWithParam<RngAffineInputs> {
 protected:
  void SetUp() override
  {
    params = ::testing::TestWithParam<RngAffineInputs>::GetParam();
    RngState r(params.seed);
    affine_transform_params(r, params.n, a, b);
  }

  void check()
  {
    ASSERT_TRUE(gcd(a, params.n) == 1);
    ASSERT_TRUE(0 <= b && b < params.n);
  }

 private:
  RngAffineInputs params;
  int a, b;
};  // RngAffineTest

const std::vector<RngAffineInputs> inputs_affine = {
  {100, 123456ULL},
  {100, 1234567890ULL},
  {101, 123456ULL},
  {101, 1234567890ULL},
  {7, 123456ULL},
  {7, 1234567890ULL},
  {2568, 123456ULL},
  {2568, 1234567890ULL},
};

TEST_P(RngAffineTest, Result) { check(); }
INSTANTIATE_TEST_SUITE_P(RngAffineTests, RngAffineTest, ::testing::ValuesIn(inputs_affine));

}  // namespace random
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/random/rng_int.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"

#include <cub/cub.cuh>
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

namespace raft {
namespace random {

using namespace raft::random;

// Only the integer uniform distribution is exercised in this file.
enum RandomType { RNG_Uniform };

// Accumulates sum(x) into out[0] and sum(x*x) into out[1] (as float) over the
// first `len` elements of `data`. One CUB block-reduce per block, then a
// single atomic per block folds the partial sums into global memory.
template <typename T, int TPB>
RAFT_KERNEL meanKernel(float* out, const T* data, int len)
{
  typedef cub::BlockReduce<float, TPB> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  int tid   = threadIdx.x + blockIdx.x * blockDim.x;
  // Out-of-range threads contribute 0 to both reductions.
  float val = tid < len ? data[tid] : T(0);
  float x   = BlockReduce(temp_storage).Sum(val);
  // Barrier before reusing temp_storage for the second reduction.
  __syncthreads();
  float xx = BlockReduce(temp_storage).Sum(val * val);
  __syncthreads();
  if (threadIdx.x == 0) {
    raft::myAtomicAdd(out, x);
    raft::myAtomicAdd(out + 1, xx);
  }
}

template <typename T>
struct RngInputs {
  float tolerance;  // allowed relative error on measured mean/variance
  int len;          // number of samples to generate
  // start, end: for uniform
  // mean, sigma: for normal/lognormal
  // mean, beta: for gumbel
  // mean, scale: for logistic and laplace
  // lambda: for exponential
  // sigma: for rayleigh
  T start, end;
  RandomType type;
  GeneratorType gtype;
  unsigned long long int seed;
};

template <typename T>
::std::ostream& operator<<(::std::ostream& os, const RngInputs<T>& dims)
{
  return os;
}

// Parameterized fixture for uniformInt (raw-pointer API): draws params.len
// integer samples, reduces sum(x)/sum(x*x) on the device, and exposes the
// measured (mean, variance) in h_stats for comparison against the analytical
// values of the discrete uniform distribution.
template <typename T>
class RngTest : public ::testing::TestWithParam<RngInputs<T>> {
 public:
  RngTest()
    : params(::testing::TestWithParam<RngInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(0, stream),
      stats(2, stream)
  {
    data.resize(params.len, stream);
    // stats[0] accumulates sum(x), stats[1] accumulates sum(x*x).
    RAFT_CUDA_TRY(cudaMemsetAsync(stats.data(), 0, 2 * sizeof(float), stream));
  }

 protected:
  void SetUp() override
  {
    RngState r(params.seed, params.gtype);
    switch (params.type) {
      case RNG_Uniform:
        uniformInt(handle, r, data.data(), params.len, params.start, params.end);
        break;
    };
    static const int threads = 128;
    meanKernel<T, threads><<<raft::ceildiv(params.len, threads), threads, 0, stream>>>(
      stats.data(), data.data(), params.len);
    update_host<float>(h_stats, stats.data(), 2, stream);
    resource::sync_stream(handle, stream);
    // Convert raw sums into sample mean and (biased) sample variance.
    h_stats[0] /= params.len;
    h_stats[1] = (h_stats[1] / params.len) - (h_stats[0] * h_stats[0]);
    // NOTE(review): this second sync is redundant — only host-side arithmetic
    // happens after the first one.
    resource::sync_stream(handle, stream);
  }

  // Analytical mean/variance of uniform over [start, end).
  void getExpectedMeanVar(float meanvar[2])
  {
    switch (params.type) {
      case RNG_Uniform:
        meanvar[0] = (params.start + params.end) * 0.5f;
        meanvar[1] = params.end - params.start;
        meanvar[1] = meanvar[1] * meanvar[1] / 12.f;
        break;
    };
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  RngInputs<T> params;
  rmm::device_uvector<T> data;
  rmm::device_uvector<float> stats;
  float h_stats[2];  // mean, var
};

// Same statistical test as RngTest, but driven through the mdspan-based
// uniformInt overload (device_vector_view instead of raw pointer + length).
template <typename T>
class RngMdspanTest : public ::testing::TestWithParam<RngInputs<T>> {
 public:
  RngMdspanTest()
    : params(::testing::TestWithParam<RngInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(0, stream),
      stats(2, stream)
  {
    data.resize(params.len, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(stats.data(), 0, 2 * sizeof(float), stream));
  }

 protected:
  void SetUp() override
  {
    RngState r(params.seed, params.gtype);
    raft::device_vector_view<T> data_view(data.data(), data.size());
    switch (params.type) {
      case RNG_Uniform: uniformInt(handle, r, data_view, params.start, params.end); break;
    };
    static const int threads = 128;
    meanKernel<T, threads><<<raft::ceildiv(params.len, threads), threads, 0, stream>>>(
      stats.data(), data.data(), params.len);
    update_host<float>(h_stats, stats.data(), 2, stream);
    resource::sync_stream(handle, stream);
    h_stats[0] /= params.len;
    h_stats[1] = (h_stats[1] / params.len) - (h_stats[0] * h_stats[0]);
    // NOTE(review): redundant second sync, as in RngTest::SetUp.
    resource::sync_stream(handle, stream);
  }

  void getExpectedMeanVar(float meanvar[2])
  {
    switch (params.type) {
      case RNG_Uniform:
        meanvar[0] = (params.start + params.end) * 0.5f;
        meanvar[1] = params.end - params.start;
        meanvar[1] = meanvar[1] * meanvar[1] / 12.f;
        break;
    };
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  RngInputs<T> params;
  rmm::device_uvector<T> data;
  rmm::device_uvector<float> stats;
  float h_stats[2];  // mean, var
};

const std::vector<RngInputs<uint32_t>> inputs_u32 = {
  {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
  {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
  {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL},
  {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL}};

using RngTestU32 = RngTest<uint32_t>;
TEST_P(RngTestU32, Result)
{
  float meanvar[2];
  getExpectedMeanVar(meanvar);
  ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
  ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestU32, ::testing::ValuesIn(inputs_u32));

using RngMdspanTestU32 = RngMdspanTest<uint32_t>;
TEST_P(RngMdspanTestU32, Result)
{
  float meanvar[2];
  getExpectedMeanVar(meanvar);
  ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
  ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestU32, ::testing::ValuesIn(inputs_u32));

const std::vector<RngInputs<uint64_t>> inputs_u64 = {
  {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
  {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
  {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL},
  {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL}};

using RngTestU64 = RngTest<uint64_t>;
TEST_P(RngTestU64, Result)
{
  float meanvar[2];
  getExpectedMeanVar(meanvar);
  ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
  ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestU64, ::testing::ValuesIn(inputs_u64));

using RngMdspanTestU64 = RngMdspanTest<uint64_t>;
TEST_P(RngMdspanTestU64, Result)
{
  float meanvar[2];
  getExpectedMeanVar(meanvar);
  ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
  ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestU64, ::testing::ValuesIn(inputs_u64));

const std::vector<RngInputs<int32_t>> inputs_s32 = {
  {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
  {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
  {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL},
  {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL}};

using RngTestS32 = RngTest<int32_t>;
TEST_P(RngTestS32, Result)
{
  float meanvar[2];
  getExpectedMeanVar(meanvar);
  ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
  ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestS32, ::testing::ValuesIn(inputs_s32));

using RngMdspanTestS32 = RngMdspanTest<int32_t>;
TEST_P(RngMdspanTestS32, Result)
{
  float meanvar[2];
  getExpectedMeanVar(meanvar);
  ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
  ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestS32, ::testing::ValuesIn(inputs_s32));

const std::vector<RngInputs<int64_t>> inputs_s64 = {
  {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
  {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL},
  {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL},
  {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPC, 1234ULL}};

using RngTestS64 = RngTest<int64_t>;
TEST_P(RngTestS64, Result)
{
  float meanvar[2];
  getExpectedMeanVar(meanvar);
  ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
  ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngTests, RngTestS64, ::testing::ValuesIn(inputs_s64));

using RngMdspanTestS64 = RngMdspanTest<int64_t>;
TEST_P(RngMdspanTestS64, Result)
{
  float meanvar[2];
  getExpectedMeanVar(meanvar);
  ASSERT_TRUE(match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance)));
  ASSERT_TRUE(match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(RngMdspanTests, RngMdspanTestS64, ::testing::ValuesIn(inputs_s64));

}  // namespace random
}  // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_canberra.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Canberra-distance instantiation of the shared pairwise-distance test fixture.
template <typename DataType>
class DistanceCanberra : public DistanceTest<raft::distance::DistanceType::Canberra, DataType> {};

// {tolerance, m, n, k, isRowMajor, seed}
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 1024, 1024, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};

using DistanceCanberraF = DistanceCanberra<float>;
TEST_P(DistanceCanberraF, Result)
{
  // Column-major runs store the result transposed, so swap the logical shape.
  const bool row_major = params.isRowMajor;
  const int rows       = row_major ? params.m : params.n;
  const int cols       = row_major ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(dist_ref.data(),
                                dist.data(),
                                rows,
                                cols,
                                raft::CompareApprox<float>(params.tolerance),
                                stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceCanberraF, ::testing::ValuesIn(inputsf));

const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001, 1024, 1024, 32, false, 1234ULL},
  {0.001, 1024, 32, 1024, false, 1234ULL},
  {0.001, 32, 1024, 1024, false, 1234ULL},
  {0.003, 1024, 1024, 1024, false, 1234ULL},
};

using DistanceCanberraD = DistanceCanberra<double>;
TEST_P(DistanceCanberraD, Result)
{
  const bool row_major = params.isRowMajor;
  const int rows       = row_major ? params.m : params.n;
  const int cols       = row_major ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(dist_ref.data(),
                                dist.data(),
                                rows,
                                cols,
                                raft::CompareApprox<double>(params.tolerance),
                                stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceCanberraD, ::testing::ValuesIn(inputsd));

// Smoke test: the huge-matrix fixture does all its work in SetUp.
class BigMatrixCanberra : public BigMatrixDistanceTest<raft::distance::DistanceType::Canberra> {};
TEST_F(BigMatrixCanberra, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_inner_product.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Typed fixture for the pairwise inner-product "distance"; the shared
// DistanceTest harness (distance_base.cuh) generates inputs, computes a naive
// reference, and runs the raft implementation.
template <typename DataType>
class DistanceInnerProduct
  : public DistanceTest<raft::distance::DistanceType::InnerProduct, DataType> {};

// Each entry is {tolerance, m, n, k, isRowMajor, seed}.
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 10, 5, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceInnerProduct<float> DistanceInnerProductF;
TEST_P(DistanceInnerProductF, Result)
{
  // For column-major runs the output matrix dimensions are swapped.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceInnerProductF, ::testing::ValuesIn(inputsf));

// Fix: the last four entries previously used float-suffixed literals
// (0.001f / 0.003f) inside this *double* input list. (double)0.001f is
// 0.00100000004749745..., not the 0.001 used by the first four entries and by
// every sibling distance test; plain double literals restore the intended
// tolerances.
const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001, 1024, 1024, 32, false, 1234ULL},
  {0.001, 1024, 32, 1024, false, 1234ULL},
  {0.001, 32, 1024, 1024, false, 1234ULL},
  {0.003, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceInnerProduct<double> DistanceInnerProductD;
TEST_P(DistanceInnerProductD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceInnerProductD, ::testing::ValuesIn(inputsd));

// Large-matrix smoke test; empty body — the check presumably runs inside the
// BigMatrixDistanceTest fixture (see distance_base.cuh).
class BigMatrixInnerProduct
  : public BigMatrixDistanceTest<raft::distance::DistanceType::InnerProduct> {};
TEST_F(BigMatrixInnerProduct, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_adj_distance_instance.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This translation unit provides the explicit instantiation *definitions*
// matching the `extern template` declarations in dist_adj.cuh. The #undef
// below re-enables implicit instantiation so the `template void/size_t ...`
// statements here actually emit code.
#undef RAFT_EXPLICIT_INSTANTIATE_ONLY

#include "dist_adj_threshold.cuh"

#include <cstdint>

#include <raft/distance/distance-inl.cuh>

// Expands to an explicit instantiation definition of raft::distance::distance
// for one (DistanceType, data, accumulator, output, final-op, index) combo.
#define instantiate_raft_distance_distance(DT, DataT, AccT, OutT, FinalLambda, IdxT) \
  template void raft::distance::distance<DT, DataT, AccT, OutT, FinalLambda, IdxT>(  \
    raft::resources const& handle,                                                   \
    const DataT* x,                                                                  \
    const DataT* y,                                                                  \
    OutT* dist,                                                                      \
    IdxT m,                                                                          \
    IdxT n,                                                                          \
    IdxT k,                                                                          \
    void* workspace,                                                                 \
    size_t worksize,                                                                 \
    FinalLambda fin_op,                                                              \
    bool isRowMajor,                                                                 \
    DataT metric_arg)

instantiate_raft_distance_distance(raft::distance::DistanceType::L2Expanded,
                                   float,
                                   float,
                                   uint8_t,
                                   raft::distance::threshold_float,
                                   int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2Expanded,
                                   double,
                                   double,
                                   uint8_t,
                                   raft::distance::threshold_double,
                                   int);

#undef instantiate_raft_distance_distance

// Same pattern for the workspace-size query used before calling distance().
#define instantiate_raft_distance_getWorkspaceSize(DistT, DataT, AccT, OutT, IdxT)  \
  template size_t raft::distance::getWorkspaceSize<DistT, DataT, AccT, OutT, IdxT>( \
    const DataT* x, const DataT* y, IdxT m, IdxT n, IdxT k)

instantiate_raft_distance_getWorkspaceSize(
  raft::distance::DistanceType::L2Expanded, float, float, uint8_t, int);
instantiate_raft_distance_getWorkspaceSize(
  raft::distance::DistanceType::L2Expanded, double, double, uint8_t, int);

#undef instantiate_raft_distance_getWorkspaceSize
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_adj.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Fix: added the include guard this header was missing.
#pragma once

// Explicit instantiation *declarations* (`extern template`) for the distance
// kernels used by the adjacency tests. The matching definitions live in
// dist_adj_distance_instance.cu, which #undefs RAFT_EXPLICIT_INSTANTIATE_ONLY
// and instantiates exactly the same signatures.
#include "dist_adj_threshold.cuh"

#include <raft/distance/distance.cuh>

#include <cstdint>  // uint8_t (previously pulled in only transitively)

#define instantiate_raft_distance_distance(DT, DataT, AccT, OutT, FinalLambda, IdxT)       \
  extern template void raft::distance::distance<DT, DataT, AccT, OutT, FinalLambda, IdxT>( \
    raft::resources const& handle,                                                         \
    const DataT* x,                                                                        \
    const DataT* y,                                                                        \
    OutT* dist,                                                                            \
    IdxT m,                                                                                \
    IdxT n,                                                                                \
    IdxT k,                                                                                \
    void* workspace,                                                                       \
    size_t worksize,                                                                       \
    FinalLambda fin_op,                                                                    \
    bool isRowMajor,                                                                       \
    DataT metric_arg)

instantiate_raft_distance_distance(raft::distance::DistanceType::L2Expanded,
                                   float,
                                   float,
                                   uint8_t,
                                   raft::distance::threshold_float,
                                   int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2Expanded,
                                   double,
                                   double,
                                   uint8_t,
                                   raft::distance::threshold_double,
                                   int);

#undef instantiate_raft_distance_distance

#define instantiate_raft_distance_getWorkspaceSize(DistT, DataT, AccT, OutT, IdxT)         \
  extern template size_t raft::distance::getWorkspaceSize<DistT, DataT, AccT, OutT, IdxT>( \
    const DataT* x, const DataT* y, IdxT m, IdxT n, IdxT k)

instantiate_raft_distance_getWorkspaceSize(
  raft::distance::DistanceType::L2Expanded, float, float, uint8_t, int);
instantiate_raft_distance_getWorkspaceSize(
  raft::distance::DistanceType::L2Expanded, double, double, uint8_t, int);

#undef instantiate_raft_distance_getWorkspaceSize

// NOTE(review): a second, byte-identical copy of the getWorkspaceSize macro
// block (define / two instantiations / #undef) followed here in the original;
// it re-declared exactly the same extern templates and has been removed as
// pure copy-paste duplication.
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/gram.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "gram_base.cuh"
#include <gtest/gtest.h>
#include <iostream>
#include <memory>
#include <raft/distance/distance_types.hpp>
#include <raft/distance/kernels.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>

namespace raft::distance::kernels {

struct GramMatrixInputs {
  int n1;      // feature vectors in matrix 1
  int n2;      // feature vectors in matrix 2
  int n_cols;  // number of elements in a feature vector
  bool is_row_major;
  KernelParams kernel;
  // Leading dimensions of the two inputs and the output; 0 means "derive the
  // tightly-packed value from the shape" (done in the fixture constructor).
  int ld1;
  int ld2;
  int ld_out;
  // We will generate random input using the dimensions given here.
  // The reference output is calculated by a custom kernel.
};

// Pretty-printer so gtest names/failures identify the parameter combination,
// e.g. "/42x137x2/RowMajor/linear/ld_64x179x181".
std::ostream& operator<<(std::ostream& os, const GramMatrixInputs& p)
{
  std::vector<std::string> kernel_names{"linear", "poly", "rbf", "tanh"};
  os << "/" << p.n1 << "x" << p.n2 << "x" << p.n_cols << "/"
     << (p.is_row_major ? "RowMajor/" : "ColMajor/") << kernel_names[p.kernel.kernel] << "/ld_"
     << p.ld1 << "x" << p.ld2 << "x" << p.ld_out;
  return os;
}

const std::vector<GramMatrixInputs> inputs = {
  {42, 137, 2, false, {KernelType::LINEAR}},
  {42, 137, 2, true, {KernelType::LINEAR}},
  {42, 137, 2, false, {KernelType::LINEAR}, 64, 179, 181},
  {42, 137, 2, true, {KernelType::LINEAR}, 64, 179, 181},
  {137, 42, 2, false, {KernelType::POLYNOMIAL, 2, 0.5, 2.4}},
  {137, 42, 2, true, {KernelType::POLYNOMIAL, 2, 0.5, 2.4}},
  {137, 42, 2, false, {KernelType::POLYNOMIAL, 2, 0.5, 2.4}, 159, 73, 144},
  {137, 42, 2, true, {KernelType::POLYNOMIAL, 2, 0.5, 2.4}, 159, 73, 144},
  {42, 137, 2, false, {KernelType::TANH, 0, 0.5, 2.4}},
  {42, 137, 2, true, {KernelType::TANH, 0, 0.5, 2.4}},
  {42, 137, 2, false, {KernelType::TANH, 0, 0.5, 2.4}, 64, 155, 49},
  {42, 137, 2, true, {KernelType::TANH, 0, 0.5, 2.4}, 64, 155, 143},
  {3, 4, 2, false, {KernelType::RBF, 0, 0.5}},
  {42, 137, 2, false, {KernelType::RBF, 0, 0.5}},
  {42, 137, 2, true, {KernelType::RBF, 0, 0.5}},
  // Distance kernel does not support LD parameter yet.
  //{42, 137, 2, false, {KernelType::RBF, 0, 0.5}, 64, 155, 49},
  // {42, 137, 2, true, {KernelType::RBF, 0, 0.5}, 64, 155, 143},
};

// Compares a GPU Gram-matrix kernel (built by KernelFactory from the params)
// against the naive host-driven reference in gram_base.cuh.
template <typename math_t>
class GramMatrixTest : public ::testing::TestWithParam<GramMatrixInputs> {
 protected:
  GramMatrixTest()
    : params(GetParam()),
      handle(),
      x1(0, resource::get_cuda_stream(handle)),
      x2(0, resource::get_cuda_stream(handle)),
      gram(0, resource::get_cuda_stream(handle)),
      gram_host(0)
  {
    auto stream = resource::get_cuda_stream(handle);

    // ld == 0 means tightly packed; derive it from the layout and shape.
    if (params.ld1 == 0) { params.ld1 = params.is_row_major ? params.n_cols : params.n1; }
    if (params.ld2 == 0) { params.ld2 = params.is_row_major ? params.n_cols : params.n2; }
    if (params.ld_out == 0) { params.ld_out = params.is_row_major ? params.n2 : params.n1; }

    // Derive the size of the output from the offset of the last element.
    size_t size = get_offset(params.n1 - 1, params.n_cols - 1, params.ld1, params.is_row_major) + 1;
    x1.resize(size, stream);
    size = get_offset(params.n2 - 1, params.n_cols - 1, params.ld2, params.is_row_major) + 1;
    x2.resize(size, stream);
    size = get_offset(params.n1 - 1, params.n2 - 1, params.ld_out, params.is_row_major) + 1;
    gram.resize(size, stream);

    // Zero both the device output and the host reference so padding regions
    // (introduced by ld > tight) compare equal.
    RAFT_CUDA_TRY(cudaMemsetAsync(gram.data(), 0, gram.size() * sizeof(math_t), stream));
    gram_host.resize(gram.size());
    std::fill(gram_host.begin(), gram_host.end(), 0);

    raft::random::RngState rng(42137ULL);
    raft::random::uniform(handle, rng, x1.data(), x1.size(), math_t(0), math_t(1));
    raft::random::uniform(handle, rng, x2.data(), x2.size(), math_t(0), math_t(1));
  }

  ~GramMatrixTest() override {}

  void runTest()
  {
    std::unique_ptr<GramMatrixBase<math_t>> kernel =
      std::unique_ptr<GramMatrixBase<math_t>>(KernelFactory<math_t>::create(params.kernel));

    // Both make_device_strided_matrix_view branches below carry explicit
    // strides, so the ternary yields one common view type per operand.
    auto x1_span =
      params.is_row_major
        ? raft::make_device_strided_matrix_view<const math_t, int, raft::layout_c_contiguous>(
            x1.data(), params.n1, params.n_cols, params.ld1)
        : raft::make_device_strided_matrix_view<const math_t, int, raft::layout_f_contiguous>(
            x1.data(), params.n1, params.n_cols, params.ld1);
    auto x2_span =
      params.is_row_major
        ? raft::make_device_strided_matrix_view<const math_t, int, raft::layout_c_contiguous>(
            x2.data(), params.n2, params.n_cols, params.ld2)
        : raft::make_device_strided_matrix_view<const math_t, int, raft::layout_f_contiguous>(
            x2.data(), params.n2, params.n_cols, params.ld2);
    auto out_span =
      params.is_row_major
        ? raft::make_device_strided_matrix_view<math_t, int, raft::layout_c_contiguous>(
            gram.data(), params.n1, params.n2, params.ld_out)
        : raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
            gram.data(), params.n1, params.n2, params.ld_out);

    (*kernel)(handle, x1_span, x2_span, out_span);

    auto stream = resource::get_cuda_stream(handle);
    naiveGramMatrixKernel(params.n1,
                          params.n2,
                          params.n_cols,
                          x1,
                          x2,
                          gram_host.data(),
                          params.ld1,
                          params.ld2,
                          params.ld_out,
                          params.is_row_major,
                          params.kernel,
                          stream,
                          handle);

    ASSERT_TRUE(raft::devArrMatchHost(
      gram_host.data(), gram.data(), gram.size(), raft::CompareApprox<math_t>(1e-6f), stream));
  }

  GramMatrixInputs params;
  raft::resources handle;
  rmm::device_uvector<math_t> x1;
  rmm::device_uvector<math_t> x2;
  rmm::device_uvector<math_t> gram;       // device output of the kernel under test
  std::vector<math_t> gram_host;          // host-side reference result
};

typedef GramMatrixTest<float> GramMatrixTestFloat;
// NOTE(review): GramMatrixTestDouble is typedef'd but never instantiated via
// TEST_P/INSTANTIATE — double coverage appears to be missing; confirm intent.
typedef GramMatrixTest<double> GramMatrixTestDouble;

TEST_P(GramMatrixTestFloat, Gram) { runTest(); }

INSTANTIATE_TEST_SUITE_P(GramMatrixTests, GramMatrixTestFloat, ::testing::ValuesIn(inputs));
};  // end namespace raft::distance::kernels
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_kl_divergence.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Typed fixture for the pairwise KL-divergence distance; all mechanics live in
// the shared DistanceTest harness (distance_base.cuh). This file only
// registers concrete shapes and tolerances.
template <typename DataType>
class DistanceKLDivergence
  : public DistanceTest<raft::distance::DistanceType::KLDivergence, DataType> {};

// {tolerance, m, n, k, isRowMajor, seed}; each shape runs in both layouts.
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 1024, 1024, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceKLDivergence<float> DistanceKLDivergenceF;
TEST_P(DistanceKLDivergenceF, Result)
{
  // Column-major runs swap the output dimensions.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceKLDivergenceF, ::testing::ValuesIn(inputsf));

const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001, 1024, 1024, 32, false, 1234ULL},
  {0.001, 1024, 32, 1024, false, 1234ULL},
  {0.001, 32, 1024, 1024, false, 1234ULL},
  {0.003, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceKLDivergence<double> DistanceKLDivergenceD;
TEST_P(DistanceKLDivergenceD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceKLDivergenceD, ::testing::ValuesIn(inputsd));

// Large-matrix smoke test; empty body — the check presumably runs inside the
// BigMatrixDistanceTest fixture (see distance_base.cuh).
class BigMatrixKLDivergence
  : public BigMatrixDistanceTest<raft::distance::DistanceType::KLDivergence> {};
TEST_F(BigMatrixKLDivergence, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_jensen_shannon.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Typed fixture for the pairwise Jensen-Shannon distance; generation,
// reference computation and comparison all happen in the shared DistanceTest
// harness (distance_base.cuh).
template <typename DataType>
class DistanceJensenShannon
  : public DistanceTest<raft::distance::DistanceType::JensenShannon, DataType> {};

// {tolerance, m, n, k, isRowMajor, seed}; each shape runs in both layouts.
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 1024, 1024, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceJensenShannon<float> DistanceJensenShannonF;
TEST_P(DistanceJensenShannonF, Result)
{
  // Column-major runs swap the output dimensions.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceJensenShannonF, ::testing::ValuesIn(inputsf));

const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001, 1024, 1024, 32, false, 1234ULL},
  {0.001, 1024, 32, 1024, false, 1234ULL},
  {0.001, 32, 1024, 1024, false, 1234ULL},
  {0.003, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceJensenShannon<double> DistanceJensenShannonD;
TEST_P(DistanceJensenShannonD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceJensenShannonD, ::testing::ValuesIn(inputsd));

// Large-matrix smoke test; empty body — the check presumably runs inside the
// BigMatrixDistanceTest fixture (see distance_base.cuh).
class BigMatrixJensenShannon
  : public BigMatrixDistanceTest<raft::distance::DistanceType::JensenShannon> {};
TEST_F(BigMatrixJensenShannon, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_adj.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>

#include "dist_adj.cuh"

namespace raft {
namespace distance {

// Reference kernel: one thread per (midx, nidx) output element. Writes 1 when
// the *squared* L2 distance between row midx of x and row nidx of y is <= eps,
// else 0. Supports both row- and column-major input/output layouts.
template <typename DataType>
RAFT_KERNEL naiveDistanceAdjKernel(uint8_t* dist,
                                   const DataType* x,
                                   const DataType* y,
                                   int m,
                                   int n,
                                   int k,
                                   DataType eps,
                                   bool isRowMajor)
{
  int midx = threadIdx.x + blockIdx.x * blockDim.x;
  int nidx = threadIdx.y + blockIdx.y * blockDim.y;
  if (midx >= m || nidx >= n) return;  // grid tail guard
  DataType acc = DataType(0);
  for (int i = 0; i < k; ++i) {
    int xidx  = isRowMajor ? i + midx * k : i * m + midx;
    int yidx  = isRowMajor ? i + nidx * k : i * n + nidx;
    auto diff = x[xidx] - y[yidx];
    acc += diff * diff;
  }
  int outidx   = isRowMajor ? midx * n + nidx : midx + m * nidx;
  dist[outidx] = acc <= eps;
}

// Host launcher for the reference kernel above.
template <typename DataType>
void naiveDistanceAdj(uint8_t* dist,
                      const DataType* x,
                      const DataType* y,
                      int m,
                      int n,
                      int k,
                      DataType eps,
                      bool isRowMajor,
                      cudaStream_t stream)
{
  static const dim3 TPB(16, 32, 1);
  dim3 nblks(raft::ceildiv(m, (int)TPB.x), raft::ceildiv(n, (int)TPB.y), 1);
  naiveDistanceAdjKernel<DataType><<<nblks, TPB, 0, stream>>>(dist, x, y, m, n, k, eps, isRowMajor);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

template <typename DataType>
struct DistanceAdjInputs {
  DataType eps;  // threshold compared against the squared L2 distance
  int m, n, k;
  bool isRowMajor;
  unsigned long long int seed;
};

// Fix: this operator previously returned the stream without printing anything,
// so gtest failure messages could not identify the failing parameter set.
template <typename DataType>
::std::ostream& operator<<(::std::ostream& os, const DistanceAdjInputs<DataType>& dims)
{
  os << "eps: " << dims.eps << ", m: " << dims.m << ", n: " << dims.n << ", k: " << dims.k
     << ", isRowMajor: " << dims.isRowMajor << ", seed: " << dims.seed;
  return os;
}

// Compares the raft L2Expanded distance with a threshold final-op against the
// naive adjacency reference.
template <typename DataType>
class DistanceAdjTest : public ::testing::TestWithParam<DistanceAdjInputs<DataType>> {
 public:
  // Fix: the initializer list runs in member *declaration* order, and handle/
  // stream were previously declared after the uvectors — so dist_ref/dist were
  // constructed from an indeterminate cudaStream_t (undefined behavior).
  // Declaring handle and stream first (see the member list below) makes the
  // stream valid before the buffers are allocated.
  DistanceAdjTest()
    : params(::testing::TestWithParam<DistanceAdjInputs<DataType>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      dist_ref(params.m * params.n, stream),
      dist(params.m * params.n, stream)
  {
  }

  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    int m           = params.m;
    int n           = params.n;
    int k           = params.k;
    bool isRowMajor = params.isRowMajor;

    rmm::device_uvector<DataType> x(m * k, stream);
    rmm::device_uvector<DataType> y(n * k, stream);
    uniform(handle, r, x.data(), m * k, DataType(-1.0), DataType(1.0));
    uniform(handle, r, y.data(), n * k, DataType(-1.0), DataType(1.0));

    DataType threshold = params.eps;

    // Ground truth.
    naiveDistanceAdj(dist_ref.data(), x.data(), y.data(), m, n, k, threshold, isRowMajor, stream);

    // Implementation under test: L2Expanded distance with a thresholding
    // final op that maps each distance to a 0/1 adjacency value.
    size_t worksize = raft::distance::
      getWorkspaceSize<raft::distance::DistanceType::L2Expanded, DataType, DataType, uint8_t>(
        x.data(), y.data(), m, n, k);
    rmm::device_uvector<char> workspace(worksize, stream);

    using threshold_final_op_ = threshold_final_op<DataType, DataType, uint8_t, int>;
    threshold_final_op_ threshold_op(threshold);

    raft::distance::distance<raft::distance::DistanceType::L2Expanded,
                             DataType,
                             DataType,
                             uint8_t,
                             threshold_final_op_>(handle,
                                                  x.data(),
                                                  y.data(),
                                                  dist.data(),
                                                  m,
                                                  n,
                                                  k,
                                                  workspace.data(),
                                                  worksize,
                                                  threshold_op,
                                                  isRowMajor);
    resource::sync_stream(handle, stream);
  }

  void TearDown() override {}

 protected:
  DistanceAdjInputs<DataType> params;
  // handle/stream must precede the uvectors: members initialize in
  // declaration order, and both buffers are constructed on `stream`.
  raft::resources handle;
  cudaStream_t stream;
  // We use uint8_t even if the output in this test is a bool because
  // cutlass doesn't support bool as output buffer yet. In cuda
  // sizeof(bool) is 1 byte hence it doesn't increase
  // memory consumption if we use uint8_t instead of bool.
  rmm::device_uvector<uint8_t> dist_ref;
  rmm::device_uvector<uint8_t> dist;
};

const std::vector<DistanceAdjInputs<float>> inputsf = {
  {0.01f, 1024, 1024, 32, true, 1234ULL},
  {0.1f, 1024, 1024, 32, true, 1234ULL},
  {1.0f, 1024, 1024, 32, true, 1234ULL},
  {10.0f, 1024, 1024, 32, true, 1234ULL},
  {0.01f, 1024, 1024, 32, false, 1234ULL},
  {0.1f, 1024, 1024, 32, false, 1234ULL},
  {1.0f, 1024, 1024, 32, false, 1234ULL},
  {10.0f, 1024, 1024, 32, false, 1234ULL},
};
typedef DistanceAdjTest<float> DistanceAdjTestF;
TEST_P(DistanceAdjTestF, Result)
{
  // Column-major runs swap the output dimensions.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(devArrMatch(dist_ref.data(), dist.data(), m, n, raft::Compare<uint8_t>(), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestF, ::testing::ValuesIn(inputsf));

const std::vector<DistanceAdjInputs<double>> inputsd = {
  {0.01, 1024, 1024, 32, true, 1234ULL},
  {0.1, 1024, 1024, 32, true, 1234ULL},
  {1.0, 1024, 1024, 32, true, 1234ULL},
  {10.0, 1024, 1024, 32, true, 1234ULL},
  {0.01, 1024, 1024, 32, false, 1234ULL},
  {0.1, 1024, 1024, 32, false, 1234ULL},
  {1.0, 1024, 1024, 32, false, 1234ULL},
  {10.0, 1024, 1024, 32, false, 1234ULL},
};
typedef DistanceAdjTest<double> DistanceAdjTestD;
TEST_P(DistanceAdjTestD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(devArrMatch(dist_ref.data(), dist.data(), m, n, raft::Compare<uint8_t>(), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestD, ::testing::ValuesIn(inputsd));

}  // namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/masked_nn.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.h" #include <gtest/gtest.h> #include <iostream> #include <raft/core/device_mdarray.hpp> #include <raft/core/device_mdspan.hpp> #include <raft/core/kvp.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/distance/detail/masked_nn.cuh> #include <raft/distance/masked_nn.cuh> #include <raft/linalg/norm.cuh> #include <raft/random/rng.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <raft/util/itertools.hpp> namespace raft::distance::masked_nn { // The adjacency pattern determines what distances get computed. 
enum AdjacencyPattern { checkerboard = 0, // adjacency matrix looks like a checkerboard (half the distances are computed) checkerboard_4 = 1, // checkerboard with tiles of size 4x4 checkerboard_64 = 2, // checkerboard with tiles of size 64x64 all_true = 3, // no distance computations can be skipped all_false = 4 // all distance computations can be skipped }; // Kernels: // - init_adj: to initialize the adjacency kernel with a specific adjacency pattern // - referenceKernel: to produce the ground-truth output RAFT_KERNEL init_adj(AdjacencyPattern pattern, int n, raft::device_matrix_view<bool, int, raft::layout_c_contiguous> adj, raft::device_vector_view<int, int, raft::layout_c_contiguous> group_idxs) { int m = adj.extent(0); int num_groups = adj.extent(1); for (int idx_m = blockIdx.y * blockDim.y + threadIdx.y; idx_m < m; idx_m += blockDim.y * gridDim.y) { for (int idx_g = blockIdx.x * blockDim.x + threadIdx.x; idx_g < num_groups; idx_g += blockDim.x * gridDim.x) { switch (pattern) { case checkerboard: adj(idx_m, idx_g) = (idx_m + idx_g) % 2; break; case checkerboard_4: adj(idx_m, idx_g) = (idx_m / 4 + idx_g) % 2; break; case checkerboard_64: adj(idx_m, idx_g) = (idx_m / 64 + idx_g) % 2; break; case all_true: adj(idx_m, idx_g) = true; break; case all_false: adj(idx_m, idx_g) = false; break; default: assert(false && "unknown pattern"); } } } // Each group is of size n / num_groups. // // - group_idxs[j] indicates the start of group j + 1 (i.e. is the inclusive // scan of the group lengths) // // - The first group always starts at index zero, so we do not store it. // // - The group_idxs[num_groups - 1] should always equal n. 
if (blockIdx.y == 0 && threadIdx.y == 0) { const int g_stride = blockDim.x * gridDim.x; for (int idx_g = blockIdx.x * blockDim.x + threadIdx.x; idx_g < num_groups; idx_g += g_stride) { group_idxs(idx_g) = (idx_g + 1) * (n / num_groups); } group_idxs(num_groups - 1) = n; } } template <typename DataT, typename ReduceOpT, int NWARPS> __launch_bounds__(32 * NWARPS, 2) RAFT_KERNEL referenceKernel(raft::KeyValuePair<int, DataT>* min, DataT* x, DataT* y, bool* adj, int* group_idxs, int m, int n, int k, int num_groups, bool sqrt, int* workspace, DataT maxVal) { const int m_stride = blockDim.y * gridDim.y; const int m_offset = threadIdx.y + blockIdx.y * blockDim.y; const int n_stride = blockDim.x * gridDim.x; const int n_offset = threadIdx.x + blockIdx.x * blockDim.x; for (int m_grid = 0; m_grid < m; m_grid += m_stride) { for (int n_grid = 0; n_grid < n; n_grid += n_stride) { int midx = m_grid + m_offset; int nidx = n_grid + n_offset; // Do a reverse linear search to determine the group index. int group_idx = 0; for (int i = num_groups; 0 <= i; --i) { if (nidx < group_idxs[i]) { group_idx = i; } } const bool include_dist = adj[midx * num_groups + group_idx] && midx < m && nidx < n; // Compute L2 metric. DataT acc = DataT(0); for (int i = 0; i < k; ++i) { int xidx = i + midx * k; int yidx = i + nidx * k; auto diff = x[xidx] - y[yidx]; acc += diff * diff; } if (sqrt) { acc = raft::sqrt(acc); } ReduceOpT redOp; typedef cub::WarpReduce<raft::KeyValuePair<int, DataT>> WarpReduce; __shared__ typename WarpReduce::TempStorage temp[NWARPS]; int warpId = threadIdx.x / raft::WarpSize; raft::KeyValuePair<int, DataT> tmp; tmp.key = include_dist ? nidx : -1; tmp.value = include_dist ? 
acc : maxVal; tmp = WarpReduce(temp[warpId]).Reduce(tmp, raft::distance::KVPMinReduce<int, DataT>{}); if (threadIdx.x % raft::WarpSize == 0 && midx < m) { while (atomicCAS(workspace + midx, 0, 1) == 1) ; __threadfence(); redOp(midx, min + midx, tmp); __threadfence(); atomicCAS(workspace + midx, 1, 0); } __syncthreads(); } } } // Structs // - Params: holds parameters for test case // - Inputs: holds the inputs to the functions under test (x, y, adj, group_idxs). Is generated from // the inputs. struct Params { double tolerance; int m, n, k, num_groups; bool sqrt; unsigned long long int seed; AdjacencyPattern pattern; }; inline auto operator<<(std::ostream& os, const Params& p) -> std::ostream& { os << "m: " << p.m << ", n: " << p.n << ", k: " << p.k << ", num_groups: " << p.num_groups << ", sqrt: " << p.sqrt << ", seed: " << p.seed << ", tol: " << p.tolerance; return os; } template <typename DataT> struct Inputs { using IdxT = int; raft::device_matrix<DataT, IdxT> x, y; raft::device_matrix<bool, IdxT> adj; raft::device_vector<IdxT, IdxT> group_idxs; Inputs(const raft::handle_t& handle, const Params& p) : x{raft::make_device_matrix<DataT, IdxT>(handle, p.m, p.k)}, y{raft::make_device_matrix<DataT, IdxT>(handle, p.n, p.k)}, adj{raft::make_device_matrix<bool, IdxT>(handle, p.m, p.num_groups)}, group_idxs{raft::make_device_vector<IdxT, IdxT>(handle, p.num_groups)} { // Initialize x, y raft::random::RngState r(p.seed); uniform(handle, r, x.data_handle(), p.m * p.k, DataT(-1.0), DataT(1.0)); uniform(handle, r, y.data_handle(), p.n * p.k, DataT(-1.0), DataT(1.0)); // Initialize adj, group_idxs. 
dim3 block(32, 32); dim3 grid(10, 10); init_adj<<<grid, block, 0, resource::get_cuda_stream(handle)>>>( p.pattern, p.n, adj.view(), group_idxs.view()); RAFT_CUDA_TRY(cudaGetLastError()); } }; template <typename DataT, typename OutT = raft::KeyValuePair<int, DataT>> auto reference(const raft::handle_t& handle, Inputs<DataT> inp, const Params& p) -> raft::device_vector<OutT, int> { int m = inp.x.extent(0); int n = inp.y.extent(0); int k = inp.x.extent(1); int num_groups = inp.group_idxs.extent(0); if (m == 0 || n == 0 || k == 0 || num_groups == 0) { return raft::make_device_vector<OutT, int>(handle, 0); } // Initialize workspace auto stream = resource::get_cuda_stream(handle); rmm::device_uvector<char> workspace(p.m * sizeof(int), stream); RAFT_CUDA_TRY(cudaMemsetAsync(workspace.data(), 0, sizeof(int) * m, stream)); // Initialize output auto out = raft::make_device_vector<OutT, int>(handle, m); auto blks = raft::ceildiv(m, 256); MinAndDistanceReduceOp<int, DataT> op; raft::distance::detail::initKernel<DataT, raft::KeyValuePair<int, DataT>, int> <<<blks, 256, 0, stream>>>(out.data_handle(), m, std::numeric_limits<DataT>::max(), op); RAFT_CUDA_TRY(cudaGetLastError()); // Launch reference kernel const int nwarps = 16; static const dim3 TPB(32, nwarps, 1); dim3 nblks(1, 200, 1); referenceKernel<DataT, decltype(op), nwarps> <<<nblks, TPB, 0, stream>>>(out.data_handle(), inp.x.data_handle(), inp.y.data_handle(), inp.adj.data_handle(), inp.group_idxs.data_handle(), m, n, k, num_groups, p.sqrt, (int*)workspace.data(), std::numeric_limits<DataT>::max()); RAFT_CUDA_TRY(cudaGetLastError()); return out; } template <typename DataT, typename OutT = raft::KeyValuePair<int, DataT>> auto run_masked_nn(const raft::handle_t& handle, Inputs<DataT> inp, const Params& p) -> raft::device_vector<OutT, int> { // Compute norms: auto x_norm = raft::make_device_vector<DataT, int>(handle, p.m); auto y_norm = raft::make_device_vector<DataT, int>(handle, p.n); raft::linalg::norm(handle, 
std::as_const(inp.x).view(), x_norm.view(), raft::linalg::L2Norm, raft::linalg::Apply::ALONG_ROWS); raft::linalg::norm(handle, std::as_const(inp.y).view(), y_norm.view(), raft::linalg::L2Norm, raft::linalg::Apply::ALONG_ROWS); // Create parameters for masked_l2_nn using IdxT = int; using RedOpT = MinAndDistanceReduceOp<int, DataT>; using PairRedOpT = raft::distance::KVPMinReduce<int, DataT>; using ParamT = raft::distance::masked_l2_nn_params<RedOpT, PairRedOpT>; bool init_out = true; ParamT masked_l2_params{RedOpT{}, PairRedOpT{}, p.sqrt, init_out}; // Create output auto out = raft::make_device_vector<OutT, IdxT, raft::layout_c_contiguous>(handle, p.m); // Launch kernel raft::distance::masked_l2_nn<DataT, OutT, IdxT>(handle, masked_l2_params, inp.x.view(), inp.y.view(), x_norm.view(), y_norm.view(), inp.adj.view(), inp.group_idxs.view(), out.view()); resource::sync_stream(handle); return out; } template <typename T> struct CompareApproxAbsKVP { typedef typename raft::KeyValuePair<int, T> KVP; CompareApproxAbsKVP(T eps_) : eps(eps_) {} bool operator()(const KVP& a, const KVP& b) const { T diff = raft::abs(raft::abs(a.value) - raft::abs(b.value)); T m = std::max(raft::abs(a.value), raft::abs(b.value)); T ratio = m >= eps ? 
diff / m : diff; return (ratio <= eps); } private: T eps; }; template <typename K, typename V, typename L> ::testing::AssertionResult devArrMatch(const raft::KeyValuePair<K, V>* expected, const raft::KeyValuePair<K, V>* actual, size_t size, L eq_compare, cudaStream_t stream = 0) { typedef typename raft::KeyValuePair<K, V> KVP; std::shared_ptr<KVP> exp_h(new KVP[size]); std::shared_ptr<KVP> act_h(new KVP[size]); raft::update_host<KVP>(exp_h.get(), expected, size, stream); raft::update_host<KVP>(act_h.get(), actual, size, stream); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); for (size_t i(0); i < size; ++i) { auto exp = exp_h.get()[i]; auto act = act_h.get()[i]; if (!eq_compare(exp, act)) { return ::testing::AssertionFailure() << "actual=" << act.key << "," << act.value << " != expected=" << exp.key << "," << exp.value << " @" << i; } } return ::testing::AssertionSuccess(); } inline auto gen_params() -> std::vector<Params> { // Regular powers of two auto regular = raft::util::itertools::product<Params>({0.001f}, // tolerance {32, 64, 512}, // m {32, 64, 512}, // n {8, 32}, // k {2, 32}, // num_groups {true, false}, // sqrt {1234ULL}, // seed {AdjacencyPattern::all_true, AdjacencyPattern::checkerboard, AdjacencyPattern::checkerboard_64, AdjacencyPattern::all_false}); // Irregular sizes to check tiling and bounds checking auto irregular = raft::util::itertools::product<Params>({0.001f}, // tolerance {511, 512, 513}, // m {127, 128, 129}, // n {5}, // k {3, 9}, // num_groups {true, false}, // sqrt {1234ULL}, // seed {AdjacencyPattern::all_true, AdjacencyPattern::checkerboard, AdjacencyPattern::checkerboard_64}); regular.insert(regular.end(), irregular.begin(), irregular.end()); return regular; } class MaskedL2NNTest : public ::testing::TestWithParam<Params> { // Empty. }; // TEST_P(MaskedL2NNTest, ReferenceCheckFloat) { using DataT = float; // Get parameters; create handle and input data. 
Params p = GetParam(); raft::handle_t handle{}; Inputs<DataT> inputs{handle, p}; // Calculate reference and test output auto out_reference = reference(handle, inputs, p); auto out_fast = run_masked_nn(handle, inputs, p); // Check for differences. ASSERT_TRUE(devArrMatch(out_reference.data_handle(), out_fast.data_handle(), p.m, CompareApproxAbsKVP<DataT>(p.tolerance), resource::get_cuda_stream(handle))); } // This test checks whether running the masked_l2_nn twice returns the same // output. TEST_P(MaskedL2NNTest, DeterminismCheck) { using DataT = float; // Get parameters; create handle and input data. Params p = GetParam(); raft::handle_t handle{}; Inputs<DataT> inputs{handle, p}; // Calculate reference and test output auto out1 = run_masked_nn(handle, inputs, p); auto out2 = run_masked_nn(handle, inputs, p); // Check for differences. ASSERT_TRUE(devArrMatch(out1.data_handle(), out2.data_handle(), p.m, CompareApproxAbsKVP<DataT>(p.tolerance), resource::get_cuda_stream(handle))); } TEST_P(MaskedL2NNTest, ReferenceCheckDouble) { using DataT = double; // Get parameters; create handle and input data. Params p = GetParam(); raft::handle_t handle{}; Inputs<DataT> inputs{handle, p}; // Calculate reference and test output auto out_reference = reference(handle, inputs, p); auto out_fast = run_masked_nn(handle, inputs, p); // Check for differences. ASSERT_TRUE(devArrMatch(out_reference.data_handle(), out_fast.data_handle(), p.m, CompareApproxAbsKVP<DataT>(p.tolerance), resource::get_cuda_stream(handle))); } INSTANTIATE_TEST_CASE_P(MaskedL2NNTests, MaskedL2NNTest, ::testing::ValuesIn(gen_params())); } // end namespace raft::distance::masked_nn
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_l1.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "distance_base.cuh" namespace raft { namespace distance { template <typename DataType> class DistanceUnexpL1 : public DistanceTest<raft::distance::DistanceType::L1, DataType> {}; const std::vector<DistanceInputs<float>> inputsf = { {0.001f, 1024, 1024, 32, true, 1234ULL}, {0.001f, 1024, 32, 1024, true, 1234ULL}, {0.001f, 32, 1024, 1024, true, 1234ULL}, {0.003f, 1024, 1024, 1024, true, 1234ULL}, {0.001f, 1024, 1024, 32, false, 1234ULL}, {0.001f, 1024, 32, 1024, false, 1234ULL}, {0.001f, 32, 1024, 1024, false, 1234ULL}, {0.003f, 1024, 1024, 1024, false, 1234ULL}, }; typedef DistanceUnexpL1<float> DistanceUnexpL1F; TEST_P(DistanceUnexpL1F, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? 
params.n : params.m; ASSERT_TRUE(raft::devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceUnexpL1F, ::testing::ValuesIn(inputsf)); const std::vector<DistanceInputs<double>> inputsd = { {0.001, 1024, 1024, 32, true, 1234ULL}, {0.001, 1024, 32, 1024, true, 1234ULL}, {0.001, 32, 1024, 1024, true, 1234ULL}, {0.003, 1024, 1024, 1024, true, 1234ULL}, {0.001, 1024, 1024, 32, false, 1234ULL}, {0.001, 1024, 32, 1024, false, 1234ULL}, {0.001, 32, 1024, 1024, false, 1234ULL}, {0.003, 1024, 1024, 1024, false, 1234ULL}, }; typedef DistanceUnexpL1<double> DistanceUnexpL1D; TEST_P(DistanceUnexpL1D, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(raft::devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceUnexpL1D, ::testing::ValuesIn(inputsd)); class BigMatrixUnexpL1 : public BigMatrixDistanceTest<raft::distance::DistanceType::L1> {}; TEST_F(BigMatrixUnexpL1, Result) {} } // end namespace distance } // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_russell_rao.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "distance_base.cuh" namespace raft { namespace distance { template <typename DataType> class DistanceRussellRao : public DistanceTest<raft::distance::DistanceType::RusselRaoExpanded, DataType> {}; const std::vector<DistanceInputs<float>> inputsf = { {0.001f, 1024, 1024, 32, true, 1234ULL}, {0.001f, 1024, 32, 1024, true, 1234ULL}, {0.001f, 32, 1024, 1024, true, 1234ULL}, {0.003f, 1024, 1024, 1024, true, 1234ULL}, {0.001f, 1024, 1024, 32, false, 1234ULL}, {0.001f, 1024, 32, 1024, false, 1234ULL}, {0.001f, 32, 1024, 1024, false, 1234ULL}, {0.003f, 1024, 1024, 1024, false, 1234ULL}, }; typedef DistanceRussellRao<float> DistanceRussellRaoF; TEST_P(DistanceRussellRaoF, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? 
params.n : params.m; ASSERT_TRUE(raft::devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceRussellRaoF, ::testing::ValuesIn(inputsf)); const std::vector<DistanceInputs<double>> inputsd = { {0.001, 1024, 1024, 32, true, 1234ULL}, {0.001, 1024, 32, 1024, true, 1234ULL}, {0.001, 32, 1024, 1024, true, 1234ULL}, {0.003, 1024, 1024, 1024, true, 1234ULL}, {0.001, 1024, 1024, 32, false, 1234ULL}, {0.001, 1024, 32, 1024, false, 1234ULL}, {0.001, 32, 1024, 1024, false, 1234ULL}, {0.003, 1024, 1024, 1024, false, 1234ULL}, }; typedef DistanceRussellRao<double> DistanceRussellRaoD; TEST_P(DistanceRussellRaoD, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(raft::devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceRussellRaoD, ::testing::ValuesIn(inputsd)); class BigMatrixRussellRao : public BigMatrixDistanceTest<raft::distance::DistanceType::RusselRaoExpanded> {}; TEST_F(BigMatrixRussellRao, Result) {} } // end namespace distance } // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_l2_unexp.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "distance_base.cuh" namespace raft { namespace distance { template <typename DataType> class DistanceEucUnexpTest : public DistanceTest<raft::distance::DistanceType::L2Unexpanded, DataType> {}; const std::vector<DistanceInputs<float>> inputsf = { {0.001f, 1024, 1024, 32, true, 1234ULL}, {0.001f, 1024, 32, 1024, true, 1234ULL}, {0.001f, 32, 1024, 1024, true, 1234ULL}, {0.003f, 1024, 1024, 1024, true, 1234ULL}, {0.001f, 1024, 1024, 32, false, 1234ULL}, {0.001f, 1024, 32, 1024, false, 1234ULL}, {0.001f, 32, 1024, 1024, false, 1234ULL}, {0.003f, 1024, 1024, 1024, false, 1234ULL}, }; typedef DistanceEucUnexpTest<float> DistanceEucUnexpTestF; TEST_P(DistanceEucUnexpTestF, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? 
params.n : params.m; ASSERT_TRUE(devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceEucUnexpTestF, ::testing::ValuesIn(inputsf)); const std::vector<DistanceInputs<double>> inputsd = { {0.001, 1024, 1024, 32, true, 1234ULL}, {0.001, 1024, 32, 1024, true, 1234ULL}, {0.001, 32, 1024, 1024, true, 1234ULL}, {0.003, 1024, 1024, 1024, true, 1234ULL}, {0.001, 1024, 1024, 32, false, 1234ULL}, {0.001, 1024, 32, 1024, false, 1234ULL}, {0.001, 32, 1024, 1024, false, 1234ULL}, {0.003, 1024, 1024, 1024, false, 1234ULL}, }; typedef DistanceEucUnexpTest<double> DistanceEucUnexpTestD; TEST_P(DistanceEucUnexpTestD, Result) { int m = params.isRowMajor ? params.m : params.n; int n = params.isRowMajor ? params.n : params.m; ASSERT_TRUE(devArrMatch( dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceEucUnexpTestD, ::testing::ValuesIn(inputsd)); class BigMatrixEucUnexp : public BigMatrixDistanceTest<raft::distance::DistanceType::L2Unexpanded> { }; TEST_F(BigMatrixEucUnexp, Result) {} } // end namespace distance } // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/distance_base.cuh
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/common/nvtx.hpp> // common::nvtx::range #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/device_mdspan.hpp> // make_device_matrix_view #include <raft/core/operators.hpp> // raft::sqrt #include <raft/core/resources.hpp> // raft::resources #include <raft/distance/distance.cuh> #include <raft/distance/distance_types.hpp> // raft::distance::DistanceType #include <raft/random/rng.cuh> #include <rmm/device_uvector.hpp> // rmm::device_uvector namespace raft { namespace distance { template <typename DataType> RAFT_KERNEL naiveDistanceKernel(DataType* dist, const DataType* x, const DataType* y, int m, int n, int k, raft::distance::DistanceType type, bool isRowMajor) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; DataType acc = DataType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? i + midx * k : i * m + midx; int yidx = isRowMajor ? i + nidx * k : i * n + nidx; auto diff = x[xidx] - y[yidx]; acc += diff * diff; } if (type == raft::distance::DistanceType::L2SqrtExpanded || type == raft::distance::DistanceType::L2SqrtUnexpanded) acc = raft::sqrt(acc); int outidx = isRowMajor ? 
midx * n + nidx : midx + m * nidx; dist[outidx] = acc; } template <typename DataType> RAFT_KERNEL naiveL1_Linf_CanberraDistanceKernel(DataType* dist, const DataType* x, const DataType* y, int m, int n, int k, raft::distance::DistanceType type, bool isRowMajor) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) { return; } DataType acc = DataType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? i + midx * k : i * m + midx; int yidx = isRowMajor ? i + nidx * k : i * n + nidx; auto a = x[xidx]; auto b = y[yidx]; auto diff = (a > b) ? (a - b) : (b - a); if (type == raft::distance::DistanceType::Linf) { acc = raft::max(acc, diff); } else if (type == raft::distance::DistanceType::Canberra) { const auto add = raft::abs(a) + raft::abs(b); // deal with potential for 0 in denominator by // forcing 1/0 instead acc += ((add != 0) * diff / (add + (add == 0))); } else { acc += diff; } } int outidx = isRowMajor ? midx * n + nidx : midx + m * nidx; dist[outidx] = acc; } template <typename DataType> RAFT_KERNEL naiveCosineDistanceKernel( DataType* dist, const DataType* x, const DataType* y, int m, int n, int k, bool isRowMajor) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) { return; } DataType acc_a = DataType(0); DataType acc_b = DataType(0); DataType acc_ab = DataType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? i + midx * k : i * m + midx; int yidx = isRowMajor ? i + nidx * k : i * n + nidx; auto a = x[xidx]; auto b = y[yidx]; acc_a += a * a; acc_b += b * b; acc_ab += a * b; } int outidx = isRowMajor ? 
midx * n + nidx : midx + m * nidx; // Use 1.0 - (cosine similarity) to calc the distance dist[outidx] = (DataType)1.0 - acc_ab / (raft::sqrt(acc_a) * raft::sqrt(acc_b)); } template <typename DataType> RAFT_KERNEL naiveInnerProductKernel( DataType* dist, const DataType* x, const DataType* y, int m, int n, int k, bool isRowMajor) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) { return; } DataType acc_ab = DataType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? i + midx * k : i * m + midx; int yidx = isRowMajor ? i + nidx * k : i * n + nidx; auto a = x[xidx]; auto b = y[yidx]; acc_ab += a * b; } int outidx = isRowMajor ? midx * n + nidx : midx + m * nidx; dist[outidx] = acc_ab; } template <typename DataType> RAFT_KERNEL naiveHellingerDistanceKernel( DataType* dist, const DataType* x, const DataType* y, int m, int n, int k, bool isRowMajor) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) { return; } DataType acc_ab = DataType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? i + midx * k : i * m + midx; int yidx = isRowMajor ? i + nidx * k : i * n + nidx; auto a = x[xidx]; auto b = y[yidx]; acc_ab += raft::sqrt(a) * raft::sqrt(b); } int outidx = isRowMajor ? midx * n + nidx : midx + m * nidx; // Adjust to replace NaN in sqrt with 0 if input to sqrt is negative acc_ab = 1 - acc_ab; auto rectifier = (!signbit(acc_ab)); dist[outidx] = raft::sqrt(rectifier * acc_ab); } template <typename DataType> RAFT_KERNEL naiveLpUnexpDistanceKernel(DataType* dist, const DataType* x, const DataType* y, int m, int n, int k, bool isRowMajor, DataType p) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; DataType acc = DataType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? 
i + midx * k : i * m + midx; int yidx = isRowMajor ? i + nidx * k : i * n + nidx; auto a = x[xidx]; auto b = y[yidx]; auto diff = raft::abs(a - b); acc += raft::pow(diff, p); } auto one_over_p = 1 / p; acc = raft::pow(acc, one_over_p); int outidx = isRowMajor ? midx * n + nidx : midx + m * nidx; dist[outidx] = acc; } template <typename DataType> RAFT_KERNEL naiveHammingDistanceKernel( DataType* dist, const DataType* x, const DataType* y, int m, int n, int k, bool isRowMajor) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; DataType acc = DataType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? i + midx * k : i * m + midx; int yidx = isRowMajor ? i + nidx * k : i * n + nidx; auto a = x[xidx]; auto b = y[yidx]; acc += (a != b); } acc = acc / k; int outidx = isRowMajor ? midx * n + nidx : midx + m * nidx; dist[outidx] = acc; } template <typename DataType> RAFT_KERNEL naiveJensenShannonDistanceKernel( DataType* dist, const DataType* x, const DataType* y, int m, int n, int k, bool isRowMajor) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; DataType acc = DataType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? i + midx * k : i * m + midx; int yidx = isRowMajor ? i + nidx * k : i * n + nidx; auto a = x[xidx]; auto b = y[yidx]; DataType m = 0.5f * (a + b); bool a_zero = a == 0; bool b_zero = b == 0; DataType p = (!a_zero * m) / (a_zero + a); DataType q = (!b_zero * m) / (b_zero + b); bool p_zero = p == 0; bool q_zero = q == 0; acc += (-a * (!p_zero * log(p + p_zero))) + (-b * (!q_zero * log(q + q_zero))); } acc = raft::sqrt(0.5f * acc); int outidx = isRowMajor ? 
midx * n + nidx : midx + m * nidx; dist[outidx] = acc; } template <typename DataType, typename OutType> RAFT_KERNEL naiveRussellRaoDistanceKernel( OutType* dist, const DataType* x, const DataType* y, int m, int n, int k, bool isRowMajor) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; OutType acc = OutType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? i + midx * k : i * m + midx; int yidx = isRowMajor ? i + nidx * k : i * n + nidx; auto a = x[xidx]; auto b = y[yidx]; acc += (a * b); } acc = (k - acc) / k; int outidx = isRowMajor ? midx * n + nidx : midx + m * nidx; dist[outidx] = acc; } template <typename DataType, typename OutType> RAFT_KERNEL naiveKLDivergenceDistanceKernel( OutType* dist, const DataType* x, const DataType* y, int m, int n, int k, bool isRowMajor) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; OutType acc = OutType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? i + midx * k : i * m + midx; int yidx = isRowMajor ? i + nidx * k : i * n + nidx; auto a = x[xidx]; auto b = y[yidx]; bool b_zero = (b == 0); bool a_zero = (a == 0); acc += a * (log(a + a_zero) - log(b + b_zero)); } acc = 0.5f * acc; int outidx = isRowMajor ? midx * n + nidx : midx + m * nidx; dist[outidx] = acc; } template <typename DataType, typename OutType> RAFT_KERNEL naiveCorrelationDistanceKernel( OutType* dist, const DataType* x, const DataType* y, int m, int n, int k, bool isRowMajor) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; OutType acc = OutType(0); auto a_norm = DataType(0); auto b_norm = DataType(0); auto a_sq_norm = DataType(0); auto b_sq_norm = DataType(0); for (int i = 0; i < k; ++i) { int xidx = isRowMajor ? i + midx * k : i * m + midx; int yidx = isRowMajor ? 
i + nidx * k : i * n + nidx; auto a = x[xidx]; auto b = y[yidx]; a_norm += a; b_norm += b; a_sq_norm += (a * a); b_sq_norm += (b * b); acc += (a * b); } auto numer = k * acc - (a_norm * b_norm); auto Q_denom = k * a_sq_norm - (a_norm * a_norm); auto R_denom = k * b_sq_norm - (b_norm * b_norm); acc = 1 - (numer / raft::sqrt(Q_denom * R_denom)); int outidx = isRowMajor ? midx * n + nidx : midx + m * nidx; dist[outidx] = acc; } template <typename DataType> void naiveDistance(DataType* dist, const DataType* x, const DataType* y, int m, int n, int k, raft::distance::DistanceType type, bool isRowMajor, DataType metric_arg = 2.0f, cudaStream_t stream = 0) { static const dim3 TPB(16, 32, 1); dim3 nblks(raft::ceildiv(m, (int)TPB.x), raft::ceildiv(n, (int)TPB.y), 1); switch (type) { case raft::distance::DistanceType::Canberra: case raft::distance::DistanceType::Linf: case raft::distance::DistanceType::L1: naiveL1_Linf_CanberraDistanceKernel<DataType> <<<nblks, TPB, 0, stream>>>(dist, x, y, m, n, k, type, isRowMajor); break; case raft::distance::DistanceType::L2SqrtUnexpanded: case raft::distance::DistanceType::L2Unexpanded: case raft::distance::DistanceType::L2SqrtExpanded: case raft::distance::DistanceType::L2Expanded: naiveDistanceKernel<DataType> <<<nblks, TPB, 0, stream>>>(dist, x, y, m, n, k, type, isRowMajor); break; case raft::distance::DistanceType::CosineExpanded: naiveCosineDistanceKernel<DataType> <<<nblks, TPB, 0, stream>>>(dist, x, y, m, n, k, isRowMajor); break; case raft::distance::DistanceType::HellingerExpanded: naiveHellingerDistanceKernel<DataType> <<<nblks, TPB, 0, stream>>>(dist, x, y, m, n, k, isRowMajor); break; case raft::distance::DistanceType::LpUnexpanded: naiveLpUnexpDistanceKernel<DataType> <<<nblks, TPB, 0, stream>>>(dist, x, y, m, n, k, isRowMajor, metric_arg); break; case raft::distance::DistanceType::HammingUnexpanded: naiveHammingDistanceKernel<DataType> <<<nblks, TPB, 0, stream>>>(dist, x, y, m, n, k, isRowMajor); break; case 
raft::distance::DistanceType::InnerProduct: naiveInnerProductKernel<DataType><<<nblks, TPB, 0, stream>>>(dist, x, y, m, n, k, isRowMajor); break; case raft::distance::DistanceType::JensenShannon: naiveJensenShannonDistanceKernel<DataType> <<<nblks, TPB, 0, stream>>>(dist, x, y, m, n, k, isRowMajor); break; case raft::distance::DistanceType::RusselRaoExpanded: naiveRussellRaoDistanceKernel<DataType> <<<nblks, TPB, 0, stream>>>(dist, x, y, m, n, k, isRowMajor); break; case raft::distance::DistanceType::KLDivergence: naiveKLDivergenceDistanceKernel<DataType> <<<nblks, TPB, 0, stream>>>(dist, x, y, m, n, k, isRowMajor); break; case raft::distance::DistanceType::CorrelationExpanded: naiveCorrelationDistanceKernel<DataType> <<<nblks, TPB, 0, stream>>>(dist, x, y, m, n, k, isRowMajor); break; default: FAIL() << "should be here\n"; } RAFT_CUDA_TRY(cudaPeekAtLastError()); } template <typename DataType> struct DistanceInputs { DataType tolerance; int m, n, k; bool isRowMajor; unsigned long long int seed; DataType metric_arg = 2.0f; }; template <typename DataType> ::std::ostream& operator<<(::std::ostream& os, const DistanceInputs<DataType>& dims) { return os; } // TODO: Remove when mdspan-based raft::runtime::distance::pairwise_distance is // implemented. 
//
// Context:
// https://github.com/rapidsai/raft/issues/1338

// Map a compile-time layout tag to the row-major flag used by the naive
// reference implementation.
template <typename layout>
constexpr bool layout_to_row_major();

template <>
constexpr bool layout_to_row_major<layout_c_contiguous>()
{
  return true;
}
template <>
constexpr bool layout_to_row_major<layout_f_contiguous>()
{
  return false;
}

// Adapts raw device pointers to mdspan views and invokes the public
// raft::distance::distance API.
// NOTE(review): dist2, params and threshold are not used by this launcher —
// presumably retained for interface parity with an older launcher; confirm
// before removing.
template <raft::distance::DistanceType distanceType, typename DataType, typename layout>
void distanceLauncher(raft::resources const& handle,
                      DataType* x,
                      DataType* y,
                      DataType* dist,
                      DataType* dist2,
                      int m,
                      int n,
                      int k,
                      DistanceInputs<DataType>& params,
                      DataType threshold,
                      DataType metric_arg = 2.0f)
{
  auto x_v    = make_device_matrix_view<DataType, int, layout>(x, m, k);
  auto y_v    = make_device_matrix_view<DataType, int, layout>(y, n, k);
  auto dist_v = make_device_matrix_view<DataType, int, layout>(dist, m, n);

  raft::distance::distance<distanceType, DataType, DataType, DataType, layout>(
    handle, x_v, y_v, dist_v, metric_arg);
}

// Fixture comparing the optimized raft::distance::distance implementation
// against the naive reference kernels for a single metric.
template <raft::distance::DistanceType distanceType, typename DataType>
class DistanceTest : public ::testing::TestWithParam<DistanceInputs<DataType>> {
 public:
  DistanceTest()
    : params(::testing::TestWithParam<DistanceInputs<DataType>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      x(params.m * params.k, stream),
      y(params.n * params.k, stream),
      dist_ref(params.m * params.n, stream),
      dist(params.m * params.n, stream),
      dist2(params.m * params.n, stream)
  {
  }

  void SetUp() override
  {
    auto testInfo = testing::UnitTest::GetInstance()->current_test_info();
    common::nvtx::range fun_scope("test::%s/%s", testInfo->test_suite_name(), testInfo->name());

    raft::random::RngState r(params.seed);
    int m               = params.m;
    int n               = params.n;
    int k               = params.k;
    DataType metric_arg = params.metric_arg;
    bool isRowMajor     = params.isRowMajor;
    // Pick an input distribution that satisfies the metric's domain.
    if (distanceType == raft::distance::DistanceType::HellingerExpanded ||
        distanceType == raft::distance::DistanceType::JensenShannon ||
        distanceType == raft::distance::DistanceType::KLDivergence) {
      // Hellinger/JS/KL work only on positive numbers.
      uniform(handle, r, x.data(), m * k, DataType(0.0), DataType(1.0));
      uniform(handle, r, y.data(), n * k, DataType(0.0), DataType(1.0));
    } else if (distanceType == raft::distance::DistanceType::RusselRaoExpanded) {
      uniform(handle, r, x.data(), m * k, DataType(0.0), DataType(1.0));
      uniform(handle, r, y.data(), n * k, DataType(0.0), DataType(1.0));
      // Russel-Rao works on boolean values.
      bernoulli(handle, r, x.data(), m * k, 0.5f);
      bernoulli(handle, r, y.data(), n * k, 0.5f);
    } else {
      uniform(handle, r, x.data(), m * k, DataType(-1.0), DataType(1.0));
      uniform(handle, r, y.data(), n * k, DataType(-1.0), DataType(1.0));
    }
    // Reference result produced by the naive kernels.
    naiveDistance(
      dist_ref.data(), x.data(), y.data(), m, n, k, distanceType, isRowMajor, metric_arg, stream);
    DataType threshold = -10000.f;

    if (isRowMajor) {
      distanceLauncher<distanceType, DataType, layout_c_contiguous>(handle,
                                                                    x.data(),
                                                                    y.data(),
                                                                    dist.data(),
                                                                    dist2.data(),
                                                                    m,
                                                                    n,
                                                                    k,
                                                                    params,
                                                                    threshold,
                                                                    metric_arg);
    } else {
      distanceLauncher<distanceType, DataType, layout_f_contiguous>(handle,
                                                                    x.data(),
                                                                    y.data(),
                                                                    dist.data(),
                                                                    dist2.data(),
                                                                    m,
                                                                    n,
                                                                    k,
                                                                    params,
                                                                    threshold,
                                                                    metric_arg);
    }
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  DistanceInputs<DataType> params;
  rmm::device_uvector<DataType> x, y, dist_ref, dist, dist2;
};

/*
 * This test suite verifies the path when X and Y are same buffer,
 * distance metrics which requires norms like L2 expanded/cosine/correlation
 * takes a more optimal path in such case to skip norm calculation for Y buffer.
 * It may happen that though both X and Y are same buffer but user passes
 * different dimensions for them like in case of tiled_brute_force_knn.
*/
template <raft::distance::DistanceType distanceType, typename DataType>
class DistanceTestSameBuffer : public ::testing::TestWithParam<DistanceInputs<DataType>> {
 public:
  using dev_vector = rmm::device_uvector<DataType>;
  DistanceTestSameBuffer()
    : params(::testing::TestWithParam<DistanceInputs<DataType>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      x(params.m * params.k, stream),
      dist_ref({dev_vector(params.m * params.m, stream), dev_vector(params.m * params.m, stream)}),
      dist({dev_vector(params.m * params.m, stream), dev_vector(params.m * params.m, stream)}),
      dist2({dev_vector(params.m * params.m, stream), dev_vector(params.m * params.m, stream)})
  {
  }

  void SetUp() override
  {
    auto testInfo = testing::UnitTest::GetInstance()->current_test_info();
    common::nvtx::range fun_scope("test::%s/%s", testInfo->test_suite_name(), testInfo->name());

    raft::random::RngState r(params.seed);
    int m               = params.m;
    int n               = params.m;  // y aliases x, so n == m by construction
    int k               = params.k;
    DataType metric_arg = params.metric_arg;
    bool isRowMajor     = params.isRowMajor;
    // Pick an input distribution that satisfies the metric's domain.
    if (distanceType == raft::distance::DistanceType::HellingerExpanded ||
        distanceType == raft::distance::DistanceType::JensenShannon ||
        distanceType == raft::distance::DistanceType::KLDivergence) {
      // Hellinger/JS/KL work only on positive numbers.
      uniform(handle, r, x.data(), m * k, DataType(0.0), DataType(1.0));
    } else if (distanceType == raft::distance::DistanceType::RusselRaoExpanded) {
      uniform(handle, r, x.data(), m * k, DataType(0.0), DataType(1.0));
      // Russel-Rao works on boolean values.
      bernoulli(handle, r, x.data(), m * k, 0.5f);
    } else {
      uniform(handle, r, x.data(), m * k, DataType(-1.0), DataType(1.0));
    }

    for (int i = 0; i < 2; i++) {
      // Both X and Y are the same buffer, but when i = 1 different
      // dimensions for x & y are passed (x uses only half its rows).
      m = m / (i + 1);
      naiveDistance(dist_ref[i].data(),
                    x.data(),
                    x.data(),
                    m,
                    n,
                    k,
                    distanceType,
                    isRowMajor,
                    metric_arg,
                    stream);
      DataType threshold = -10000.f;

      if (isRowMajor) {
        distanceLauncher<distanceType, DataType, layout_c_contiguous>(handle,
                                                                      x.data(),
                                                                      x.data(),
                                                                      dist[i].data(),
                                                                      dist2[i].data(),
                                                                      m,
                                                                      n,
                                                                      k,
                                                                      params,
                                                                      threshold,
                                                                      metric_arg);
      } else {
        distanceLauncher<distanceType, DataType, layout_f_contiguous>(handle,
                                                                      x.data(),
                                                                      x.data(),
                                                                      dist[i].data(),
                                                                      dist2[i].data(),
                                                                      m,
                                                                      n,
                                                                      k,
                                                                      params,
                                                                      threshold,
                                                                      metric_arg);
      }
    }
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  DistanceInputs<DataType> params;
  dev_vector x;
  static const int N = 2;
  std::array<dev_vector, N> dist_ref, dist, dist2;
};

// Smoke test: run a metric on a large (48000 x 48000) output without checking
// values; passes if no launch/allocation error is raised.
template <raft::distance::DistanceType distanceType>
class BigMatrixDistanceTest : public ::testing::Test {
 public:
  BigMatrixDistanceTest()
    : x(m * k, resource::get_cuda_stream(handle)),
      dist(std::size_t(m) * m, resource::get_cuda_stream(handle)){};
  void SetUp() override
  {
    auto testInfo = testing::UnitTest::GetInstance()->current_test_info();
    common::nvtx::range fun_scope("test::%s/%s", testInfo->test_suite_name(), testInfo->name());
    // Fix: removed a stray local declaration of pairwise_distance(...) that
    // was dead code (declared here but never referenced).
    constexpr bool row_major   = true;
    constexpr float metric_arg = 0.0f;
    raft::distance::distance<distanceType, float, float, float>(
      handle, x.data(), x.data(), dist.data(), m, n, k, row_major, metric_arg);
    RAFT_CUDA_TRY(cudaStreamSynchronize(resource::get_cuda_stream(handle)));
  }

 protected:
  raft::resources handle;
  int m = 48000;
  int n = 48000;
  int k = 1;
  rmm::device_uvector<float> x, dist;
};
}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_l_inf.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Chebyshev (L-infinity) distance: instantiates the generic DistanceTest
// fixture from distance_base.cuh for the Linf metric.
template <typename DataType>
class DistanceLinf : public DistanceTest<raft::distance::DistanceType::Linf, DataType> {};

// Test cases: {tolerance, m, n, k, isRowMajor, seed}
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 1024, 1024, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceLinf<float> DistanceLinfF;
TEST_P(DistanceLinfF, Result)
{
  // Column-major runs interpret the output buffer with m and n swapped.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceLinfF, ::testing::ValuesIn(inputsf));

const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001, 1024, 1024, 32, false, 1234ULL},
  {0.001, 1024, 32, 1024, false, 1234ULL},
  {0.001, 32, 1024, 1024, false, 1234ULL},
  {0.003, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceLinf<double> DistanceLinfD;
TEST_P(DistanceLinfD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceLinfD, ::testing::ValuesIn(inputsd));

// Large-output smoke test; passes if no launch/allocation error occurs.
class BigMatrixLinf : public BigMatrixDistanceTest<raft::distance::DistanceType::Linf> {};
TEST_F(BigMatrixLinf, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/gram_base.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <iostream> #include <memory> #include <raft/core/resource/cuda_stream.hpp> #include <raft/distance/distance_types.hpp> #include <raft/distance/kernels.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> namespace raft { namespace distance { namespace kernels { // Get the offset of element [i,k]. HDI int get_offset(int i, int k, int ld, bool is_row_major) { return is_row_major ? i * ld + k : i + k * ld; } // Calculate the Gram matrix on the host. 
template <typename math_t> void naiveGramMatrixKernel(int n1, int n2, int n_cols, const rmm::device_uvector<math_t>& x1, const rmm::device_uvector<math_t>& x2, math_t* gram_host, int ld1, int ld2, int ld_out, bool is_row_major, KernelParams kernel, cudaStream_t stream, const raft::resources& handle) { std::vector<math_t> x1_host(x1.size()); raft::update_host(x1_host.data(), x1.data(), x1.size(), stream); std::vector<math_t> x2_host(x2.size()); raft::update_host(x2_host.data(), x2.data(), x2.size(), stream); resource::sync_stream(handle, stream); for (int i = 0; i < n1; i++) { for (int j = 0; j < n2; j++) { float d = 0; for (int k = 0; k < n_cols; k++) { if (kernel.kernel == KernelType::RBF) { math_t diff = x1_host[get_offset(i, k, ld1, is_row_major)] - x2_host[get_offset(j, k, ld2, is_row_major)]; d += diff * diff; } else { d += x1_host[get_offset(i, k, ld1, is_row_major)] * x2_host[get_offset(j, k, ld2, is_row_major)]; } } int idx = get_offset(i, j, ld_out, is_row_major); math_t v = 0; switch (kernel.kernel) { case (KernelType::LINEAR): gram_host[idx] = d; break; case (KernelType::POLYNOMIAL): v = kernel.gamma * d + kernel.coef0; gram_host[idx] = std::pow(v, kernel.degree); break; case (KernelType::TANH): gram_host[idx] = std::tanh(kernel.gamma * d + kernel.coef0); break; case (KernelType::RBF): gram_host[idx] = exp(-kernel.gamma * d); break; } } } } } // namespace kernels } // namespace distance } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_adj_threshold.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdint> // uint8_t namespace raft::distance { template <typename AccT, typename DataT, typename OutT, typename Index> struct threshold_final_op { DataT threshold_val; __device__ __host__ threshold_final_op() noexcept : threshold_val(0.0) {} __device__ __host__ threshold_final_op(DataT val) noexcept : threshold_val(val) {} __device__ __host__ OutT operator()(AccT d_val, Index g_idx) const noexcept { return d_val <= threshold_val; } }; using threshold_float = threshold_final_op<float, float, uint8_t, int>; using threshold_double = threshold_final_op<double, double, uint8_t, int>; } // namespace raft::distance
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_l2_exp.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Expanded L2 distance: instantiates the generic DistanceTest fixture.
template <typename DataType>
class DistanceEucExpTest : public DistanceTest<raft::distance::DistanceType::L2Expanded, DataType> {
};

// Same metric, but X and Y alias the same buffer, exercising the norm-reuse
// fast path (see DistanceTestSameBuffer in distance_base.cuh).
template <typename DataType>
class DistanceEucExpTestXequalY
  : public DistanceTestSameBuffer<raft::distance::DistanceType::L2Expanded, DataType> {};

// Test cases: {tolerance, m, n, k, isRowMajor, seed}
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 2048, 4096, 128, true, 1234ULL},
  {0.001f, 1024, 1024, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.003f, 1021, 1021, 1021, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
  {0.003f, 1021, 1021, 1021, false, 1234ULL},
};

const std::vector<DistanceInputs<float>> inputsXeqYf = {
  {0.01f, 2048, 4096, 128, true, 1234ULL},
  {0.01f, 1024, 1024, 32, true, 1234ULL},
  {0.01f, 1024, 32, 1024, true, 1234ULL},
  {0.01f, 32, 1024, 1024, true, 1234ULL},
  {0.03f, 1024, 1024, 1024, true, 1234ULL},
  {0.03f, 1021, 1021, 1021, true, 1234ULL},
  {0.01f, 1024, 1024, 32, false, 1234ULL},
  {0.01f, 1024, 32, 1024, false, 1234ULL},
  {0.01f, 32, 1024, 1024, false, 1234ULL},
  {0.03f, 1024, 1024, 1024, false, 1234ULL},
  {0.03f, 1021, 1021, 1021, false, 1234ULL},
};

typedef DistanceEucExpTest<float> DistanceEucExpTestF;
TEST_P(DistanceEucExpTestF, Result)
{
  // Column-major runs interpret the output buffer with m and n swapped.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceEucExpTestF, ::testing::ValuesIn(inputsf));

typedef DistanceEucExpTestXequalY<float> DistanceEucExpTestXequalYF;
TEST_P(DistanceEucExpTestXequalYF, Result)
{
  int m = params.m;
  // i = 0: full-size x against itself.
  ASSERT_TRUE(raft::devArrMatch(dist_ref[0].data(),
                                dist[0].data(),
                                m,
                                m,
                                raft::CompareApprox<float>(params.tolerance),
                                stream));
  // i = 1: half-size x against the full buffer.
  ASSERT_TRUE(raft::devArrMatch(dist_ref[1].data(),
                                dist[1].data(),
                                m / 2,
                                m,
                                raft::CompareApprox<float>(params.tolerance),
                                stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceEucExpTestXequalYF, ::testing::ValuesIn(inputsXeqYf));

const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001, 1024, 1024, 32, false, 1234ULL},
  {0.001, 1024, 32, 1024, false, 1234ULL},
  {0.001, 32, 1024, 1024, false, 1234ULL},
  {0.003, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceEucExpTest<double> DistanceEucExpTestD;
TEST_P(DistanceEucExpTestD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceEucExpTestD, ::testing::ValuesIn(inputsd));

// Large-output smoke test; passes if no launch/allocation error occurs.
class BigMatrixEucExp : public BigMatrixDistanceTest<raft::distance::DistanceType::L2Expanded> {};
TEST_F(BigMatrixEucExp, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_hamming.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Hamming (unexpanded) distance: instantiates the generic DistanceTest fixture.
template <typename DataType>
class DistanceHamming
  : public DistanceTest<raft::distance::DistanceType::HammingUnexpanded, DataType> {};

// Test cases: {tolerance, m, n, k, isRowMajor, seed}
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 1024, 1024, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceHamming<float> DistanceHammingF;
TEST_P(DistanceHammingF, Result)
{
  // Column-major runs interpret the output buffer with m and n swapped.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceHammingF, ::testing::ValuesIn(inputsf));

const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001, 1024, 1024, 32, false, 1234ULL},
  {0.001, 1024, 32, 1024, false, 1234ULL},
  {0.001, 32, 1024, 1024, false, 1234ULL},
  {0.003, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceHamming<double> DistanceHammingD;
TEST_P(DistanceHammingD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceHammingD, ::testing::ValuesIn(inputsd));

// Large-output smoke test; passes if no launch/allocation error occurs.
class BigMatrixHamming
  : public BigMatrixDistanceTest<raft::distance::DistanceType::HammingUnexpanded> {};
TEST_F(BigMatrixHamming, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_lp_unexp.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Minkowski (Lp, unexpanded) distance: instantiates the generic DistanceTest
// fixture. The last field of each input is the Minkowski exponent p.
template <typename DataType>
class DistanceLpUnexp : public DistanceTest<raft::distance::DistanceType::LpUnexpanded, DataType> {
};

// Test cases: {tolerance, m, n, k, isRowMajor, seed, metric_arg (p)}
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 1024, 1024, 32, true, 1234ULL, 4.0f},
  {0.001f, 1024, 32, 1024, true, 1234ULL, 3.0f},
  {0.001f, 32, 1024, 1024, true, 1234ULL, 4.0f},
  {0.003f, 1024, 1024, 1024, true, 1234ULL, 3.0f},
  {0.001f, 1024, 1024, 32, false, 1234ULL, 4.0f},
  {0.001f, 1024, 32, 1024, false, 1234ULL, 3.0f},
  {0.001f, 32, 1024, 1024, false, 1234ULL, 4.0f},
  {0.003f, 1024, 1024, 1024, false, 1234ULL, 3.0f},
};
typedef DistanceLpUnexp<float> DistanceLpUnexpF;
TEST_P(DistanceLpUnexpF, Result)
{
  // Column-major runs interpret the output buffer with m and n swapped.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceLpUnexpF, ::testing::ValuesIn(inputsf));

const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL, 4.0},
  {0.001, 1024, 32, 1024, true, 1234ULL, 3.0},
  {0.001, 32, 1024, 1024, true, 1234ULL, 4.0},
  {0.003, 1024, 1024, 1024, true, 1234ULL, 3.0},
  {0.001, 1024, 1024, 32, false, 1234ULL, 4.0},
  {0.001, 1024, 32, 1024, false, 1234ULL, 3.0},
  {0.001, 32, 1024, 1024, false, 1234ULL, 4.0},
  {0.003, 1024, 1024, 1024, false, 1234ULL, 3.0},
};
typedef DistanceLpUnexp<double> DistanceLpUnexpD;
TEST_P(DistanceLpUnexpD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceLpUnexpD, ::testing::ValuesIn(inputsd));

// Large-output smoke test; passes if no launch/allocation error occurs.
class BigMatrixLpUnexp : public BigMatrixDistanceTest<raft::distance::DistanceType::LpUnexpanded> {
};
TEST_F(BigMatrixLpUnexp, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_l2_sqrt_exp.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Euclidean (sqrt of expanded L2) distance: instantiates the generic
// DistanceTest fixture.
template <typename DataType>
class DistanceEucSqrtExpTest
  : public DistanceTest<raft::distance::DistanceType::L2SqrtExpanded, DataType> {};

// Test cases: {tolerance, m, n, k, isRowMajor, seed}
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 2048, 4096, 128, true, 1234ULL},
  {0.001f, 1024, 1024, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.003f, 1021, 1021, 1021, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
  {0.003f, 1021, 1021, 1021, false, 1234ULL},
};
typedef DistanceEucSqrtExpTest<float> DistanceEucSqrtExpTestF;
TEST_P(DistanceEucSqrtExpTestF, Result)
{
  // Column-major runs interpret the output buffer with m and n swapped.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceEucSqrtExpTestF, ::testing::ValuesIn(inputsf));

const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001, 1024, 1024, 32, false, 1234ULL},
  {0.001, 1024, 32, 1024, false, 1234ULL},
  {0.001, 32, 1024, 1024, false, 1234ULL},
  {0.003, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceEucSqrtExpTest<double> DistanceEucSqrtExpTestD;
TEST_P(DistanceEucSqrtExpTestD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceEucSqrtExpTestD, ::testing::ValuesIn(inputsd));

// Large-output smoke test; passes if no launch/allocation error occurs.
class BigMatrixEucSqrtExp
  : public BigMatrixDistanceTest<raft::distance::DistanceType::L2SqrtExpanded> {};
TEST_F(BigMatrixEucSqrtExp, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_hellinger.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Hellinger (expanded) distance: instantiates the generic DistanceTest
// fixture. Inputs are drawn from [0, 1] (see distance_base.cuh).
template <typename DataType>
class DistanceHellingerExp
  : public DistanceTest<raft::distance::DistanceType::HellingerExpanded, DataType> {};

// Test cases: {tolerance, m, n, k, isRowMajor, seed}
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 1024, 1024, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceHellingerExp<float> DistanceHellingerExpF;
TEST_P(DistanceHellingerExpF, Result)
{
  // Column-major runs interpret the output buffer with m and n swapped.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceHellingerExpF, ::testing::ValuesIn(inputsf));

const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001, 1024, 1024, 32, false, 1234ULL},
  {0.001, 1024, 32, 1024, false, 1234ULL},
  {0.001, 32, 1024, 1024, false, 1234ULL},
  {0.003, 1024, 1024, 1024, false, 1234ULL},
};
typedef DistanceHellingerExp<double> DistanceHellingerExpD;
TEST_P(DistanceHellingerExpD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceHellingerExpD, ::testing::ValuesIn(inputsd));

// Large-output smoke test; passes if no launch/allocation error occurs.
class BigMatrixHellingerExp
  : public BigMatrixDistanceTest<raft::distance::DistanceType::HellingerExpanded> {};
TEST_F(BigMatrixHellingerExp, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/masked_nn_compress_to_bits.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "../test_utils.h" #include <cstdio> #include <gtest/gtest.h> #include <iostream> #include <raft/core/device_mdarray.hpp> #include <raft/core/device_mdspan.hpp> #include <raft/core/handle.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/distance/detail/compress_to_bits.cuh> #include <raft/matrix/init.cuh> #include <raft/random/rng.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <raft/util/integer_utils.hpp> #include <raft/util/itertools.hpp> namespace raft::distance::masked_nn::compress_to_bits { /** * @brief Transpose and decompress 2D bitfield to boolean matrix * * Inverse operation of compress_to_bits * * @tparam T * * @parameter[in] in An `m x n` bitfield matrix. Row major. * @parameter in_rows The number of rows of `in`, i.e. `m`. * @parameter in_cols The number of cols of `in`, i.e. `n`. * * @parameter[out] out An `(m * bits_per_elem) x n` boolean matrix. 
*/ template <typename T = uint64_t, typename = std::enable_if_t<std::is_integral<T>::value>> RAFT_KERNEL decompress_bits_kernel(const T* in, int in_rows, int in_cols, bool* out) { constexpr int bits_per_element = 8 * sizeof(T); const size_t i = threadIdx.y + blockIdx.y * blockDim.y; const size_t j = threadIdx.x + blockIdx.x * blockDim.x; if (in_rows <= i || in_cols <= j) { return; } const size_t out_rows = in_rows * bits_per_element; const size_t out_cols = in_cols; const size_t out_i = i * bits_per_element; const size_t out_j = j; if (out_rows <= out_i && out_cols <= out_j) { return; } T bitfield = in[i * in_cols + j]; for (int bitpos = 0; bitpos < bits_per_element; ++bitpos) { bool bit = ((T(1) << bitpos) & bitfield) != 0; out[(out_i + bitpos) * out_cols + out_j] = bit; } } /** * @brief Transpose and decompress 2D bitfield to boolean matrix * * Inverse operation of compress_to_bits * * @tparam T * * @parameter[in] in An `m x n` bitfield matrix. Row major. * @parameter in_rows The number of rows of `in`, i.e. `m`. * @parameter in_cols The number of cols of `in`, i.e. `n`. * * @parameter[out] out An `n x (m * bits_per_elem)` boolean matrix. */ template <typename T = uint64_t, typename = std::enable_if_t<std::is_integral<T>::value>> void decompress_bits(const raft::handle_t& handle, const T* in, int in_rows, int in_cols, bool* out) { auto stream = resource::get_cuda_stream(handle); dim3 grid(raft::ceildiv(in_cols, 32), raft::ceildiv(in_rows, 32)); dim3 block(32, 32); decompress_bits_kernel<<<grid, block, 0, stream>>>(in, in_rows, in_cols, out); RAFT_CUDA_TRY(cudaGetLastError()); } // Params holds parameters for test case struct Params { int m, n; }; inline auto operator<<(std::ostream& os, const Params& p) -> std::ostream& { return os << "m: " << p.m << ", n: " << p.n; } // Check that the following holds // // decompress(compress(x)) == x // // for 2D boolean matrices x. 
// Round-trip test: compress a random boolean matrix to bitfields, decompress
// it again, and require the result to equal the input elementwise.
template <typename T>
void check_invertible(const Params& p)
{
  using raft::distance::detail::compress_to_bits;
  constexpr int bits_per_elem = sizeof(T) * 8;

  // Make m and n that are safe to ceildiv.
  int m = raft::round_up_safe(p.m, bits_per_elem);
  int n = p.n;

  // Generate random input
  raft::handle_t handle{};
  raft::random::RngState r(1ULL);  // fixed seed for reproducibility
  auto in = raft::make_device_matrix<bool, int>(handle, m, n);
  raft::random::bernoulli(handle, r, in.data_handle(), m * n, 0.5f);

  // `tmp` holds the compressed bitfields; `out` holds the decompressed booleans.
  // Because m was rounded up, out_m == m and `in`/`out` have identical extents.
  int tmp_m = raft::ceildiv(m, bits_per_elem);
  int out_m = tmp_m * bits_per_elem;

  auto tmp = raft::make_device_matrix<T, int>(handle, tmp_m, n);
  auto out = raft::make_device_matrix<bool, int>(handle, out_m, n);

  resource::sync_stream(handle);
  RAFT_CUDA_TRY(cudaGetLastError());

  ASSERT_EQ(in.extent(0), out.extent(0)) << "M does not match";
  ASSERT_EQ(in.extent(1), out.extent(1)) << "N does not match";

  compress_to_bits(handle, in.view(), tmp.view());
  resource::sync_stream(handle);
  RAFT_CUDA_TRY(cudaGetLastError());

  decompress_bits(handle, tmp.data_handle(), tmp.extent(0), tmp.extent(1), out.data_handle());
  resource::sync_stream(handle);
  RAFT_CUDA_TRY(cudaGetLastError());

  // Check for differences.
  ASSERT_TRUE(raft::devArrMatch(in.data_handle(),
                                out.data_handle(),
                                in.extent(0) * in.extent(1),
                                raft::Compare<bool>(),
                                resource::get_cuda_stream(handle)));
  resource::sync_stream(handle);
  RAFT_CUDA_TRY(cudaGetLastError());
}

// Compress an all-true boolean matrix and require every output bitfield to be
// all ones (~T(0)).
void check_all_true(const Params& p)
{
  using raft::distance::detail::compress_to_bits;
  using T                     = uint64_t;
  constexpr int bits_per_elem = sizeof(T) * 8;

  // Make m and n that are safe to ceildiv.
  int m = raft::round_up_safe(p.m, bits_per_elem);
  int n = p.n;

  raft::handle_t handle{};
  raft::random::RngState r(1ULL);
  auto in = raft::make_device_matrix<bool, int>(handle, m, n);
  raft::matrix::fill(handle, in.view(), true);

  // Compressed output: one T per bits_per_elem input rows.
  int tmp_m = raft::ceildiv(m, bits_per_elem);
  auto tmp  = raft::make_device_matrix<T, int>(handle, tmp_m, n);

  resource::sync_stream(handle);
  RAFT_CUDA_TRY(cudaGetLastError());

  compress_to_bits(handle, in.view(), tmp.view());
  resource::sync_stream(handle);
  RAFT_CUDA_TRY(cudaGetLastError());

  // All input bits are true, so every compressed word must be all ones.
  auto expected = raft::make_device_matrix<T, int>(handle, tmp_m, n);
  raft::matrix::fill(handle, expected.view(), ~T(0));

  // Check for differences.
  ASSERT_TRUE(raft::devArrMatch(expected.data_handle(),
                                tmp.data_handle(),
                                tmp.extent(0) * tmp.extent(1),
                                raft::Compare<T>(),
                                resource::get_cuda_stream(handle)));
  resource::sync_stream(handle);
  RAFT_CUDA_TRY(cudaGetLastError());
}

// Parameterized fixture; all state lives in the free test functions above.
class CompressToBitsTest : public ::testing::TestWithParam<Params> {
  // Empty.
};

TEST_P(CompressToBitsTest, CheckTrue64) { check_all_true(GetParam()); }

TEST_P(CompressToBitsTest, CheckInvertible64)
{
  using T = uint64_t;
  check_invertible<T>(GetParam());
}

TEST_P(CompressToBitsTest, CheckInvertible32)
{
  using T = uint32_t;
  check_invertible<T>(GetParam());
}

// Cross product of sizes around the 32/64-bit word boundaries plus large primes.
std::vector<Params> params = raft::util::itertools::product<Params>(
  {1, 3, 32, 33, 63, 64, 65, 128, 10013}, {1, 3, 32, 33, 63, 64, 65, 13001});

INSTANTIATE_TEST_CASE_P(CompressToBits, CompressToBitsTest, ::testing::ValuesIn(params));

}  // namespace raft::distance::masked_nn::compress_to_bits
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/fused_l2_nn.cu
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"

#include <gtest/gtest.h>
#include <raft/core/kvp.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/detail/fused_l2_nn.cuh>
#include <raft/distance/fused_l2_nn.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

namespace raft {
namespace distance {

// Key/value min-reduction functor: keeps whichever pair has the smaller value.
template <typename LabelT, typename DataT>
struct RaftKVPMinReduce {
  typedef raft::KeyValuePair<LabelT, DataT> KVP;

  DI KVP operator()(LabelT rit, const KVP& a, const KVP& b) { return b.value < a.value ? b : a; }

  DI KVP operator()(const KVP& a, const KVP& b) { return b.value < a.value ? b : a; }

};  // KVPMinReduce

// Reference (naive) fused-L2-NN kernel. One thread per (midx, nidx) pair
// computes the squared L2 distance between row midx of x and row nidx of y,
// warp-reduces to a per-warp minimum, then lane 0 merges it into min[midx]
// under a per-row spin lock built from atomicCAS on `workspace`.
template <typename DataT, bool Sqrt, typename ReduceOpT, int NWARPS>
RAFT_KERNEL naiveKernel(raft::KeyValuePair<int, DataT>* min,
                        DataT* x,
                        DataT* y,
                        int m,
                        int n,
                        int k,
                        int* workspace,
                        DataT maxVal)
{
  int midx = threadIdx.y + blockIdx.y * blockDim.y;
  int nidx = threadIdx.x + blockIdx.x * blockDim.x;
  DataT acc = DataT(0);
  for (int i = 0; i < k; ++i) {
    int xidx = i + midx * k;
    int yidx = i + nidx * k;
    // Out-of-range threads contribute 0 so they can still join the warp reduce.
    auto diff = midx >= m || nidx >= n ? DataT(0) : x[xidx] - y[yidx];
    acc += diff * diff;
  }
  if (Sqrt) { acc = raft::sqrt(acc); }
  ReduceOpT redOp;
  typedef cub::WarpReduce<raft::KeyValuePair<int, DataT>> WarpReduce;
  __shared__ typename WarpReduce::TempStorage temp[NWARPS];
  int warpId = threadIdx.x / raft::WarpSize;
  raft::KeyValuePair<int, DataT> tmp;
  tmp.key = nidx;
  // Invalid threads carry maxVal so they never win the min-reduction.
  tmp.value = midx >= m || nidx >= n ? maxVal : acc;
  tmp       = WarpReduce(temp[warpId]).Reduce(tmp, RaftKVPMinReduce<int, DataT>());
  if (threadIdx.x % raft::WarpSize == 0 && midx < m) {
    // Spin until workspace[midx] is acquired (0 -> 1), fence, merge, release.
    while (atomicCAS(workspace + midx, 0, 1) == 1)
      ;
    __threadfence();
    redOp(midx, min + midx, tmp);
    __threadfence();
    atomicCAS(workspace + midx, 1, 0);
  }
}

// Host launcher for the reference kernel: zeroes the per-row locks,
// initializes min[] to maxVal via initKernel, then runs naiveKernel.
template <typename DataT, bool Sqrt>
void naive(raft::KeyValuePair<int, DataT>* min,
           DataT* x,
           DataT* y,
           int m,
           int n,
           int k,
           int* workspace,
           cudaStream_t stream)
{
  static const dim3 TPB(32, 16, 1);
  dim3 nblks(raft::ceildiv(n, (int)TPB.x), raft::ceildiv(m, (int)TPB.y), 1);
  RAFT_CUDA_TRY(cudaMemsetAsync(workspace, 0, sizeof(int) * m, stream));
  auto blks = raft::ceildiv(m, 256);
  MinAndDistanceReduceOp<int, DataT> op;
  detail::initKernel<DataT, raft::KeyValuePair<int, DataT>, int>
    <<<blks, 256, 0, stream>>>(min, m, std::numeric_limits<DataT>::max(), op);
  RAFT_CUDA_TRY(cudaGetLastError());
  naiveKernel<DataT, Sqrt, MinAndDistanceReduceOp<int, DataT>, 16>
    <<<nblks, TPB, 0, stream>>>(min, x, y, m, n, k, workspace, std::numeric_limits<DataT>::max());
  RAFT_CUDA_TRY(cudaGetLastError());
}

// Test-case parameters: comparison tolerance, problem shape, and RNG seed.
template <typename DataT>
struct Inputs {
  DataT tolerance;
  int m, n, k;
  unsigned long long int seed;

  // Printed by gtest on failure to identify the parameterization.
  friend std::ostream& operator<<(std::ostream& os, const Inputs& p)
  {
    return os << "m: " << p.m
              << ", "
                 "n: "
              << p.n
              << ", "
                 "k: "
              << p.k
              << ", "
                 "seed: "
              << p.seed
              << ", "
                 "tol: "
              << p.tolerance;
  }
};

// Fixture: generates random x/y, a golden result via naive(), and row norms,
// then runs fusedL2NNMinReduce in runTest().
template <typename DataT, bool Sqrt>
class FusedL2NNTest : public ::testing::TestWithParam<Inputs<DataT>> {
 public:
  FusedL2NNTest()
    : params(::testing::TestWithParam<Inputs<DataT>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      x(params.m
* params.k, stream),
      y(params.n * params.k, stream),
      xn(params.m, stream),
      yn(params.n, stream),
      min(params.m, stream),
      min_ref(params.m, stream),
      workspace(params.m * sizeof(int), stream)
  {
  }

 protected:
  // Fill x/y with uniform random data, compute the golden result and the
  // precomputed row norms consumed by the fused kernel.
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    int m = params.m;
    int n = params.n;
    int k = params.k;
    uniform(handle, r, x.data(), m * k, DataT(-1.0), DataT(1.0));
    uniform(handle, r, y.data(), n * k, DataT(-1.0), DataT(1.0));
    generateGoldenResult();
    raft::linalg::rowNorm(xn.data(), x.data(), k, m, raft::linalg::L2Norm, true, stream);
    raft::linalg::rowNorm(yn.data(), y.data(), k, n, raft::linalg::L2Norm, true, stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  Inputs<DataT> params;
  rmm::device_uvector<DataT> x;   // m x k input rows
  rmm::device_uvector<DataT> y;   // n x k input rows
  rmm::device_uvector<DataT> xn;  // squared L2 row norms of x
  rmm::device_uvector<DataT> yn;  // squared L2 row norms of y
  rmm::device_uvector<raft::KeyValuePair<int, DataT>> min;      // fused-kernel output
  rmm::device_uvector<raft::KeyValuePair<int, DataT>> min_ref;  // golden output
  rmm::device_uvector<char> workspace;  // per-row lock words for naive()

  // Overridable so the determinism subclass can skip golden generation.
  virtual void generateGoldenResult()
  {
    int m = params.m;
    int n = params.n;
    int k = params.k;
    naive<DataT, Sqrt>(min_ref.data(), x.data(), y.data(), m, n, k, (int*)workspace.data(), stream);
  }

  // Run the primitive under test and synchronize so callers can read `out`.
  void runTest(raft::KeyValuePair<int, DataT>* out)
  {
    int m = params.m;
    int n = params.n;
    int k = params.k;

    const bool init_out_buffer = true;
    fusedL2NNMinReduce<DataT, raft::KeyValuePair<int, DataT>, int>(out,
                                                                   x.data(),
                                                                   y.data(),
                                                                   xn.data(),
                                                                   yn.data(),
                                                                   m,
                                                                   n,
                                                                   k,
                                                                   (void*)workspace.data(),
                                                                   Sqrt,
                                                                   init_out_buffer,
                                                                   stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }
};

// Approximate comparator on |value| with a relative-ratio tolerance.
template <typename T>
struct CompareApproxAbsKVP {
  typedef typename raft::KeyValuePair<int, T> KVP;
  CompareApproxAbsKVP(T eps_) : eps(eps_) {}
  bool operator()(const KVP& a, const KVP& b) const
  {
    T diff  = std::abs(std::abs(a.value) - std::abs(b.value));
    T m     = std::max(std::abs(a.value), std::abs(b.value));
    T ratio = m >= eps ? diff / m : diff;
    return (ratio <= eps);
  }

 private:
  T eps;
};

// Exact comparator on values (keys are allowed to differ — ties may resolve
// to different indices).
template <typename T>
struct CompareExactKVP {
  typedef typename raft::KeyValuePair<int, T> KVP;
  bool operator()(const KVP& a, const KVP& b) const
  {
    if (a.value != b.value) return false;
    return true;
  }
};

/**
 * Host-side comparison of two device arrays of key/value pairs.
 *
 * Copies both arrays to the host, synchronizes the stream, and returns a
 * gtest AssertionResult identifying the first mismatching element.
 *
 * Fixed: the host staging buffers were `std::shared_ptr<KVP>` constructed from
 * `new KVP[size]`, so the default deleter destroyed them with scalar `delete`
 * instead of `delete[]` — undefined behavior. `std::unique_ptr<KVP[]>` uses
 * the correct array deleter; usage via `.get()` is unchanged.
 */
template <typename K, typename V, typename L>
::testing::AssertionResult devArrMatch(const raft::KeyValuePair<K, V>* expected,
                                       const raft::KeyValuePair<K, V>* actual,
                                       size_t size,
                                       L eq_compare,
                                       cudaStream_t stream = 0)
{
  typedef typename raft::KeyValuePair<K, V> KVP;
  std::unique_ptr<KVP[]> exp_h(new KVP[size]);
  std::unique_ptr<KVP[]> act_h(new KVP[size]);
  raft::update_host<KVP>(exp_h.get(), expected, size, stream);
  raft::update_host<KVP>(act_h.get(), actual, size, stream);
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  for (size_t i(0); i < size; ++i) {
    auto exp = exp_h.get()[i];
    auto act = act_h.get()[i];
    if (!eq_compare(exp, act)) {
      return ::testing::AssertionFailure()
             << "actual=" << act.key << "," << act.value << " != expected=" << exp.key << ","
             << exp.value << " @" << i;
    }
  }
  return ::testing::AssertionSuccess();
}

const std::vector<Inputs<float>> inputsf = {
  {0.001f, 32, 32, 32, 1234ULL},   {0.001f, 32, 64, 32, 1234ULL},
  {0.001f, 64, 32, 32, 1234ULL},   {0.001f, 64, 64, 32, 1234ULL},
  {0.001f, 128, 32, 32, 1234ULL},  {0.001f, 128, 64, 32, 1234ULL},
  {0.001f, 128, 128, 64, 1234ULL}, {0.001f, 64, 128, 128, 1234ULL},

  {0.001f, 32, 32, 34, 1234ULL},   {0.001f, 32, 64, 34, 1234ULL},
  {0.001f, 64, 32, 34, 1234ULL},   {0.001f, 64, 64, 34, 1234ULL},
  {0.001f, 128, 32, 34, 1234ULL},  {0.001f, 128, 64, 34, 1234ULL},
  {0.001f, 128, 128, 66, 1234ULL}, {0.001f, 64, 128, 130, 1234ULL},

  {0.001f, 32, 32, 33, 1234ULL},   {0.001f, 32, 64, 33, 1234ULL},
  {0.001f, 64, 32, 33, 1234ULL},   {0.001f, 64, 64, 33, 1234ULL},
  {0.001f, 128, 32, 33, 1234ULL},  {0.001f, 128, 64, 33, 1234ULL},
  {0.001f, 128, 128, 65, 1234ULL}, {0.001f, 64, 128, 129, 1234ULL},

  {0.006f, 1805, 134, 2, 1234ULL}, {0.006f, 8192, 1024, 64, 1234ULL},
  {0.006f, 8192, 1025, 64,
1234ULL},

  // Repeat with smaller values of k
  {0.006f, 32, 32, 1, 1234ULL},     {0.001f, 32, 64, 2, 1234ULL},
  {0.001f, 64, 32, 3, 1234ULL},     {0.001f, 64, 64, 4, 1234ULL},
  {0.001f, 128, 32, 5, 1234ULL},    {0.001f, 128, 64, 6, 1234ULL},
  {0.001f, 128, 128, 7, 1234ULL},   {0.001f, 64, 128, 8, 1234ULL},

  {0.001f, 32, 32, 9, 1234ULL},     {0.001f, 32, 64, 10, 1234ULL},
  {0.001f, 64, 32, 11, 1234ULL},    {0.001f, 64, 64, 12, 1234ULL},
  {0.001f, 128, 32, 13, 1234ULL},   {0.001f, 128, 64, 14, 1234ULL},
  {0.001f, 128, 128, 15, 1234ULL},  {0.001f, 64, 128, 16, 1234ULL},

  {0.001f, 32, 32, 17, 1234ULL},    {0.001f, 32, 64, 18, 1234ULL},
  {0.001f, 64, 32, 19, 1234ULL},    {0.001f, 64, 64, 20, 1234ULL},
  {0.001f, 128, 32, 21, 1234ULL},   {0.001f, 128, 64, 22, 1234ULL},
  // NOTE(review): the next-but-one entry uses a double literal (0.00001) in a
  // float table — presumably intentional tightening of the tolerance; confirm.
  {0.001f, 128, 128, 23, 1234ULL},  {0.00001, 64, 128, 24, 1234ULL},

  {0.001f, 1805, 134, 25, 1234ULL}, {0.006f, 8192, 1024, 25, 1234ULL},
  {0.006f, 8192, 1024, 66, 1234ULL},
};

// Squared-L2 (no sqrt) float tests against the naive golden result.
typedef FusedL2NNTest<float, false> FusedL2NNTestF_Sq;
TEST_P(FusedL2NNTestF_Sq, Result)
{
  runTest(min.data());
  ASSERT_TRUE(devArrMatch(
    min_ref.data(), min.data(), params.m, CompareApproxAbsKVP<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestF_Sq, ::testing::ValuesIn(inputsf));

// L2 (with sqrt) float tests.
typedef FusedL2NNTest<float, true> FusedL2NNTestF_Sqrt;
TEST_P(FusedL2NNTestF_Sqrt, Result)
{
  runTest(min.data());
  ASSERT_TRUE(devArrMatch(
    min_ref.data(), min.data(), params.m, CompareApproxAbsKVP<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestF_Sqrt, ::testing::ValuesIn(inputsf));

const std::vector<Inputs<double>> inputsd = {
  {0.00001, 32, 32, 32, 1234ULL},   {0.00001, 32, 64, 32, 1234ULL},
  {0.00001, 64, 32, 32, 1234ULL},   {0.00001, 64, 64, 32, 1234ULL},
  {0.00001, 128, 32, 32, 1234ULL},  {0.00001, 128, 64, 32, 1234ULL},
  {0.00001, 128, 128, 64, 1234ULL}, {0.00001, 64, 128, 128, 1234ULL},

  {0.00001, 32, 32, 34, 1234ULL},   {0.00001, 32, 64, 34, 1234ULL},
  {0.00001, 64, 32, 34, 1234ULL},   {0.00001, 64, 64, 34, 1234ULL},
  {0.00001, 128, 32, 34, 1234ULL},  {0.00001, 128, 64, 34, 1234ULL},
  {0.00001, 128, 128, 66, 1234ULL}, {0.00001, 64, 128, 130, 1234ULL},

  {0.00001, 32, 32, 33, 1234ULL},   {0.00001, 32, 64, 33, 1234ULL},
  {0.00001, 64, 32, 33, 1234ULL},   {0.00001, 64, 64, 33, 1234ULL},
  {0.00001, 128, 32, 33, 1234ULL},  {0.00001, 128, 64, 33, 1234ULL},
  {0.00001, 128, 128, 65, 1234ULL}, {0.00001, 64, 128, 129, 1234ULL},

  {0.00001, 1805, 134, 2, 1234ULL},
  //{0.00001, 8192, 1024, 25, 1234ULL},
};

// Squared-L2 double tests.
typedef FusedL2NNTest<double, false> FusedL2NNTestD_Sq;
TEST_P(FusedL2NNTestD_Sq, Result)
{
  runTest(min.data());
  ASSERT_TRUE(devArrMatch(
    min_ref.data(), min.data(), params.m, CompareApproxAbsKVP<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestD_Sq, ::testing::ValuesIn(inputsd));

// L2 (with sqrt) double tests.
typedef FusedL2NNTest<double, true> FusedL2NNTestD_Sqrt;
TEST_P(FusedL2NNTestD_Sqrt, Result)
{
  runTest(min.data());
  ASSERT_TRUE(devArrMatch(
    min_ref.data(), min.data(), params.m, CompareApproxAbsKVP<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(FusedL2NNTests, FusedL2NNTestD_Sqrt, ::testing::ValuesIn(inputsd));

/// This is to test output determinism of the prim
template <typename DataT, bool Sqrt>
class FusedL2NNDetTest : public FusedL2NNTest<DataT, Sqrt> {
 public:
  // NOTE(review): this subclass declares its own `handle`/`stream` members
  // below, shadowing the base-class members of the same names — `stream` here
  // is taken from the *derived* handle, not the one the base fixture uses.
  // Looks unintentional; verify against the base fixture.
  FusedL2NNDetTest() : stream(resource::get_cuda_stream(handle)), min1(0, stream) {}

  void SetUp() override
  {
    FusedL2NNTest<DataT, Sqrt>::SetUp();
    int m = this->params.m;
    min1.resize(m, stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

  void TearDown() override { FusedL2NNTest<DataT, Sqrt>::TearDown(); }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  rmm::device_uvector<raft::KeyValuePair<int, DataT>> min1;  // repeat-run output

  static const int NumRepeats = 3;

  // Skip golden generation: determinism tests compare run-to-run, not to naive.
  void generateGoldenResult() override {}
};

typedef FusedL2NNDetTest<float, false> FusedL2NNDetTestF_Sq;
TEST_P(FusedL2NNDetTestF_Sq, Result)
{
  runTest(min.data());  // assumed to be golden
  for (int i = 0; i < NumRepeats; ++i) {
    runTest(min1.data());
    // Repeat runs must reproduce the first run's values exactly (determinism).
    ASSERT_TRUE(devArrMatch(min.data(), min1.data(), params.m, CompareExactKVP<float>(), stream));
  }
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestF_Sq, ::testing::ValuesIn(inputsf));

typedef FusedL2NNDetTest<float, true> FusedL2NNDetTestF_Sqrt;
TEST_P(FusedL2NNDetTestF_Sqrt, Result)
{
  runTest(min.data());  // assumed to be golden
  for (int i = 0; i < NumRepeats; ++i) {
    runTest(min1.data());
    ASSERT_TRUE(devArrMatch(min.data(), min1.data(), params.m, CompareExactKVP<float>(), stream));
  }
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestF_Sqrt, ::testing::ValuesIn(inputsf));

typedef FusedL2NNDetTest<double, false> FusedL2NNDetTestD_Sq;
TEST_P(FusedL2NNDetTestD_Sq, Result)
{
  runTest(min.data());  // assumed to be golden
  for (int i = 0; i < NumRepeats; ++i) {
    runTest(min1.data());
    ASSERT_TRUE(devArrMatch(min.data(), min1.data(), params.m, CompareExactKVP<double>(), stream));
  }
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestD_Sq, ::testing::ValuesIn(inputsd));

typedef FusedL2NNDetTest<double, true> FusedL2NNDetTestD_Sqrt;
TEST_P(FusedL2NNDetTestD_Sqrt, Result)
{
  runTest(min.data());  // assumed to be golden
  for (int i = 0; i < NumRepeats; ++i) {
    runTest(min1.data());
    ASSERT_TRUE(devArrMatch(min.data(), min1.data(), params.m, CompareExactKVP<double>(), stream));
  }
}
INSTANTIATE_TEST_CASE_P(FusedL2NNDetTests, FusedL2NNDetTestD_Sqrt, ::testing::ValuesIn(inputsd));

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_cos.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Expanded cosine distance, distinct x/y buffers (fixture from distance_base.cuh).
template <typename DataType>
class DistanceExpCos : public DistanceTest<raft::distance::DistanceType::CosineExpanded, DataType> {
};

// Expanded cosine distance where x and y alias the same buffer.
template <typename DataType>
class DistanceExpCosXequalY
  : public DistanceTestSameBuffer<raft::distance::DistanceType::CosineExpanded, DataType> {};

// {tolerance, m, n, k, isRowMajor, seed}
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 1024, 1024, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};

// Looser tolerances for the x == y (same-buffer) variant.
const std::vector<DistanceInputs<float>> inputsXeqYf = {
  {0.01f, 1024, 1024, 32, true, 1234ULL},
  {0.01f, 1024, 32, 1024, true, 1234ULL},
  {0.01f, 32, 1024, 1024, true, 1234ULL},
  {0.03f, 1024, 1024, 1024, true, 1234ULL},
  {0.01f, 1024, 1024, 32, false, 1234ULL},
  {0.01f, 1024, 32, 1024, false, 1234ULL},
  {0.01f, 32, 1024, 1024, false, 1234ULL},
  {0.03f, 1024, 1024, 1024, false, 1234ULL},
};

typedef DistanceExpCos<float> DistanceExpCosF;
TEST_P(DistanceExpCosF, Result)
{
  // Column-major buffers are compared with m/n swapped.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceExpCosF, ::testing::ValuesIn(inputsf));

typedef DistanceExpCosXequalY<float> DistanceExpCosXequalYF;
TEST_P(DistanceExpCosXequalYF, Result)
{
  // Buffer 0 holds the full m x m matrix; buffer 1 holds the half matrix
  // (m/2 rows, swapped for column-major).
  int m = params.m;
  int n = params.m;
  ASSERT_TRUE(raft::devArrMatch(dist_ref[0].data(),
                                dist[0].data(),
                                m,
                                n,
                                raft::CompareApprox<float>(params.tolerance),
                                stream));
  n = params.isRowMajor ? m : m / 2;
  m = params.isRowMajor ? m / 2 : m;
  ASSERT_TRUE(raft::devArrMatch(dist_ref[1].data(),
                                dist[1].data(),
                                m,
                                n,
                                raft::CompareApprox<float>(params.tolerance),
                                stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceExpCosXequalYF, ::testing::ValuesIn(inputsXeqYf));

// NOTE(review): the last four entries use float literals (0.001f/0.003f) in a
// double-input table, unlike the first four — presumably a copy-paste slip;
// the values are representable so behavior is unaffected, but confirm intent.
const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};

typedef DistanceExpCos<double> DistanceExpCosD;
TEST_P(DistanceExpCosD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceExpCosD, ::testing::ValuesIn(inputsd));

// Smoke test for very large matrices; the fixture does the work in SetUp.
class BigMatrixCos : public BigMatrixDistanceTest<raft::distance::DistanceType::CosineExpanded> {};
TEST_F(BigMatrixCos, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/distance/dist_correlation.cu
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "distance_base.cuh"

namespace raft {
namespace distance {

// Expanded correlation distance, distinct x/y buffers (fixture from distance_base.cuh).
template <typename DataType>
class DistanceCorrelation
  : public DistanceTest<raft::distance::DistanceType::CorrelationExpanded, DataType> {};

// Expanded correlation distance where x and y alias the same buffer.
template <typename DataType>
class DistanceCorrelationXequalY
  : public DistanceTestSameBuffer<raft::distance::DistanceType::CorrelationExpanded, DataType> {};

// {tolerance, m, n, k, isRowMajor, seed}
const std::vector<DistanceInputs<float>> inputsf = {
  {0.001f, 1024, 1024, 32, true, 1234ULL},
  {0.001f, 1024, 32, 1024, true, 1234ULL},
  {0.001f, 32, 1024, 1024, true, 1234ULL},
  {0.003f, 1024, 1024, 1024, true, 1234ULL},
  {0.001f, 1024, 1024, 32, false, 1234ULL},
  {0.001f, 1024, 32, 1024, false, 1234ULL},
  {0.001f, 32, 1024, 1024, false, 1234ULL},
  {0.003f, 1024, 1024, 1024, false, 1234ULL},
};

typedef DistanceCorrelation<float> DistanceCorrelationF;
TEST_P(DistanceCorrelationF, Result)
{
  // Column-major buffers are compared with m/n swapped.
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceCorrelationF, ::testing::ValuesIn(inputsf));

typedef DistanceCorrelationXequalY<float> DistanceCorrelationXequalYF;
TEST_P(DistanceCorrelationXequalYF, Result)
{
  // Buffer 0 is the full m x m matrix, buffer 1 the m/2 x m half matrix.
  // NOTE(review): unlike the cosine same-buffer test, the half-matrix shape is
  // not swapped for column-major inputs here — confirm whether that is
  // intentional for this fixture.
  int m = params.m;
  ASSERT_TRUE(raft::devArrMatch(dist_ref[0].data(),
                                dist[0].data(),
                                m,
                                m,
                                raft::CompareApprox<float>(params.tolerance),
                                stream));
  ASSERT_TRUE(raft::devArrMatch(dist_ref[1].data(),
                                dist[1].data(),
                                m / 2,
                                m,
                                raft::CompareApprox<float>(params.tolerance),
                                stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceCorrelationXequalYF, ::testing::ValuesIn(inputsf));

const std::vector<DistanceInputs<double>> inputsd = {
  {0.001, 1024, 1024, 32, true, 1234ULL},
  {0.001, 1024, 32, 1024, true, 1234ULL},
  {0.001, 32, 1024, 1024, true, 1234ULL},
  {0.003, 1024, 1024, 1024, true, 1234ULL},
  {0.001, 1024, 1024, 32, false, 1234ULL},
  {0.001, 1024, 32, 1024, false, 1234ULL},
  {0.001, 32, 1024, 1024, false, 1234ULL},
  {0.003, 1024, 1024, 1024, false, 1234ULL},
};

typedef DistanceCorrelation<double> DistanceCorrelationD;
TEST_P(DistanceCorrelationD, Result)
{
  int m = params.isRowMajor ? params.m : params.n;
  int n = params.isRowMajor ? params.n : params.m;
  ASSERT_TRUE(raft::devArrMatch(
    dist_ref.data(), dist.data(), m, n, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceCorrelationD, ::testing::ValuesIn(inputsd));

// Smoke test for very large matrices; the fixture does the work in SetUp.
class BigMatrixCorrelation
  : public BigMatrixDistanceTest<raft::distance::DistanceType::CorrelationExpanded> {};
TEST_F(BigMatrixCorrelation, Result) {}

}  // end namespace distance
}  // end namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/ball_cover.cu
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"
#include "spatial_data.h"

#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/neighbors/ball_cover.cuh>
#include <raft/neighbors/brute_force.cuh>
#include <raft/random/make_blobs.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/count.h>
#include <thrust/fill.h>
#include <thrust/transform.h>

#include <cstdint>
#include <gtest/gtest.h>
#include <iostream>
#include <vector>

namespace raft::neighbors::ball_cover {
using namespace std;

// One thread per query row: counts, in out[row], the number of neighbors whose
// predicted distance differs from the expected distance by more than `thres`
// and whose index does not trivially match (self-match exemption).
template <typename value_idx, typename value_t>
RAFT_KERNEL count_discrepancies_kernel(value_idx* actual_idx,
                                       value_idx* expected_idx,
                                       value_t* actual,
                                       value_t* expected,
                                       uint32_t m,
                                       uint32_t n,
                                       uint32_t* out,
                                       float thres = 1e-3)
{
  uint32_t row = blockDim.x * blockIdx.x + threadIdx.x;

  int n_diffs = 0;
  if (row < m) {
    for (uint32_t i = 0; i < n; i++) {
      value_t d     = actual[row * n + i] - expected[row * n + i];
      bool matches  = (fabsf(d) <= thres) || (actual_idx[row * n + i] == expected_idx[row * n + i] &&
                                             actual_idx[row * n + i] == row);
      if (!matches) {
        // Fixed: the format string used "%ud" for the two unsigned arguments,
        // which printf parses as "%u" followed by a literal 'd' — the row and
        // column were printed with a stray 'd' appended. "%u" is correct.
        printf(
          "row=%u, n=%u, actual_dist=%f, actual_ind=%ld, expected_dist=%f, expected_ind=%ld\n",
          row,
          i,
          actual[row * n + i],
          actual_idx[row * n + i],
          expected[row * n + i],
          expected_idx[row * n + i]);
      }

      n_diffs += !matches;
      out[row] = n_diffs;
    }
  }
}

struct is_nonzero {
  __host__ __device__ bool operator()(uint32_t& i) { return i > 0; }
};

// Host wrapper: runs the kernel above, then counts how many rows had at least
// one discrepancy. Returns that row count.
template <typename value_idx, typename value_t>
uint32_t count_discrepancies(value_idx* actual_idx,
                             value_idx* expected_idx,
                             value_t* actual,
                             value_t* expected,
                             uint32_t m,
                             uint32_t n,
                             uint32_t* out,
                             cudaStream_t stream)
{
  uint32_t tpb = 256;
  count_discrepancies_kernel<<<raft::ceildiv(m, tpb), tpb, 0, stream>>>(
    actual_idx, expected_idx, actual, expected, m, n, out);

  auto exec_policy = rmm::exec_policy(stream);

  uint32_t result = thrust::count_if(exec_policy, out, out + m, is_nonzero());
  return result;
}

// Brute-force KNN reference used to produce the golden neighbors.
template <typename value_t>
void compute_bfknn(const raft::resources& handle,
                   const value_t* X1,
                   const value_t* X2,
                   uint32_t n_rows,
                   uint32_t n_query_rows,
                   uint32_t d,
                   uint32_t k,
                   const raft::distance::DistanceType metric,
                   value_t* dists,
                   int64_t* inds)
{
  std::vector<raft::device_matrix_view<const value_t, uint32_t>> input_vec = {
    make_device_matrix_view(X1, n_rows, d)};

  raft::neighbors::brute_force::knn(handle,
                                    input_vec,
                                    make_device_matrix_view(X2, n_query_rows, d),
                                    make_device_matrix_view(inds, n_query_rows, k),
                                    make_device_matrix_view(dists, n_query_rows, k),
                                    metric);
}

// Degrees -> radians, for Haversine inputs.
// NOTE(review): `180.0` is a double literal, so this multiply is done in double
// precision on the device; `180.0f` would keep it in float. Left unchanged to
// preserve the exact values the thresholds were tuned against.
struct ToRadians {
  __device__ __host__ float operator()(float a) { return a * (CUDART_PI_F / 180.0); }
};

// Test parameterization: k neighbors, index size/shape, query count, metric.
template <typename value_int = std::uint32_t>
struct BallCoverInputs {
  value_int k;
  value_int n_rows;
  value_int n_cols;
  float weight;
  value_int n_query;
  raft::distance::DistanceType metric;
};

// Builds a ball-cover index on X and checks knn_query() against brute force
// on a disjoint query set.
template <typename value_idx, typename value_t, typename value_int = std::uint32_t>
class BallCoverKNNQueryTest : public ::testing::TestWithParam<BallCoverInputs<value_int>> {
 protected:
  void basicTest()
  {
    params = ::testing::TestWithParam<BallCoverInputs<value_int>>::GetParam();
    raft::resources handle;

    uint32_t k         = params.k;
    uint32_t n_centers = 25;
    float weight       = params.weight;
    auto metric        = params.metric;
    rmm::device_uvector<value_t> X(params.n_rows * params.n_cols,
                                   resource::get_cuda_stream(handle));
    rmm::device_uvector<uint32_t> Y(params.n_rows, resource::get_cuda_stream(handle));

    // Make sure the train and query sets are completely disjoint
    rmm::device_uvector<value_t> X2(params.n_query * params.n_cols,
                                    resource::get_cuda_stream(handle));
    rmm::device_uvector<uint32_t> Y2(params.n_query, resource::get_cuda_stream(handle));

    raft::random::make_blobs(
      X.data(), Y.data(), params.n_rows, params.n_cols, n_centers, resource::get_cuda_stream(handle));

    raft::random::make_blobs(
      X2.data(), Y2.data(), params.n_query, params.n_cols, n_centers, resource::get_cuda_stream(handle));

    // Golden (reference) indices/distances from brute force.
    rmm::device_uvector<value_idx> d_ref_I(params.n_query * k, resource::get_cuda_stream(handle));
    rmm::device_uvector<value_t> d_ref_D(params.n_query * k, resource::get_cuda_stream(handle));

    // Haversine expects coordinates in radians.
    if (metric == raft::distance::DistanceType::Haversine) {
      thrust::transform(
        resource::get_thrust_policy(handle), X.data(), X.data() + X.size(), X.data(), ToRadians());
      thrust::transform(resource::get_thrust_policy(handle),
                        X2.data(),
                        X2.data() + X2.size(),
                        X2.data(),
                        ToRadians());
    }

    compute_bfknn(handle,
                  X.data(),
                  X2.data(),
                  params.n_rows,
                  params.n_query,
                  params.n_cols,
                  k,
                  metric,
                  d_ref_D.data(),
                  d_ref_I.data());

    resource::sync_stream(handle);

    // Allocate predicted arrays
    rmm::device_uvector<value_idx> d_pred_I(params.n_query * k, resource::get_cuda_stream(handle));
    rmm::device_uvector<value_t> d_pred_D(params.n_query * k, resource::get_cuda_stream(handle));

    auto X_view =
      raft::make_device_matrix_view<value_t, value_int>(X.data(), params.n_rows, params.n_cols);
    auto X2_view = raft::make_device_matrix_view<const value_t, value_int>(
      (const value_t*)X2.data(), params.n_query, params.n_cols);

    auto d_pred_I_view =
      raft::make_device_matrix_view<value_idx, value_int>(d_pred_I.data(), params.n_query, k);
    auto d_pred_D_view =
      raft::make_device_matrix_view<value_t, value_int>(d_pred_D.data(), params.n_query, k);

    BallCoverIndex<value_idx, value_t, value_int, value_int> index(handle, X_view, metric);

    build_index(handle, index);

    knn_query(handle, index, X2_view, d_pred_I_view, d_pred_D_view, k, true);

    resource::sync_stream(handle);

    // What we really want are for the distances to match exactly. The
    // indices may or may not match exactly, depending upon the ordering which
    // can be nondeterministic.
    rmm::device_uvector<uint32_t> discrepancies(params.n_query, resource::get_cuda_stream(handle));
    thrust::fill(resource::get_thrust_policy(handle),
                 discrepancies.data(),
                 discrepancies.data() + discrepancies.size(),
                 0);
    //
    int res = count_discrepancies(d_ref_I.data(),
                                  d_pred_I.data(),
                                  d_ref_D.data(),
                                  d_pred_D.data(),
                                  params.n_query,
                                  k,
                                  discrepancies.data(),
                                  resource::get_cuda_stream(handle));

    // Querying a separate set must agree with brute force exactly.
    ASSERT_TRUE(res == 0);
  }

  void SetUp() override {}

  void TearDown() override {}

 protected:
  uint32_t d = 2;  // NOTE(review): unused by basicTest (n_cols comes from params); verify.
  BallCoverInputs<value_int> params;
};

// Builds a ball-cover index on X and checks all_knn_query() (X queried against
// itself) against brute force.
template <typename value_idx, typename value_t, typename value_int = std::uint32_t>
class BallCoverAllKNNTest : public ::testing::TestWithParam<BallCoverInputs<value_int>> {
 protected:
  void basicTest()
  {
    params = ::testing::TestWithParam<BallCoverInputs<value_int>>::GetParam();
    raft::resources handle;

    uint32_t k         = params.k;
    uint32_t n_centers = 25;
    float weight       = params.weight;
    auto metric        = params.metric;

    rmm::device_uvector<value_t> X(params.n_rows * params.n_cols,
                                   resource::get_cuda_stream(handle));
    rmm::device_uvector<uint32_t> Y(params.n_rows, resource::get_cuda_stream(handle));

    raft::random::make_blobs(
      X.data(), Y.data(), params.n_rows, params.n_cols, n_centers, resource::get_cuda_stream(handle));

    rmm::device_uvector<value_idx> d_ref_I(params.n_rows * k, resource::get_cuda_stream(handle));
    rmm::device_uvector<value_t> d_ref_D(params.n_rows * k, resource::get_cuda_stream(handle));

    auto X_view = raft::make_device_matrix_view<const value_t, value_int>(
      (const value_t*)X.data(), params.n_rows, params.n_cols);

    // Haversine expects coordinates in radians.
    if (metric == raft::distance::DistanceType::Haversine) {
      thrust::transform(
        resource::get_thrust_policy(handle), X.data(), X.data() + X.size(), X.data(), ToRadians());
    }

    // Self-query: the dataset is both index and query set.
    compute_bfknn(handle,
                  X.data(),
                  X.data(),
                  params.n_rows,
                  params.n_rows,
                  params.n_cols,
                  k,
                  metric,
                  d_ref_D.data(),
                  d_ref_I.data());

    resource::sync_stream(handle);

    // Allocate predicted arrays
    rmm::device_uvector<value_idx> d_pred_I(params.n_rows * k, resource::get_cuda_stream(handle));
    rmm::device_uvector<value_t> d_pred_D(params.n_rows * k, resource::get_cuda_stream(handle));

    auto d_pred_I_view =
      raft::make_device_matrix_view<value_idx, value_int>(d_pred_I.data(), params.n_rows, k);
    auto d_pred_D_view =
      raft::make_device_matrix_view<value_t, value_int>(d_pred_D.data(), params.n_rows, k);

    BallCoverIndex<value_idx, value_t> index(handle, X_view, metric);

    all_knn_query(handle, index, d_pred_I_view, d_pred_D_view, k, true);

    resource::sync_stream(handle);

    // What we really want are for the distances to match exactly. The
    // indices may or may not match exactly, depending upon the ordering which
    // can be nondeterministic.
    rmm::device_uvector<uint32_t> discrepancies(params.n_rows, resource::get_cuda_stream(handle));
    thrust::fill(resource::get_thrust_policy(handle),
                 discrepancies.data(),
                 discrepancies.data() + discrepancies.size(),
                 0);
    //
    uint32_t res = count_discrepancies(d_ref_I.data(),
                                       d_pred_I.data(),
                                       d_ref_D.data(),
                                       d_pred_D.data(),
                                       params.n_rows,
                                       k,
                                       discrepancies.data(),
                                       resource::get_cuda_stream(handle));

    // TODO: There seem to be discrepancies here only when
    // the entire test suite is executed.
    // Ref: https://github.com/rapidsai/raft/issues/
    // 1-5 mismatches in 8000 samples is 0.0125% - 0.0625%
    ASSERT_TRUE(res <= 5);
  }

  void SetUp() override {}

  void TearDown() override {}

 protected:
  BallCoverInputs<value_int> params;
};

typedef BallCoverAllKNNTest<int64_t, float> BallCoverAllKNNTestF;
typedef BallCoverKNNQueryTest<int64_t, float> BallCoverKNNQueryTestF;

// {k, n_rows, n_cols, weight, n_query, metric}
const std::vector<BallCoverInputs<std::uint32_t>> ballcover_inputs = {
  {11, 5000, 2, 1.0, 10000, raft::distance::DistanceType::Haversine},
  {25, 10000, 2, 1.0, 5000, raft::distance::DistanceType::Haversine},
  {2, 10000, 2, 1.0, 5000, raft::distance::DistanceType::L2SqrtUnexpanded},
  {2, 5000, 2, 1.0, 10000, raft::distance::DistanceType::Haversine},
  {11, 10000, 2, 1.0, 5000, raft::distance::DistanceType::L2SqrtUnexpanded},
  {25, 5000, 2, 1.0, 10000, raft::distance::DistanceType::L2SqrtUnexpanded},
  {5, 8000, 3, 1.0, 10000, raft::distance::DistanceType::L2SqrtUnexpanded},
  {11, 6000, 3, 1.0, 10000, raft::distance::DistanceType::L2SqrtUnexpanded},
  {25, 10000, 3, 1.0, 5000, raft::distance::DistanceType::L2SqrtUnexpanded}};

INSTANTIATE_TEST_CASE_P(BallCoverAllKNNTest,
                        BallCoverAllKNNTestF,
                        ::testing::ValuesIn(ballcover_inputs));
INSTANTIATE_TEST_CASE_P(BallCoverKNNQueryTest,
                        BallCoverKNNQueryTestF,
                        ::testing::ValuesIn(ballcover_inputs));

TEST_P(BallCoverAllKNNTestF, Fit) { basicTest(); }
TEST_P(BallCoverKNNQueryTestF, Fit) { basicTest(); }

}  // namespace raft::neighbors::ball_cover
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/refine.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "ann_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <raft_internal/neighbors/refine_helper.cuh> #include <raft/core/logger.hpp> #include <raft/core/resources.hpp> #include <raft/distance/distance_types.hpp> #include <raft/neighbors/detail/refine.cuh> #include <raft/neighbors/refine.cuh> #include <raft/spatial/knn/ann.cuh> #include <raft/util/itertools.hpp> #include <rmm/cuda_stream_view.hpp> #include <gtest/gtest.h> #include <vector> namespace raft::neighbors { template <typename DataT, typename DistanceT, typename IdxT> class RefineTest : public ::testing::TestWithParam<RefineInputs<IdxT>> { public: RefineTest() : stream_(resource::get_cuda_stream(handle_)), data(handle_, ::testing::TestWithParam<RefineInputs<IdxT>>::GetParam()) { } protected: public: // tamas remove void testRefine() { std::vector<IdxT> indices(data.p.n_queries * data.p.k); std::vector<DistanceT> distances(data.p.n_queries * data.p.k); if (data.p.host_data) { raft::neighbors::refine<IdxT, DataT, DistanceT, IdxT>(handle_, data.dataset_host.view(), data.queries_host.view(), data.candidates_host.view(), data.refined_indices_host.view(), data.refined_distances_host.view(), data.p.metric); raft::copy(indices.data(), data.refined_indices_host.data_handle(), data.refined_indices_host.size(), stream_); raft::copy(distances.data(), 
data.refined_distances_host.data_handle(), data.refined_distances_host.size(), stream_); } else { raft::neighbors::refine<IdxT, DataT, DistanceT, IdxT>(handle_, data.dataset.view(), data.queries.view(), data.candidates.view(), data.refined_indices.view(), data.refined_distances.view(), data.p.metric); update_host(distances.data(), data.refined_distances.data_handle(), data.refined_distances.size(), stream_); update_host( indices.data(), data.refined_indices.data_handle(), data.refined_indices.size(), stream_); } resource::sync_stream(handle_); double min_recall = 1; ASSERT_TRUE(raft::neighbors::eval_neighbours(data.true_refined_indices_host, indices, data.true_refined_distances_host, distances, data.p.n_queries, data.p.k, 0.001, min_recall)); } public: raft::resources handle_; rmm::cuda_stream_view stream_; RefineHelper<DataT, DistanceT, IdxT> data; }; const std::vector<RefineInputs<int64_t>> inputs = raft::util::itertools::product<RefineInputs<int64_t>>( {static_cast<int64_t>(137)}, {static_cast<int64_t>(1000)}, {static_cast<int64_t>(16)}, {static_cast<int64_t>(1), static_cast<int64_t>(10), static_cast<int64_t>(33)}, {static_cast<int64_t>(33)}, {raft::distance::DistanceType::L2Expanded, raft::distance::DistanceType::InnerProduct}, {false, true}); typedef RefineTest<float, float, std::int64_t> RefineTestF; TEST_P(RefineTestF, AnnRefine) { this->testRefine(); } INSTANTIATE_TEST_CASE_P(RefineTest, RefineTestF, ::testing::ValuesIn(inputs)); typedef RefineTest<uint8_t, float, std::int64_t> RefineTestF_uint8; TEST_P(RefineTestF_uint8, AnnRefine) { this->testRefine(); } INSTANTIATE_TEST_CASE_P(RefineTest, RefineTestF_uint8, ::testing::ValuesIn(inputs)); typedef RefineTest<int8_t, float, std::int64_t> RefineTestF_int8; TEST_P(RefineTestF_int8, AnnRefine) { this->testRefine(); } INSTANTIATE_TEST_CASE_P(RefineTest, RefineTestF_int8, ::testing::ValuesIn(inputs)); } // namespace raft::neighbors
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/ann_cagra.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #undef RAFT_EXPLICIT_INSTANTIATE_ONLY // Search with filter instantiation #include "../test_utils.cuh" #include "ann_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <raft_internal/neighbors/naive_knn.cuh> #include <raft/core/device_mdspan.hpp> #include <raft/core/device_resources.hpp> #include <raft/core/logger.hpp> #include <raft/distance/distance_types.hpp> #include <raft/linalg/add.cuh> #include <raft/neighbors/cagra.cuh> #include <raft/neighbors/cagra_serialize.cuh> #include <raft/neighbors/sample_filter.cuh> #include <raft/random/rng.cuh> #include <raft/util/itertools.hpp> #include <rmm/device_buffer.hpp> #include <gtest/gtest.h> #include <thrust/sequence.h> #include <cstddef> #include <iostream> #include <string> #include <vector> namespace raft::neighbors::cagra { namespace { /* A filter that excludes all indices below `offset`. 
*/ struct test_cagra_sample_filter { static constexpr unsigned offset = 300; inline _RAFT_HOST_DEVICE auto operator()( // query index const uint32_t query_ix, // the index of the current sample inside the current inverted list const uint32_t sample_ix) const { return sample_ix >= offset; } }; // For sort_knn_graph test template <typename IdxT> void RandomSuffle(raft::host_matrix_view<IdxT, int64_t> index) { for (IdxT i = 0; i < index.extent(0); i++) { uint64_t rand = i; IdxT* const row_ptr = index.data_handle() + i * index.extent(1); for (unsigned j = 0; j < index.extent(1); j++) { // Swap two indices at random rand = raft::neighbors::cagra::detail::device::xorshift64(rand); const auto i0 = rand % index.extent(1); rand = raft::neighbors::cagra::detail::device::xorshift64(rand); const auto i1 = rand % index.extent(1); const auto tmp = row_ptr[i0]; row_ptr[i0] = row_ptr[i1]; row_ptr[i1] = tmp; } } } template <typename DistanceT, typename DatatT, typename IdxT> testing::AssertionResult CheckOrder(raft::host_matrix_view<IdxT, int64_t> index_test, raft::host_matrix_view<DatatT, int64_t> dataset) { for (IdxT i = 0; i < index_test.extent(0); i++) { const DatatT* const base_vec = dataset.data_handle() + i * dataset.extent(1); const IdxT* const index_row = index_test.data_handle() + i * index_test.extent(1); DistanceT prev_distance = 0; for (unsigned j = 0; j < index_test.extent(1) - 1; j++) { const DatatT* const target_vec = dataset.data_handle() + index_row[j] * dataset.extent(1); DistanceT distance = 0; for (unsigned l = 0; l < dataset.extent(1); l++) { const auto diff = static_cast<DistanceT>(target_vec[l]) - static_cast<DistanceT>(base_vec[l]); distance += diff * diff; } if (prev_distance > distance) { return testing::AssertionFailure() << "Wrong index order (row = " << i << ", neighbor_id = " << j << "). 
(distance[neighbor_id-1] = " << prev_distance << "should be larger than distance[neighbor_id] = " << distance << ")"; } prev_distance = distance; } } return testing::AssertionSuccess(); } // Generate dataset to ensure no rounding error occurs in the norm computation of any two vectors. // When testing the CAGRA index sorting function, rounding errors can affect the norm and alter the // order of the index. To ensure the accuracy of the test, we utilize the dataset. The generation // method is based on the error-free transformation (EFT) method. RAFT_KERNEL GenerateRoundingErrorFreeDataset_kernel(float* const ptr, const uint32_t size, const uint32_t resolution) { const auto tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= size) { return; } const float u32 = *reinterpret_cast<const uint32_t*>(ptr + tid); ptr[tid] = u32 / resolution; } void GenerateRoundingErrorFreeDataset(const raft::resources& handle, float* const ptr, const uint32_t n_row, const uint32_t dim, raft::random::RngState& rng) { auto cuda_stream = resource::get_cuda_stream(handle); const uint32_t size = n_row * dim; const uint32_t block_size = 256; const uint32_t grid_size = (size + block_size - 1) / block_size; const uint32_t resolution = 1u << static_cast<unsigned>(std::floor((24 - std::log2(dim)) / 2)); raft::random::uniformInt(handle, rng, reinterpret_cast<uint32_t*>(ptr), size, 0u, resolution - 1); GenerateRoundingErrorFreeDataset_kernel<<<grid_size, block_size, 0, cuda_stream>>>( ptr, size, resolution); } } // namespace struct AnnCagraInputs { int n_queries; int n_rows; int dim; int k; graph_build_algo build_algo; search_algo algo; int max_queries; int team_size; int itopk_size; int search_width; raft::distance::DistanceType metric; bool host_dataset; bool include_serialized_dataset; // std::optional<double> double min_recall; // = std::nullopt; }; inline ::std::ostream& operator<<(::std::ostream& os, const AnnCagraInputs& p) { std::vector<std::string> algo = {"single-cta", "multi_cta", 
"multi_kernel", "auto"}; std::vector<std::string> build_algo = {"IVF_PQ", "NN_DESCENT"}; os << "{n_queries=" << p.n_queries << ", dataset shape=" << p.n_rows << "x" << p.dim << ", k=" << p.k << ", " << algo.at((int)p.algo) << ", max_queries=" << p.max_queries << ", itopk_size=" << p.itopk_size << ", search_width=" << p.search_width << ", metric=" << static_cast<int>(p.metric) << (p.host_dataset ? ", host" : ", device") << ", build_algo=" << build_algo.at((int)p.build_algo) << '}' << std::endl; return os; } template <typename DistanceT, typename DataT, typename IdxT> class AnnCagraTest : public ::testing::TestWithParam<AnnCagraInputs> { public: AnnCagraTest() : stream_(resource::get_cuda_stream(handle_)), ps(::testing::TestWithParam<AnnCagraInputs>::GetParam()), database(0, stream_), search_queries(0, stream_) { } protected: void testCagra() { size_t queries_size = ps.n_queries * ps.k; std::vector<IdxT> indices_Cagra(queries_size); std::vector<IdxT> indices_naive(queries_size); std::vector<DistanceT> distances_Cagra(queries_size); std::vector<DistanceT> distances_naive(queries_size); { rmm::device_uvector<DistanceT> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); naive_knn<DistanceT, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database.data(), ps.n_queries, ps.n_rows, ps.dim, ps.k, ps.metric); update_host(distances_naive.data(), distances_naive_dev.data(), queries_size, stream_); update_host(indices_naive.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } { rmm::device_uvector<DistanceT> distances_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_dev(queries_size, stream_); { cagra::index_params index_params; index_params.metric = ps.metric; // Note: currently ony the cagra::index_params metric is // not used for knn_graph building. 
index_params.build_algo = ps.build_algo; cagra::search_params search_params; search_params.algo = ps.algo; search_params.max_queries = ps.max_queries; search_params.team_size = ps.team_size; auto database_view = raft::make_device_matrix_view<const DataT, int64_t>( (const DataT*)database.data(), ps.n_rows, ps.dim); { cagra::index<DataT, IdxT> index(handle_); if (ps.host_dataset) { auto database_host = raft::make_host_matrix<DataT, int64_t>(ps.n_rows, ps.dim); raft::copy(database_host.data_handle(), database.data(), database.size(), stream_); auto database_host_view = raft::make_host_matrix_view<const DataT, int64_t>( (const DataT*)database_host.data_handle(), ps.n_rows, ps.dim); index = cagra::build<DataT, IdxT>(handle_, index_params, database_host_view); } else { index = cagra::build<DataT, IdxT>(handle_, index_params, database_view); }; cagra::serialize(handle_, "cagra_index", index, ps.include_serialized_dataset); } auto index = cagra::deserialize<DataT, IdxT>(handle_, "cagra_index"); if (!ps.include_serialized_dataset) { index.update_dataset(handle_, database_view); } auto search_queries_view = raft::make_device_matrix_view<const DataT, int64_t>( search_queries.data(), ps.n_queries, ps.dim); auto indices_out_view = raft::make_device_matrix_view<IdxT, int64_t>(indices_dev.data(), ps.n_queries, ps.k); auto dists_out_view = raft::make_device_matrix_view<DistanceT, int64_t>( distances_dev.data(), ps.n_queries, ps.k); cagra::search( handle_, search_params, index, search_queries_view, indices_out_view, dists_out_view); update_host(distances_Cagra.data(), distances_dev.data(), queries_size, stream_); update_host(indices_Cagra.data(), indices_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } // for (int i = 0; i < min(ps.n_queries, 10); i++) { // // std::cout << "query " << i << std::end; // print_vector("T", indices_naive.data() + i * ps.k, ps.k, std::cout); // print_vector("C", indices_Cagra.data() + i * ps.k, ps.k, std::cout); // print_vector("T", 
distances_naive.data() + i * ps.k, ps.k, std::cout); // print_vector("C", distances_Cagra.data() + i * ps.k, ps.k, std::cout); // } double min_recall = ps.min_recall; EXPECT_TRUE(eval_neighbours(indices_naive, indices_Cagra, distances_naive, distances_Cagra, ps.n_queries, ps.k, 0.003, min_recall)); EXPECT_TRUE(eval_distances(handle_, database.data(), search_queries.data(), indices_dev.data(), distances_dev.data(), ps.n_rows, ps.dim, ps.n_queries, ps.k, ps.metric, 1.0e-4)); } } void SetUp() override { database.resize(((size_t)ps.n_rows) * ps.dim, stream_); search_queries.resize(ps.n_queries * ps.dim, stream_); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { raft::random::normal(handle_, r, database.data(), ps.n_rows * ps.dim, DataT(0.1), DataT(2.0)); raft::random::normal( handle_, r, search_queries.data(), ps.n_queries * ps.dim, DataT(0.1), DataT(2.0)); } else { raft::random::uniformInt( handle_, r, database.data(), ps.n_rows * ps.dim, DataT(1), DataT(20)); raft::random::uniformInt( handle_, r, search_queries.data(), ps.n_queries * ps.dim, DataT(1), DataT(20)); } resource::sync_stream(handle_); } void TearDown() override { resource::sync_stream(handle_); database.resize(0, stream_); search_queries.resize(0, stream_); } private: raft::resources handle_; rmm::cuda_stream_view stream_; AnnCagraInputs ps; rmm::device_uvector<DataT> database; rmm::device_uvector<DataT> search_queries; }; template <typename DistanceT, typename DataT, typename IdxT> class AnnCagraSortTest : public ::testing::TestWithParam<AnnCagraInputs> { public: AnnCagraSortTest() : ps(::testing::TestWithParam<AnnCagraInputs>::GetParam()), database(0, handle_.get_stream()) { } protected: void testCagraSort() { { // Step 1: Build a sorted KNN graph by CAGRA knn build auto database_view = raft::make_device_matrix_view<const DataT, int64_t>( (const DataT*)database.data(), ps.n_rows, ps.dim); auto database_host = raft::make_host_matrix<DataT, int64_t>(ps.n_rows, ps.dim); 
raft::copy( database_host.data_handle(), database.data(), database.size(), handle_.get_stream()); auto database_host_view = raft::make_host_matrix_view<const DataT, int64_t>( (const DataT*)database_host.data_handle(), ps.n_rows, ps.dim); cagra::index_params index_params; auto knn_graph = raft::make_host_matrix<IdxT, int64_t>(ps.n_rows, index_params.intermediate_graph_degree); if (ps.build_algo == graph_build_algo::IVF_PQ) { if (ps.host_dataset) { cagra::build_knn_graph<DataT, IdxT>(handle_, database_host_view, knn_graph.view()); } else { cagra::build_knn_graph<DataT, IdxT>(handle_, database_view, knn_graph.view()); } } else { auto nn_descent_idx_params = experimental::nn_descent::index_params{}; nn_descent_idx_params.graph_degree = index_params.intermediate_graph_degree; nn_descent_idx_params.intermediate_graph_degree = index_params.intermediate_graph_degree; if (ps.host_dataset) { cagra::build_knn_graph<DataT, IdxT>( handle_, database_host_view, knn_graph.view(), nn_descent_idx_params); } else { cagra::build_knn_graph<DataT, IdxT>( handle_, database_host_view, knn_graph.view(), nn_descent_idx_params); } } handle_.sync_stream(); ASSERT_TRUE(CheckOrder<DistanceT>(knn_graph.view(), database_host.view())); RandomSuffle(knn_graph.view()); cagra::sort_knn_graph(handle_, database_view, knn_graph.view()); handle_.sync_stream(); ASSERT_TRUE(CheckOrder<DistanceT>(knn_graph.view(), database_host.view())); } } void SetUp() override { database.resize(((size_t)ps.n_rows) * ps.dim, handle_.get_stream()); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { GenerateRoundingErrorFreeDataset(handle_, database.data(), ps.n_rows, ps.dim, r); } else { raft::random::uniformInt( handle_, r, database.data(), ps.n_rows * ps.dim, DataT(1), DataT(20)); } handle_.sync_stream(); } void TearDown() override { handle_.sync_stream(); database.resize(0, handle_.get_stream()); } private: raft::device_resources handle_; AnnCagraInputs ps; rmm::device_uvector<DataT> 
database; }; template <typename DistanceT, typename DataT, typename IdxT> class AnnCagraFilterTest : public ::testing::TestWithParam<AnnCagraInputs> { public: AnnCagraFilterTest() : stream_(resource::get_cuda_stream(handle_)), ps(::testing::TestWithParam<AnnCagraInputs>::GetParam()), database(0, stream_), search_queries(0, stream_) { } protected: void testCagraFilter() { size_t queries_size = ps.n_queries * ps.k; std::vector<IdxT> indices_Cagra(queries_size); std::vector<IdxT> indices_naive(queries_size); std::vector<DistanceT> distances_Cagra(queries_size); std::vector<DistanceT> distances_naive(queries_size); { rmm::device_uvector<DistanceT> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); auto* database_filtered_ptr = database.data() + test_cagra_sample_filter::offset * ps.dim; naive_knn<DistanceT, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database_filtered_ptr, ps.n_queries, ps.n_rows - test_cagra_sample_filter::offset, ps.dim, ps.k, ps.metric); raft::linalg::addScalar(indices_naive_dev.data(), indices_naive_dev.data(), IdxT(test_cagra_sample_filter::offset), queries_size, stream_); update_host(distances_naive.data(), distances_naive_dev.data(), queries_size, stream_); update_host(indices_naive.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } { rmm::device_uvector<DistanceT> distances_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_dev(queries_size, stream_); { cagra::index_params index_params; index_params.metric = ps.metric; // Note: currently ony the cagra::index_params metric is // not used for knn_graph building. 
index_params.nn_descent_niter = 50; cagra::search_params search_params; search_params.algo = ps.algo; search_params.max_queries = ps.max_queries; search_params.team_size = ps.team_size; search_params.hashmap_mode = cagra::hash_mode::HASH; auto database_view = raft::make_device_matrix_view<const DataT, int64_t>( (const DataT*)database.data(), ps.n_rows, ps.dim); cagra::index<DataT, IdxT> index(handle_); if (ps.host_dataset) { auto database_host = raft::make_host_matrix<DataT, int64_t>(ps.n_rows, ps.dim); raft::copy(database_host.data_handle(), database.data(), database.size(), stream_); auto database_host_view = raft::make_host_matrix_view<const DataT, int64_t>( (const DataT*)database_host.data_handle(), ps.n_rows, ps.dim); index = cagra::build<DataT, IdxT>(handle_, index_params, database_host_view); } else { index = cagra::build<DataT, IdxT>(handle_, index_params, database_view); } if (!ps.include_serialized_dataset) { index.update_dataset(handle_, database_view); } auto search_queries_view = raft::make_device_matrix_view<const DataT, int64_t>( search_queries.data(), ps.n_queries, ps.dim); auto indices_out_view = raft::make_device_matrix_view<IdxT, int64_t>(indices_dev.data(), ps.n_queries, ps.k); auto dists_out_view = raft::make_device_matrix_view<DistanceT, int64_t>( distances_dev.data(), ps.n_queries, ps.k); cagra::search_with_filtering(handle_, search_params, index, search_queries_view, indices_out_view, dists_out_view, test_cagra_sample_filter()); update_host(distances_Cagra.data(), distances_dev.data(), queries_size, stream_); update_host(indices_Cagra.data(), indices_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } // Test filter bool unacceptable_node = false; for (int q = 0; q < ps.n_queries; q++) { for (int i = 0; i < ps.k; i++) { const auto n = indices_Cagra[q * ps.k + i]; unacceptable_node = unacceptable_node | !test_cagra_sample_filter()(q, n); } } EXPECT_FALSE(unacceptable_node); double min_recall = ps.min_recall; 
EXPECT_TRUE(eval_neighbours(indices_naive, indices_Cagra, distances_naive, distances_Cagra, ps.n_queries, ps.k, 0.003, min_recall)); EXPECT_TRUE(eval_distances(handle_, database.data(), search_queries.data(), indices_dev.data(), distances_dev.data(), ps.n_rows, ps.dim, ps.n_queries, ps.k, ps.metric, 1.0e-4)); } } void testCagraRemoved() { size_t queries_size = ps.n_queries * ps.k; std::vector<IdxT> indices_Cagra(queries_size); std::vector<IdxT> indices_naive(queries_size); std::vector<DistanceT> distances_Cagra(queries_size); std::vector<DistanceT> distances_naive(queries_size); { rmm::device_uvector<DistanceT> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); auto* database_filtered_ptr = database.data() + test_cagra_sample_filter::offset * ps.dim; naive_knn<DistanceT, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database_filtered_ptr, ps.n_queries, ps.n_rows - test_cagra_sample_filter::offset, ps.dim, ps.k, ps.metric); raft::linalg::addScalar(indices_naive_dev.data(), indices_naive_dev.data(), IdxT(test_cagra_sample_filter::offset), queries_size, stream_); update_host(distances_naive.data(), distances_naive_dev.data(), queries_size, stream_); update_host(indices_naive.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } { rmm::device_uvector<DistanceT> distances_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_dev(queries_size, stream_); { cagra::index_params index_params; index_params.metric = ps.metric; // Note: currently ony the cagra::index_params metric is // not used for knn_graph building. 
index_params.nn_descent_niter = 50; cagra::search_params search_params; search_params.algo = ps.algo; search_params.max_queries = ps.max_queries; search_params.team_size = ps.team_size; search_params.hashmap_mode = cagra::hash_mode::HASH; auto database_view = raft::make_device_matrix_view<const DataT, int64_t>( (const DataT*)database.data(), ps.n_rows, ps.dim); cagra::index<DataT, IdxT> index(handle_); if (ps.host_dataset) { auto database_host = raft::make_host_matrix<DataT, int64_t>(ps.n_rows, ps.dim); raft::copy(database_host.data_handle(), database.data(), database.size(), stream_); auto database_host_view = raft::make_host_matrix_view<const DataT, int64_t>( (const DataT*)database_host.data_handle(), ps.n_rows, ps.dim); index = cagra::build<DataT, IdxT>(handle_, index_params, database_host_view); } else { index = cagra::build<DataT, IdxT>(handle_, index_params, database_view); } if (!ps.include_serialized_dataset) { index.update_dataset(handle_, database_view); } auto search_queries_view = raft::make_device_matrix_view<const DataT, int64_t>( search_queries.data(), ps.n_queries, ps.dim); auto indices_out_view = raft::make_device_matrix_view<IdxT, int64_t>(indices_dev.data(), ps.n_queries, ps.k); auto dists_out_view = raft::make_device_matrix_view<DistanceT, int64_t>( distances_dev.data(), ps.n_queries, ps.k); auto removed_indices = raft::make_device_vector<IdxT, int64_t>(handle_, test_cagra_sample_filter::offset); thrust::sequence( resource::get_thrust_policy(handle_), thrust::device_pointer_cast(removed_indices.data_handle()), thrust::device_pointer_cast(removed_indices.data_handle() + removed_indices.extent(0))); resource::sync_stream(handle_); raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset( handle_, removed_indices.view(), ps.n_rows); cagra::search_with_filtering( handle_, search_params, index, search_queries_view, indices_out_view, dists_out_view, raft::neighbors::filtering::bitset_filter(removed_indices_bitset.view())); 
update_host(distances_Cagra.data(), distances_dev.data(), queries_size, stream_); update_host(indices_Cagra.data(), indices_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } double min_recall = ps.min_recall; EXPECT_TRUE(eval_neighbours(indices_naive, indices_Cagra, distances_naive, distances_Cagra, ps.n_queries, ps.k, 0.003, min_recall)); EXPECT_TRUE(eval_distances(handle_, database.data(), search_queries.data(), indices_dev.data(), distances_dev.data(), ps.n_rows, ps.dim, ps.n_queries, ps.k, ps.metric, 1.0e-4)); } } void SetUp() override { database.resize(((size_t)ps.n_rows) * ps.dim, stream_); search_queries.resize(ps.n_queries * ps.dim, stream_); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { raft::random::normal(handle_, r, database.data(), ps.n_rows * ps.dim, DataT(0.1), DataT(2.0)); raft::random::normal( handle_, r, search_queries.data(), ps.n_queries * ps.dim, DataT(0.1), DataT(2.0)); } else { raft::random::uniformInt( handle_, r, database.data(), ps.n_rows * ps.dim, DataT(1), DataT(20)); raft::random::uniformInt( handle_, r, search_queries.data(), ps.n_queries * ps.dim, DataT(1), DataT(20)); } resource::sync_stream(handle_); } void TearDown() override { resource::sync_stream(handle_); database.resize(0, stream_); search_queries.resize(0, stream_); } private: raft::resources handle_; rmm::cuda_stream_view stream_; AnnCagraInputs ps; rmm::device_uvector<DataT> database; rmm::device_uvector<DataT> search_queries; }; inline std::vector<AnnCagraInputs> generate_inputs() { // TODO(tfeher): test MULTI_CTA kernel with search_width > 1 to allow multiple CTA per queries std::vector<AnnCagraInputs> inputs = raft::util::itertools::product<AnnCagraInputs>( {100}, {1000}, {1, 8, 17}, {1, 16}, // k {graph_build_algo::IVF_PQ, graph_build_algo::NN_DESCENT}, {search_algo::SINGLE_CTA, search_algo::MULTI_CTA, search_algo::MULTI_KERNEL}, {0, 1, 10, 100}, // query size {0}, {256}, {1}, 
{raft::distance::DistanceType::L2Expanded}, {false}, {true}, {0.995}); auto inputs2 = raft::util::itertools::product<AnnCagraInputs>( {100}, {1000}, {1, 3, 5, 7, 8, 17, 64, 128, 137, 192, 256, 512, 619, 1024}, // dim {16}, // k {graph_build_algo::IVF_PQ, graph_build_algo::NN_DESCENT}, {search_algo::AUTO}, {10}, {0}, {64}, {1}, {raft::distance::DistanceType::L2Expanded}, {false}, {true}, {0.995}); inputs.insert(inputs.end(), inputs2.begin(), inputs2.end()); inputs2 = raft::util::itertools::product<AnnCagraInputs>( {100}, {1000}, {64}, {16}, {graph_build_algo::IVF_PQ, graph_build_algo::NN_DESCENT}, {search_algo::AUTO}, {10}, {0, 4, 8, 16, 32}, // team_size {64}, {1}, {raft::distance::DistanceType::L2Expanded}, {false}, {false}, {0.995}); inputs.insert(inputs.end(), inputs2.begin(), inputs2.end()); inputs2 = raft::util::itertools::product<AnnCagraInputs>( {100}, {1000}, {64}, {16}, {graph_build_algo::IVF_PQ, graph_build_algo::NN_DESCENT}, {search_algo::AUTO}, {10}, {0}, // team_size {32, 64, 128, 256, 512, 768}, {1}, {raft::distance::DistanceType::L2Expanded}, {false}, {true}, {0.995}); inputs.insert(inputs.end(), inputs2.begin(), inputs2.end()); inputs2 = raft::util::itertools::product<AnnCagraInputs>( {100}, {10000, 20000}, {32}, {10}, {graph_build_algo::IVF_PQ, graph_build_algo::NN_DESCENT}, {search_algo::AUTO}, {10}, {0}, // team_size {64}, {1}, {raft::distance::DistanceType::L2Expanded}, {false, true}, {false}, {0.995}); inputs.insert(inputs.end(), inputs2.begin(), inputs2.end()); return inputs; } const std::vector<AnnCagraInputs> inputs = generate_inputs(); } // namespace raft::neighbors::cagra
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/ann_ivf_flat.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "../test_utils.cuh" #include "ann_utils.cuh" #include <raft/core/device_mdarray.hpp> #include <raft/core/host_mdarray.hpp> #include <raft/core/mdspan.hpp> #include <raft/core/mdspan_types.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resource/thrust_policy.hpp> #include <raft/linalg/map.cuh> #include <raft/neighbors/ivf_flat_types.hpp> #include <raft/neighbors/ivf_list.hpp> #include <raft/neighbors/sample_filter.cuh> #include <raft/util/cudart_utils.hpp> #include <raft/util/fast_int_div.cuh> #include <thrust/functional.h> #include <raft_internal/neighbors/naive_knn.cuh> #include <raft/core/device_mdspan.hpp> #include <raft/core/logger.hpp> #include <raft/distance/distance_types.hpp> #include <raft/matrix/gather.cuh> #include <raft/neighbors/ivf_flat.cuh> #include <raft/neighbors/ivf_flat_helpers.cuh> #include <raft/random/rng.cuh> #include <raft/spatial/knn/ann.cuh> #include <raft/spatial/knn/knn.cuh> #include <raft/stats/mean.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <gtest/gtest.h> #include <rmm/device_uvector.hpp> #include <thrust/sequence.h> #include <cstddef> #include <iostream> #include <vector> namespace raft::neighbors::ivf_flat { struct test_ivf_sample_filter { static constexpr unsigned offset = 300; }; template <typename IdxT> struct AnnIvfFlatInputs { IdxT num_queries; IdxT 
num_db_vecs; IdxT dim; IdxT k; IdxT nprobe; IdxT nlist; raft::distance::DistanceType metric; bool adaptive_centers; }; template <typename IdxT> ::std::ostream& operator<<(::std::ostream& os, const AnnIvfFlatInputs<IdxT>& p) { os << "{ " << p.num_queries << ", " << p.num_db_vecs << ", " << p.dim << ", " << p.k << ", " << p.nprobe << ", " << p.nlist << ", " << static_cast<int>(p.metric) << ", " << p.adaptive_centers << '}' << std::endl; return os; } template <typename T, typename DataT, typename IdxT> class AnnIVFFlatTest : public ::testing::TestWithParam<AnnIvfFlatInputs<IdxT>> { public: AnnIVFFlatTest() : stream_(resource::get_cuda_stream(handle_)), ps(::testing::TestWithParam<AnnIvfFlatInputs<IdxT>>::GetParam()), database(0, stream_), search_queries(0, stream_) { } void testIVFFlat() { size_t queries_size = ps.num_queries * ps.k; std::vector<IdxT> indices_ivfflat(queries_size); std::vector<IdxT> indices_naive(queries_size); std::vector<T> distances_ivfflat(queries_size); std::vector<T> distances_naive(queries_size); { rmm::device_uvector<T> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); naive_knn<T, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database.data(), ps.num_queries, ps.num_db_vecs, ps.dim, ps.k, ps.metric); update_host(distances_naive.data(), distances_naive_dev.data(), queries_size, stream_); update_host(indices_naive.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } { // unless something is really wrong with clustering, this could serve as a lower bound on // recall double min_recall = static_cast<double>(ps.nprobe) / static_cast<double>(ps.nlist); rmm::device_uvector<T> distances_ivfflat_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_ivfflat_dev(queries_size, stream_); { // legacy interface raft::spatial::knn::IVFFlatParam ivfParams; ivfParams.nprobe = ps.nprobe; ivfParams.nlist 
= ps.nlist; raft::spatial::knn::knnIndex index; approx_knn_build_index(handle_, &index, dynamic_cast<raft::spatial::knn::knnIndexParam*>(&ivfParams), ps.metric, (IdxT)0, database.data(), ps.num_db_vecs, ps.dim); resource::sync_stream(handle_); approx_knn_search(handle_, distances_ivfflat_dev.data(), indices_ivfflat_dev.data(), &index, ps.k, search_queries.data(), ps.num_queries); update_host(distances_ivfflat.data(), distances_ivfflat_dev.data(), queries_size, stream_); update_host(indices_ivfflat.data(), indices_ivfflat_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } ASSERT_TRUE(eval_neighbours(indices_naive, indices_ivfflat, distances_naive, distances_ivfflat, ps.num_queries, ps.k, 0.001, min_recall)); { ivf_flat::index_params index_params; ivf_flat::search_params search_params; index_params.n_lists = ps.nlist; index_params.metric = ps.metric; index_params.adaptive_centers = ps.adaptive_centers; search_params.n_probes = ps.nprobe; index_params.add_data_on_build = false; index_params.kmeans_trainset_fraction = 0.5; index_params.metric_arg = 0; auto database_view = raft::make_device_matrix_view<const DataT, IdxT>( (const DataT*)database.data(), ps.num_db_vecs, ps.dim); auto idx = ivf_flat::build(handle_, index_params, database_view); rmm::device_uvector<IdxT> vector_indices(ps.num_db_vecs, stream_); thrust::sequence(resource::get_thrust_policy(handle_), thrust::device_pointer_cast(vector_indices.data()), thrust::device_pointer_cast(vector_indices.data() + ps.num_db_vecs)); resource::sync_stream(handle_); IdxT half_of_data = ps.num_db_vecs / 2; auto half_of_data_view = raft::make_device_matrix_view<const DataT, IdxT>( (const DataT*)database.data(), half_of_data, ps.dim); const std::optional<raft::device_vector_view<const IdxT, IdxT>> no_opt = std::nullopt; index<DataT, IdxT> index_2 = ivf_flat::extend(handle_, half_of_data_view, no_opt, idx); auto new_half_of_data_view = raft::make_device_matrix_view<const DataT, IdxT>( database.data() + 
half_of_data * ps.dim, IdxT(ps.num_db_vecs) - half_of_data, ps.dim); auto new_half_of_data_indices_view = raft::make_device_vector_view<const IdxT, IdxT>( vector_indices.data() + half_of_data, IdxT(ps.num_db_vecs) - half_of_data); ivf_flat::extend(handle_, new_half_of_data_view, std::make_optional<raft::device_vector_view<const IdxT, IdxT>>( new_half_of_data_indices_view), &index_2); auto search_queries_view = raft::make_device_matrix_view<const DataT, IdxT>( search_queries.data(), ps.num_queries, ps.dim); auto indices_out_view = raft::make_device_matrix_view<IdxT, IdxT>( indices_ivfflat_dev.data(), ps.num_queries, ps.k); auto dists_out_view = raft::make_device_matrix_view<T, IdxT>( distances_ivfflat_dev.data(), ps.num_queries, ps.k); ivf_flat::detail::serialize(handle_, "ivf_flat_index", index_2); auto index_loaded = ivf_flat::detail::deserialize<DataT, IdxT>(handle_, "ivf_flat_index"); ASSERT_EQ(index_2.size(), index_loaded.size()); ivf_flat::search(handle_, search_params, index_loaded, search_queries_view, indices_out_view, dists_out_view); update_host(distances_ivfflat.data(), distances_ivfflat_dev.data(), queries_size, stream_); update_host(indices_ivfflat.data(), indices_ivfflat_dev.data(), queries_size, stream_); resource::sync_stream(handle_); // Test the centroid invariants if (index_2.adaptive_centers()) { // The centers must be up-to-date with the corresponding data std::vector<uint32_t> list_sizes(index_2.n_lists()); std::vector<IdxT*> list_indices(index_2.n_lists()); rmm::device_uvector<float> centroid(ps.dim, stream_); raft::copy( list_sizes.data(), index_2.list_sizes().data_handle(), index_2.n_lists(), stream_); raft::copy( list_indices.data(), index_2.inds_ptrs().data_handle(), index_2.n_lists(), stream_); resource::sync_stream(handle_); for (uint32_t l = 0; l < index_2.n_lists(); l++) { if (list_sizes[l] == 0) continue; rmm::device_uvector<float> cluster_data(list_sizes[l] * ps.dim, stream_); 
raft::spatial::knn::detail::utils::copy_selected<float>((IdxT)list_sizes[l], (IdxT)ps.dim, database.data(), list_indices[l], (IdxT)ps.dim, cluster_data.data(), (IdxT)ps.dim, stream_); raft::stats::mean<float, uint32_t>( centroid.data(), cluster_data.data(), ps.dim, list_sizes[l], false, true, stream_); ASSERT_TRUE(raft::devArrMatch(index_2.centers().data_handle() + ps.dim * l, centroid.data(), ps.dim, raft::CompareApprox<float>(0.001), stream_)); } } else { // The centers must be immutable ASSERT_TRUE(raft::devArrMatch(index_2.centers().data_handle(), idx.centers().data_handle(), index_2.centers().size(), raft::Compare<float>(), stream_)); } } ASSERT_TRUE(eval_neighbours(indices_naive, indices_ivfflat, distances_naive, distances_ivfflat, ps.num_queries, ps.k, 0.001, min_recall)); } } void testPacker() { ivf_flat::index_params index_params; ivf_flat::search_params search_params; index_params.n_lists = ps.nlist; index_params.metric = ps.metric; index_params.adaptive_centers = false; search_params.n_probes = ps.nprobe; index_params.add_data_on_build = false; index_params.kmeans_trainset_fraction = 1.0; index_params.metric_arg = 0; auto database_view = raft::make_device_matrix_view<const DataT, IdxT>( (const DataT*)database.data(), ps.num_db_vecs, ps.dim); auto idx = ivf_flat::build(handle_, index_params, database_view); const std::optional<raft::device_vector_view<const IdxT, IdxT>> no_opt = std::nullopt; index<DataT, IdxT> extend_index = ivf_flat::extend(handle_, database_view, no_opt, idx); auto list_sizes = raft::make_host_vector<uint32_t>(idx.n_lists()); update_host(list_sizes.data_handle(), extend_index.list_sizes().data_handle(), extend_index.n_lists(), stream_); resource::sync_stream(handle_); auto& lists = idx.lists(); // conservative memory allocation for codepacking auto list_device_spec = list_spec<uint32_t, DataT, IdxT>{idx.dim(), false}; for (uint32_t label = 0; label < idx.n_lists(); label++) { uint32_t list_size = list_sizes.data_handle()[label]; 
ivf::resize_list(handle_, lists[label], list_device_spec, list_size, 0); } idx.recompute_internal_state(handle_); using interleaved_group = Pow2<kIndexGroupSize>; for (uint32_t label = 0; label < idx.n_lists(); label++) { uint32_t list_size = list_sizes.data_handle()[label]; if (list_size > 0) { uint32_t padded_list_size = interleaved_group::roundUp(list_size); uint32_t n_elems = padded_list_size * idx.dim(); auto list_data = lists[label]->data; auto list_inds = extend_index.lists()[label]->indices; // fetch the flat codes auto flat_codes = make_device_matrix<DataT, uint32_t>(handle_, list_size, idx.dim()); matrix::gather( handle_, make_device_matrix_view<const DataT, uint32_t>( (const DataT*)database.data(), static_cast<uint32_t>(ps.num_db_vecs), idx.dim()), make_device_vector_view<const IdxT, uint32_t>((const IdxT*)list_inds.data_handle(), list_size), flat_codes.view()); helpers::codepacker::pack<DataT, IdxT>( handle_, make_const_mdspan(flat_codes.view()), idx.veclen(), 0, list_data.view()); { auto mask = make_device_vector<bool>(handle_, n_elems); linalg::map_offset(handle_, mask.view(), [dim = idx.dim(), list_size, padded_list_size, chunk_size = util::FastIntDiv(idx.veclen())] __device__(auto i) { uint32_t max_group_offset = interleaved_group::roundDown(list_size); if (i < max_group_offset * dim) { return true; } uint32_t surplus = (i - max_group_offset * dim); uint32_t ingroup_id = interleaved_group::mod(surplus / chunk_size); return ingroup_id < (list_size - max_group_offset); }); // ensure that the correct number of indices are masked out ASSERT_TRUE(thrust::reduce(resource::get_thrust_policy(handle_), mask.data_handle(), mask.data_handle() + n_elems, 0) == list_size * ps.dim); auto packed_list_data = make_device_vector<DataT, uint32_t>(handle_, n_elems); linalg::map_offset(handle_, packed_list_data.view(), [mask = mask.data_handle(), list_data = list_data.data_handle()] __device__(uint32_t i) { if (mask[i]) return list_data[i]; return DataT{0}; }); auto 
extend_data = extend_index.lists()[label]->data; auto extend_data_filtered = make_device_vector<DataT, uint32_t>(handle_, n_elems); linalg::map_offset(handle_, extend_data_filtered.view(), [mask = mask.data_handle(), extend_data = extend_data.data_handle()] __device__(uint32_t i) { if (mask[i]) return extend_data[i]; return DataT{0}; }); ASSERT_TRUE(raft::devArrMatch(packed_list_data.data_handle(), extend_data_filtered.data_handle(), n_elems, raft::Compare<DataT>(), stream_)); } auto unpacked_flat_codes = make_device_matrix<DataT, uint32_t>(handle_, list_size, idx.dim()); helpers::codepacker::unpack<DataT, IdxT>( handle_, list_data.view(), idx.veclen(), 0, unpacked_flat_codes.view()); ASSERT_TRUE(raft::devArrMatch(flat_codes.data_handle(), unpacked_flat_codes.data_handle(), list_size * ps.dim, raft::Compare<DataT>(), stream_)); } } } void testFilter() { size_t queries_size = ps.num_queries * ps.k; std::vector<IdxT> indices_ivfflat(queries_size); std::vector<IdxT> indices_naive(queries_size); std::vector<T> distances_ivfflat(queries_size); std::vector<T> distances_naive(queries_size); { rmm::device_uvector<T> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); auto* database_filtered_ptr = database.data() + test_ivf_sample_filter::offset * ps.dim; naive_knn<T, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database_filtered_ptr, ps.num_queries, ps.num_db_vecs - test_ivf_sample_filter::offset, ps.dim, ps.k, ps.metric); raft::linalg::addScalar(indices_naive_dev.data(), indices_naive_dev.data(), IdxT(test_ivf_sample_filter::offset), queries_size, stream_); update_host(distances_naive.data(), distances_naive_dev.data(), queries_size, stream_); update_host(indices_naive.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } { // unless something is really wrong with clustering, this could serve as a lower bound on // recall 
double min_recall = static_cast<double>(ps.nprobe) / static_cast<double>(ps.nlist); auto distances_ivfflat_dev = raft::make_device_matrix<T, IdxT>(handle_, ps.num_queries, ps.k); auto indices_ivfflat_dev = raft::make_device_matrix<IdxT, IdxT>(handle_, ps.num_queries, ps.k); { ivf_flat::index_params index_params; ivf_flat::search_params search_params; index_params.n_lists = ps.nlist; index_params.metric = ps.metric; index_params.adaptive_centers = ps.adaptive_centers; search_params.n_probes = ps.nprobe; index_params.add_data_on_build = true; index_params.kmeans_trainset_fraction = 0.5; index_params.metric_arg = 0; // Create IVF Flat index auto database_view = raft::make_device_matrix_view<const DataT, IdxT>( (const DataT*)database.data(), ps.num_db_vecs, ps.dim); auto index = ivf_flat::build(handle_, index_params, database_view); // Create Bitset filter auto removed_indices = raft::make_device_vector<IdxT, int64_t>(handle_, test_ivf_sample_filter::offset); thrust::sequence(resource::get_thrust_policy(handle_), thrust::device_pointer_cast(removed_indices.data_handle()), thrust::device_pointer_cast(removed_indices.data_handle() + test_ivf_sample_filter::offset)); resource::sync_stream(handle_); raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset( handle_, removed_indices.view(), ps.num_db_vecs); // Search with the filter auto search_queries_view = raft::make_device_matrix_view<const DataT, IdxT>( search_queries.data(), ps.num_queries, ps.dim); ivf_flat::search_with_filtering( handle_, search_params, index, search_queries_view, indices_ivfflat_dev.view(), distances_ivfflat_dev.view(), raft::neighbors::filtering::bitset_filter(removed_indices_bitset.view())); update_host( distances_ivfflat.data(), distances_ivfflat_dev.data_handle(), queries_size, stream_); update_host( indices_ivfflat.data(), indices_ivfflat_dev.data_handle(), queries_size, stream_); resource::sync_stream(handle_); } ASSERT_TRUE(eval_neighbours(indices_naive, indices_ivfflat, 
distances_naive, distances_ivfflat, ps.num_queries, ps.k, 0.001, min_recall)); } } void SetUp() override { database.resize(ps.num_db_vecs * ps.dim, stream_); search_queries.resize(ps.num_queries * ps.dim, stream_); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { raft::random::uniform( handle_, r, database.data(), ps.num_db_vecs * ps.dim, DataT(0.1), DataT(2.0)); raft::random::uniform( handle_, r, search_queries.data(), ps.num_queries * ps.dim, DataT(0.1), DataT(2.0)); } else { raft::random::uniformInt( handle_, r, database.data(), ps.num_db_vecs * ps.dim, DataT(1), DataT(20)); raft::random::uniformInt( handle_, r, search_queries.data(), ps.num_queries * ps.dim, DataT(1), DataT(20)); } resource::sync_stream(handle_); } void TearDown() override { resource::sync_stream(handle_); database.resize(0, stream_); search_queries.resize(0, stream_); } private: raft::resources handle_; rmm::cuda_stream_view stream_; AnnIvfFlatInputs<IdxT> ps; rmm::device_uvector<DataT> database; rmm::device_uvector<DataT> search_queries; }; const std::vector<AnnIvfFlatInputs<int64_t>> inputs = { // test various dims (aligned and not aligned to vector sizes) {1000, 10000, 1, 16, 40, 1024, raft::distance::DistanceType::L2Expanded, true}, {1000, 10000, 2, 16, 40, 1024, raft::distance::DistanceType::L2Expanded, false}, {1000, 10000, 3, 16, 40, 1024, raft::distance::DistanceType::L2Expanded, true}, {1000, 10000, 4, 16, 40, 1024, raft::distance::DistanceType::L2Expanded, false}, {1000, 10000, 5, 16, 40, 1024, raft::distance::DistanceType::InnerProduct, false}, {1000, 10000, 8, 16, 40, 1024, raft::distance::DistanceType::InnerProduct, true}, {1000, 10000, 5, 16, 40, 1024, raft::distance::DistanceType::L2SqrtExpanded, false}, {1000, 10000, 8, 16, 40, 1024, raft::distance::DistanceType::L2SqrtExpanded, true}, // test dims that do not fit into kernel shared memory limits {1000, 10000, 2048, 16, 40, 1024, raft::distance::DistanceType::L2Expanded, false}, {1000, 10000, 
2049, 16, 40, 1024, raft::distance::DistanceType::L2Expanded, false}, {1000, 10000, 2050, 16, 40, 1024, raft::distance::DistanceType::InnerProduct, false}, {1000, 10000, 2051, 16, 40, 1024, raft::distance::DistanceType::InnerProduct, true}, {1000, 10000, 2052, 16, 40, 1024, raft::distance::DistanceType::InnerProduct, false}, {1000, 10000, 2053, 16, 40, 1024, raft::distance::DistanceType::L2Expanded, true}, {1000, 10000, 2056, 16, 40, 1024, raft::distance::DistanceType::L2Expanded, true}, // various random combinations {1000, 10000, 16, 10, 40, 1024, raft::distance::DistanceType::L2Expanded, false}, {1000, 10000, 16, 10, 50, 1024, raft::distance::DistanceType::L2Expanded, false}, {1000, 10000, 16, 10, 70, 1024, raft::distance::DistanceType::L2Expanded, false}, {100, 10000, 16, 10, 20, 512, raft::distance::DistanceType::L2Expanded, false}, {20, 100000, 16, 10, 20, 1024, raft::distance::DistanceType::L2Expanded, true}, {1000, 100000, 16, 10, 20, 1024, raft::distance::DistanceType::L2Expanded, true}, {10000, 131072, 8, 10, 20, 1024, raft::distance::DistanceType::L2Expanded, false}, {1000, 10000, 16, 10, 40, 1024, raft::distance::DistanceType::InnerProduct, true}, {1000, 10000, 16, 10, 50, 1024, raft::distance::DistanceType::InnerProduct, true}, {1000, 10000, 16, 10, 70, 1024, raft::distance::DistanceType::InnerProduct, false}, {100, 10000, 16, 10, 20, 512, raft::distance::DistanceType::InnerProduct, true}, {20, 100000, 16, 10, 20, 1024, raft::distance::DistanceType::InnerProduct, true}, {1000, 100000, 16, 10, 20, 1024, raft::distance::DistanceType::InnerProduct, false}, {10000, 131072, 8, 10, 50, 1024, raft::distance::DistanceType::InnerProduct, true}, {1000, 10000, 4096, 20, 50, 1024, raft::distance::DistanceType::InnerProduct, false}, // test splitting the big query batches (> max gridDim.y) into smaller batches {100000, 1024, 32, 10, 64, 64, raft::distance::DistanceType::InnerProduct, false}, {1000000, 1024, 32, 10, 256, 256, 
raft::distance::DistanceType::InnerProduct, false}, {98306, 1024, 32, 10, 64, 64, raft::distance::DistanceType::InnerProduct, true}, // test radix_sort for getting the cluster selection {1000, 10000, 16, 10, raft::matrix::detail::select::warpsort::kMaxCapacity * 2, raft::matrix::detail::select::warpsort::kMaxCapacity * 4, raft::distance::DistanceType::L2Expanded, false}, {1000, 10000, 16, 10, raft::matrix::detail::select::warpsort::kMaxCapacity * 4, raft::matrix::detail::select::warpsort::kMaxCapacity * 4, raft::distance::DistanceType::InnerProduct, false}, // The following two test cases should show very similar recall. // num_queries, num_db_vecs, dim, k, nprobe, nlist, metric, adaptive_centers {20000, 8712, 3, 10, 51, 66, raft::distance::DistanceType::L2Expanded, false}, {100000, 8712, 3, 10, 51, 66, raft::distance::DistanceType::L2Expanded, false}}; } // namespace raft::neighbors::ivf_flat
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/haversine.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <iostream> #include <raft/core/resource/cuda_stream.hpp> #include <raft/distance/distance_types.hpp> #include <raft/spatial/knn/detail/haversine_distance.cuh> #include <rmm/device_uvector.hpp> #include <vector> namespace raft { namespace spatial { namespace knn { template <typename value_idx, typename value_t> class HaversineKNNTest : public ::testing::Test { public: HaversineKNNTest() : stream(resource::get_cuda_stream(handle)), d_train_inputs(0, stream), d_ref_I(0, stream), d_ref_D(0, stream), d_pred_I(0, stream), d_pred_D(0, stream) { } protected: void basicTest() { // Allocate input d_train_inputs.resize(n * d, stream); // Allocate reference arrays d_ref_I.resize(n * n, stream); d_ref_D.resize(n * n, stream); // Allocate predicted arrays d_pred_I.resize(n * n, stream); d_pred_D.resize(n * n, stream); // make testdata on host std::vector<value_t> h_train_inputs = {0.71113885, -1.29215058, 0.59613176, -2.08048115, 0.74932804, -1.33634042, 0.51486728, -1.65962873, 0.53154002, -1.47049808, 0.72891737, -1.54095137}; h_train_inputs.resize(d_train_inputs.size()); raft::update_device( d_train_inputs.data(), h_train_inputs.data(), d_train_inputs.size(), stream); std::vector<value_t> h_res_D = {0., 0.05041587, 0.18767063, 0.23048252, 0.35749438, 0.62925595, 0., 0.36575755, 0.44288665, 0.5170737, 0.59501296, 0.62925595, 0., 
0.05041587, 0.152463, 0.2426416, 0.34925285, 0.59501296, 0., 0.16461092, 0.2345792, 0.34925285, 0.35749438, 0.36575755, 0., 0.16461092, 0.20535265, 0.23048252, 0.2426416, 0.5170737, 0., 0.152463, 0.18767063, 0.20535265, 0.2345792, 0.44288665}; h_res_D.resize(n * n); raft::update_device(d_ref_D.data(), h_res_D.data(), n * n, stream); std::vector<value_idx> h_res_I = {0, 2, 5, 4, 3, 1, 1, 3, 5, 4, 2, 0, 2, 0, 5, 4, 3, 1, 3, 4, 5, 2, 0, 1, 4, 3, 5, 0, 2, 1, 5, 2, 0, 4, 3, 1}; h_res_I.resize(n * n); raft::update_device<value_idx>(d_ref_I.data(), h_res_I.data(), n * n, stream); raft::spatial::knn::detail::haversine_knn(d_pred_I.data(), d_pred_D.data(), d_train_inputs.data(), d_train_inputs.data(), n, n, k, stream); resource::sync_stream(handle, stream); } void SetUp() override { basicTest(); } protected: raft::resources handle; cudaStream_t stream; rmm::device_uvector<value_t> d_train_inputs; int n = 6; int d = 2; int k = 6; rmm::device_uvector<value_idx> d_pred_I; rmm::device_uvector<value_t> d_pred_D; rmm::device_uvector<value_idx> d_ref_I; rmm::device_uvector<value_t> d_ref_D; }; typedef HaversineKNNTest<int, float> HaversineKNNTestF; TEST_F(HaversineKNNTestF, Fit) { ASSERT_TRUE(raft::devArrMatch( d_ref_D.data(), d_pred_D.data(), n * n, raft::CompareApprox<float>(1e-3), stream)); ASSERT_TRUE( raft::devArrMatch(d_ref_I.data(), d_pred_I.data(), n * n, raft::Compare<int>(), stream)); } } // namespace knn } // namespace spatial } // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/ann_nn_descent.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "../test_utils.cuh" #include "ann_utils.cuh" #include <raft_internal/neighbors/naive_knn.cuh> #include <raft/core/resource/cuda_stream.hpp> #include <raft/neighbors/nn_descent.cuh> #include <raft/util/itertools.hpp> #include <gtest/gtest.h> #include <cstddef> #include <iostream> #include <string> #include <vector> namespace raft::neighbors::experimental::nn_descent { struct AnnNNDescentInputs { int n_rows; int dim; int graph_degree; raft::distance::DistanceType metric; bool host_dataset; double min_recall; }; inline ::std::ostream& operator<<(::std::ostream& os, const AnnNNDescentInputs& p) { os << "dataset shape=" << p.n_rows << "x" << p.dim << ", graph_degree=" << p.graph_degree << ", metric=" << static_cast<int>(p.metric) << (p.host_dataset ? 
", host" : ", device") << std::endl; return os; } template <typename DistanceT, typename DataT, typename IdxT> class AnnNNDescentTest : public ::testing::TestWithParam<AnnNNDescentInputs> { public: AnnNNDescentTest() : stream_(resource::get_cuda_stream(handle_)), ps(::testing::TestWithParam<AnnNNDescentInputs>::GetParam()), database(0, stream_) { } protected: void testNNDescent() { size_t queries_size = ps.n_rows * ps.graph_degree; std::vector<IdxT> indices_NNDescent(queries_size); std::vector<IdxT> indices_naive(queries_size); { rmm::device_uvector<DistanceT> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); naive_knn<DistanceT, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), database.data(), database.data(), ps.n_rows, ps.n_rows, ps.dim, ps.graph_degree, ps.metric); update_host(indices_naive.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } { { nn_descent::index_params index_params; index_params.metric = ps.metric; index_params.graph_degree = ps.graph_degree; index_params.intermediate_graph_degree = 2 * ps.graph_degree; index_params.max_iterations = 100; auto database_view = raft::make_device_matrix_view<const DataT, int64_t>( (const DataT*)database.data(), ps.n_rows, ps.dim); { if (ps.host_dataset) { auto database_host = raft::make_host_matrix<DataT, int64_t>(ps.n_rows, ps.dim); raft::copy(database_host.data_handle(), database.data(), database.size(), stream_); auto database_host_view = raft::make_host_matrix_view<const DataT, int64_t>( (const DataT*)database_host.data_handle(), ps.n_rows, ps.dim); auto index = nn_descent::build<DataT, IdxT>(handle_, index_params, database_host_view); update_host( indices_NNDescent.data(), index.graph().data_handle(), queries_size, stream_); } else { auto index = nn_descent::build<DataT, IdxT>(handle_, index_params, database_view); update_host( indices_NNDescent.data(), index.graph().data_handle(), 
queries_size, stream_); }; } resource::sync_stream(handle_); } double min_recall = ps.min_recall; EXPECT_TRUE(eval_recall( indices_naive, indices_NNDescent, ps.n_rows, ps.graph_degree, 0.001, min_recall)); } } void SetUp() override { database.resize(((size_t)ps.n_rows) * ps.dim, stream_); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { raft::random::normal(handle_, r, database.data(), ps.n_rows * ps.dim, DataT(0.1), DataT(2.0)); } else { raft::random::uniformInt( handle_, r, database.data(), ps.n_rows * ps.dim, DataT(1), DataT(20)); } resource::sync_stream(handle_); } void TearDown() override { resource::sync_stream(handle_); database.resize(0, stream_); } private: raft::resources handle_; rmm::cuda_stream_view stream_; AnnNNDescentInputs ps; rmm::device_uvector<DataT> database; }; const std::vector<AnnNNDescentInputs> inputs = raft::util::itertools::product<AnnNNDescentInputs>( {1000, 2000}, // n_rows {3, 5, 7, 8, 17, 64, 128, 137, 192, 256, 512, 619, 1024}, // dim {32, 64}, // graph_degree {raft::distance::DistanceType::L2Expanded}, {false, true}, {0.90}); } // namespace raft::neighbors::experimental::nn_descent
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/ann_ivf_pq.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "../test_utils.cuh" #include "ann_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <raft_internal/neighbors/naive_knn.cuh> #include <raft/core/logger.hpp> #include <raft/distance/distance_types.hpp> #include <raft/linalg/map.cuh> #include <raft/linalg/map_reduce.cuh> #include <raft/matrix/gather.cuh> #include <raft/neighbors/ivf_pq.cuh> #include <raft/neighbors/ivf_pq_helpers.cuh> #include <raft/neighbors/ivf_pq_serialize.cuh> #include <raft/neighbors/sample_filter.cuh> #include <raft/random/rng.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_vector.hpp> #include <rmm/mr/device/managed_memory_resource.hpp> #include <gtest/gtest.h> #include <cub/cub.cuh> #include <thrust/sequence.h> #include <algorithm> #include <cstddef> #include <iostream> #include <optional> #include <vector> namespace raft::neighbors::ivf_pq { struct test_ivf_sample_filter { static constexpr unsigned offset = 1500; }; struct ivf_pq_inputs { uint32_t num_db_vecs = 4096; uint32_t num_queries = 1024; uint32_t dim = 64; uint32_t k = 32; std::optional<double> min_recall = std::nullopt; ivf_pq::index_params index_params; ivf_pq::search_params search_params; // Set some default parameters for tests ivf_pq_inputs() { index_params.n_lists = max(32u, min(1024u, num_db_vecs / 128u)); index_params.kmeans_trainset_fraction = 1.0; } }; 
inline auto operator<<(std::ostream& os, const ivf_pq::codebook_gen& p) -> std::ostream& { switch (p) { case ivf_pq::codebook_gen::PER_CLUSTER: os << "codebook_gen::PER_CLUSTER"; break; case ivf_pq::codebook_gen::PER_SUBSPACE: os << "codebook_gen::PER_SUBSPACE"; break; default: RAFT_FAIL("unreachable code"); } return os; } inline auto operator<<(std::ostream& os, const ivf_pq_inputs& p) -> std::ostream& { ivf_pq_inputs dflt; bool need_comma = false; #define PRINT_DIFF_V(spec, val) \ do { \ if (dflt spec != p spec) { \ if (need_comma) { os << ", "; } \ os << #spec << " = " << val; \ need_comma = true; \ } \ } while (0) #define PRINT_DIFF(spec) PRINT_DIFF_V(spec, p spec) os << "ivf_pq_inputs {"; PRINT_DIFF(.num_db_vecs); PRINT_DIFF(.num_queries); PRINT_DIFF(.dim); PRINT_DIFF(.k); PRINT_DIFF_V(.min_recall, p.min_recall.value_or(0)); PRINT_DIFF_V(.index_params.metric, print_metric{p.index_params.metric}); PRINT_DIFF(.index_params.metric_arg); PRINT_DIFF(.index_params.add_data_on_build); PRINT_DIFF(.index_params.n_lists); PRINT_DIFF(.index_params.kmeans_n_iters); PRINT_DIFF(.index_params.kmeans_trainset_fraction); PRINT_DIFF(.index_params.pq_bits); PRINT_DIFF(.index_params.pq_dim); PRINT_DIFF(.index_params.codebook_kind); PRINT_DIFF(.index_params.force_random_rotation); PRINT_DIFF(.search_params.n_probes); PRINT_DIFF_V(.search_params.lut_dtype, print_dtype{p.search_params.lut_dtype}); PRINT_DIFF_V(.search_params.internal_distance_dtype, print_dtype{p.search_params.internal_distance_dtype}); os << "}"; return os; } template <typename T> void compare_vectors_l2( const raft::resources& res, T a, T b, uint32_t label, double compression_ratio, double eps) { auto n_rows = a.extent(0); auto dim = a.extent(1); rmm::mr::managed_memory_resource managed_memory; auto dist = make_device_mdarray<double>(res, &managed_memory, make_extents<uint32_t>(n_rows)); linalg::map_offset(res, dist.view(), [a, b, dim] __device__(uint32_t i) { spatial::knn::detail::utils::mapping<float> f{}; 
double d = 0.0f; for (uint32_t j = 0; j < dim; j++) { double t = f(a(i, j)) - f(b(i, j)); d += t * t; } return sqrt(d / double(dim)); }); resource::sync_stream(res); for (uint32_t i = 0; i < n_rows; i++) { double d = dist(i); // The theoretical estimate of the error is hard to come up with, // the estimate below is based on experimentation + curse of dimensionality ASSERT_LE(d, 1.2 * eps * std::pow(2.0, compression_ratio)) << " (label = " << label << ", ix = " << i << ", eps = " << eps << ")"; } } template <typename IdxT> auto min_output_size(const raft::resources& handle, const ivf_pq::index<IdxT>& index, uint32_t n_probes) -> IdxT { auto acc_sizes = index.accum_sorted_sizes(); uint32_t last_nonzero = index.n_lists(); while (last_nonzero > 0 && acc_sizes(last_nonzero - 1) == acc_sizes(last_nonzero)) { last_nonzero--; } return acc_sizes(last_nonzero) - acc_sizes(last_nonzero - std::min(last_nonzero, n_probes)); } template <typename EvalT, typename DataT, typename IdxT> class ivf_pq_test : public ::testing::TestWithParam<ivf_pq_inputs> { public: ivf_pq_test() : stream_(resource::get_cuda_stream(handle_)), ps(::testing::TestWithParam<ivf_pq_inputs>::GetParam()), database(0, stream_), search_queries(0, stream_) { } void gen_data() { database.resize(size_t{ps.num_db_vecs} * size_t{ps.dim}, stream_); search_queries.resize(size_t{ps.num_queries} * size_t{ps.dim}, stream_); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { raft::random::uniform( handle_, r, database.data(), ps.num_db_vecs * ps.dim, DataT(0.1), DataT(2.0)); raft::random::uniform( handle_, r, search_queries.data(), ps.num_queries * ps.dim, DataT(0.1), DataT(2.0)); } else { raft::random::uniformInt( handle_, r, database.data(), ps.num_db_vecs * ps.dim, DataT(1), DataT(20)); raft::random::uniformInt( handle_, r, search_queries.data(), ps.num_queries * ps.dim, DataT(1), DataT(20)); } resource::sync_stream(handle_); } void calc_ref() { size_t queries_size = size_t{ps.num_queries} 
* size_t{ps.k}; rmm::device_uvector<EvalT> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); naive_knn<EvalT, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database.data(), ps.num_queries, ps.num_db_vecs, ps.dim, ps.k, ps.index_params.metric); distances_ref.resize(queries_size); update_host(distances_ref.data(), distances_naive_dev.data(), queries_size, stream_); indices_ref.resize(queries_size); update_host(indices_ref.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } auto build_only() { auto ipams = ps.index_params; ipams.add_data_on_build = true; auto index_view = raft::make_device_matrix_view<DataT, IdxT>(database.data(), ps.num_db_vecs, ps.dim); return ivf_pq::build<DataT, IdxT>(handle_, ipams, index_view); } auto build_2_extends() { auto db_indices = make_device_vector<IdxT>(handle_, ps.num_db_vecs); linalg::map_offset(handle_, db_indices.view(), identity_op{}); resource::sync_stream(handle_); auto size_1 = IdxT(ps.num_db_vecs) / 2; auto size_2 = IdxT(ps.num_db_vecs) - size_1; auto vecs_1 = database.data(); auto vecs_2 = database.data() + size_t(size_1) * size_t(ps.dim); auto inds_1 = db_indices.data_handle(); auto inds_2 = db_indices.data_handle() + size_t(size_1); auto ipams = ps.index_params; ipams.add_data_on_build = false; auto database_view = raft::make_device_matrix_view<DataT, IdxT>(database.data(), ps.num_db_vecs, ps.dim); auto idx = ivf_pq::build<DataT, IdxT>(handle_, ipams, database_view); auto vecs_2_view = raft::make_device_matrix_view<DataT, IdxT>(vecs_2, size_2, ps.dim); auto inds_2_view = raft::make_device_vector_view<IdxT, IdxT>(inds_2, size_2); ivf_pq::extend<DataT, IdxT>(handle_, vecs_2_view, inds_2_view, &idx); auto vecs_1_view = raft::make_device_matrix_view<DataT, IdxT, row_major>(vecs_1, size_1, ps.dim); auto inds_1_view = raft::make_device_vector_view<const IdxT, IdxT>(inds_1, 
size_1); ivf_pq::extend<DataT, IdxT>(handle_, vecs_1_view, inds_1_view, &idx); return idx; } auto build_serialize() { ivf_pq::serialize<IdxT>(handle_, "ivf_pq_index", build_only()); return ivf_pq::deserialize<IdxT>(handle_, "ivf_pq_index"); } void check_reconstruction(const index<IdxT>& index, double compression_ratio, uint32_t label, uint32_t n_take, uint32_t n_skip) { auto& rec_list = index.lists()[label]; auto dim = index.dim(); n_take = std::min<uint32_t>(n_take, rec_list->size.load()); n_skip = std::min<uint32_t>(n_skip, rec_list->size.load() - n_take); if (n_take == 0) { return; } auto rec_data = make_device_matrix<DataT>(handle_, n_take, dim); auto orig_data = make_device_matrix<DataT>(handle_, n_take, dim); ivf_pq::helpers::reconstruct_list_data(handle_, index, rec_data.view(), label, n_skip); matrix::gather(database.data(), IdxT{dim}, IdxT{n_take}, rec_list->indices.data_handle() + n_skip, IdxT{n_take}, orig_data.data_handle(), stream_); compare_vectors_l2(handle_, rec_data.view(), orig_data.view(), label, compression_ratio, 0.06); } void check_reconstruct_extend(index<IdxT>* index, double compression_ratio, uint32_t label) { // NB: this is not reference, the list is retained; the index will have to create a new list on // `erase_list` op. 
auto old_list = index->lists()[label]; auto n_rows = old_list->size.load(); if (n_rows == 0) { return; } auto vectors_1 = make_device_matrix<EvalT>(handle_, n_rows, index->dim()); auto indices = make_device_vector<IdxT>(handle_, n_rows); copy(indices.data_handle(), old_list->indices.data_handle(), n_rows, stream_); ivf_pq::helpers::reconstruct_list_data(handle_, *index, vectors_1.view(), label, 0); ivf_pq::helpers::erase_list(handle_, index, label); // NB: passing the type parameter because const->non-const implicit conversion of the mdspans // breaks type inference ivf_pq::helpers::extend_list<EvalT, IdxT>( handle_, index, vectors_1.view(), indices.view(), label); auto& new_list = index->lists()[label]; ASSERT_NE(old_list.get(), new_list.get()) << "The old list should have been shared and retained after ivf_pq index has erased the " "corresponding cluster."; auto vectors_2 = make_device_matrix<EvalT>(handle_, n_rows, index->dim()); ivf_pq::helpers::reconstruct_list_data(handle_, *index, vectors_2.view(), label, 0); // The code search is unstable, and there's high chance of repeating values of the lvl-2 codes. // Hence, encoding-decoding chain often leads to altering both the PQ codes and the // reconstructed data. 
compare_vectors_l2( handle_, vectors_1.view(), vectors_2.view(), label, compression_ratio, 0.04); // 0.025); } void check_packing(index<IdxT>* index, uint32_t label) { auto old_list = index->lists()[label]; auto n_rows = old_list->size.load(); if (n_rows == 0) { return; } auto codes = make_device_matrix<uint8_t>(handle_, n_rows, index->pq_dim()); auto indices = make_device_vector<IdxT>(handle_, n_rows); copy(indices.data_handle(), old_list->indices.data_handle(), n_rows, stream_); ivf_pq::helpers::unpack_list_data(handle_, *index, codes.view(), label, 0); ivf_pq::helpers::erase_list(handle_, index, label); ivf_pq::helpers::extend_list_with_codes<IdxT>( handle_, index, codes.view(), indices.view(), label); auto& new_list = index->lists()[label]; ASSERT_NE(old_list.get(), new_list.get()) << "The old list should have been shared and retained after ivf_pq index has erased the " "corresponding cluster."; auto list_data_size = (n_rows / ivf_pq::kIndexGroupSize) * new_list->data.extent(1) * new_list->data.extent(2) * new_list->data.extent(3); ASSERT_TRUE(old_list->data.size() >= list_data_size); ASSERT_TRUE(new_list->data.size() >= list_data_size); ASSERT_TRUE(devArrMatch(old_list->data.data_handle(), new_list->data.data_handle(), list_data_size, Compare<uint8_t>{})); // Pack a few vectors back to the list. 
int row_offset = 9; int n_vec = 3; ASSERT_TRUE(row_offset + n_vec < n_rows); size_t offset = row_offset * index->pq_dim(); auto codes_to_pack = make_device_matrix_view<const uint8_t, uint32_t>( codes.data_handle() + offset, n_vec, index->pq_dim()); ivf_pq::helpers::pack_list_data(handle_, index, codes_to_pack, label, row_offset); ASSERT_TRUE(devArrMatch(old_list->data.data_handle(), new_list->data.data_handle(), list_data_size, Compare<uint8_t>{})); // Another test with the API that take list_data directly auto list_data = index->lists()[label]->data.view(); uint32_t n_take = 4; ASSERT_TRUE(row_offset + n_take < n_rows); auto codes2 = raft::make_device_matrix<uint8_t>(handle_, n_take, index->pq_dim()); ivf_pq::helpers::codepacker::unpack( handle_, list_data, index->pq_bits(), row_offset, codes2.view()); // Write it back ivf_pq::helpers::codepacker::pack( handle_, make_const_mdspan(codes2.view()), index->pq_bits(), row_offset, list_data); ASSERT_TRUE(devArrMatch(old_list->data.data_handle(), new_list->data.data_handle(), list_data_size, Compare<uint8_t>{})); } void check_packing_contiguous(index<IdxT>* index, uint32_t label) { auto old_list = index->lists()[label]; auto n_rows = old_list->size.load(); if (n_rows == 0) { return; } auto codes = make_device_matrix<uint8_t>(handle_, n_rows, index->pq_dim()); auto indices = make_device_vector<IdxT>(handle_, n_rows); copy(indices.data_handle(), old_list->indices.data_handle(), n_rows, stream_); uint32_t code_size = ceildiv<uint32_t>(index->pq_dim() * index->pq_bits(), 8); auto codes_compressed = make_device_matrix<uint8_t>(handle_, n_rows, code_size); ivf_pq::helpers::unpack_contiguous_list_data( handle_, *index, codes_compressed.data_handle(), n_rows, label, 0); ivf_pq::helpers::erase_list(handle_, index, label); ivf_pq::detail::extend_list_prepare(handle_, index, make_const_mdspan(indices.view()), label); ivf_pq::helpers::pack_contiguous_list_data<IdxT>( handle_, index, codes_compressed.data_handle(), n_rows, label, 0); 
ivf_pq::helpers::recompute_internal_state(handle_, index); auto& new_list = index->lists()[label]; ASSERT_NE(old_list.get(), new_list.get()) << "The old list should have been shared and retained after ivf_pq index has erased the " "corresponding cluster."; auto list_data_size = (n_rows / ivf_pq::kIndexGroupSize) * new_list->data.extent(1) * new_list->data.extent(2) * new_list->data.extent(3); ASSERT_TRUE(old_list->data.size() >= list_data_size); ASSERT_TRUE(new_list->data.size() >= list_data_size); ASSERT_TRUE(devArrMatch(old_list->data.data_handle(), new_list->data.data_handle(), list_data_size, Compare<uint8_t>{})); // Pack a few vectors back to the list. uint32_t row_offset = 9; uint32_t n_vec = 3; ASSERT_TRUE(row_offset + n_vec < n_rows); size_t offset = row_offset * code_size; auto codes_to_pack = make_device_matrix_view<uint8_t, uint32_t>( codes_compressed.data_handle() + offset, n_vec, index->pq_dim()); ivf_pq::helpers::pack_contiguous_list_data( handle_, index, codes_to_pack.data_handle(), n_vec, label, row_offset); ASSERT_TRUE(devArrMatch(old_list->data.data_handle(), new_list->data.data_handle(), list_data_size, Compare<uint8_t>{})); // // Another test with the API that take list_data directly auto list_data = index->lists()[label]->data.view(); uint32_t n_take = 4; ASSERT_TRUE(row_offset + n_take < n_rows); auto codes2 = raft::make_device_matrix<uint8_t>(handle_, n_take, code_size); ivf_pq::helpers::codepacker::unpack_contiguous(handle_, list_data, index->pq_bits(), row_offset, n_take, index->pq_dim(), codes2.data_handle()); // Write it back ivf_pq::helpers::codepacker::pack_contiguous(handle_, codes2.data_handle(), n_vec, index->pq_dim(), index->pq_bits(), row_offset, list_data); ASSERT_TRUE(devArrMatch(old_list->data.data_handle(), new_list->data.data_handle(), list_data_size, Compare<uint8_t>{})); } template <typename BuildIndex> void run(BuildIndex build_index) { index<IdxT> index = build_index(); double compression_ratio = static_cast<double>(ps.dim 
* 8) / static_cast<double>(index.pq_dim() * index.pq_bits()); for (uint32_t label = 0; label < index.n_lists(); label++) { switch (label % 3) { case 0: { // Reconstruct and re-write vectors for one label check_reconstruct_extend(&index, compression_ratio, label); } break; case 1: { // Dump and re-write codes for one label check_packing(&index, label); check_packing_contiguous(&index, label); } break; default: { // check a small subset of data in a randomly chosen cluster to see if the data // reconstruction works well. check_reconstruction(index, compression_ratio, label, 100, 7); } } } size_t queries_size = ps.num_queries * ps.k; std::vector<IdxT> indices_ivf_pq(queries_size); std::vector<EvalT> distances_ivf_pq(queries_size); rmm::device_uvector<EvalT> distances_ivf_pq_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_ivf_pq_dev(queries_size, stream_); auto query_view = raft::make_device_matrix_view<DataT, uint32_t>(search_queries.data(), ps.num_queries, ps.dim); auto inds_view = raft::make_device_matrix_view<IdxT, uint32_t>( indices_ivf_pq_dev.data(), ps.num_queries, ps.k); auto dists_view = raft::make_device_matrix_view<EvalT, uint32_t>( distances_ivf_pq_dev.data(), ps.num_queries, ps.k); ivf_pq::search<DataT, IdxT>( handle_, ps.search_params, index, query_view, inds_view, dists_view); update_host(distances_ivf_pq.data(), distances_ivf_pq_dev.data(), queries_size, stream_); update_host(indices_ivf_pq.data(), indices_ivf_pq_dev.data(), queries_size, stream_); resource::sync_stream(handle_); // A very conservative lower bound on recall double min_recall = static_cast<double>(ps.search_params.n_probes) / static_cast<double>(ps.index_params.n_lists); // Using a heuristic to lower the required recall due to code-packing errors min_recall = std::min(std::erfc(0.05 * compression_ratio / std::max(min_recall, 0.5)), min_recall); // Use explicit per-test min recall value if provided. 
min_recall = ps.min_recall.value_or(min_recall); ASSERT_TRUE(eval_neighbours(indices_ref, indices_ivf_pq, distances_ref, distances_ivf_pq, ps.num_queries, ps.k, 0.0001 * compression_ratio, min_recall)) << ps; // Test a few extra invariants IdxT min_results = min_output_size(handle_, index, ps.search_params.n_probes); IdxT max_oob = ps.k <= min_results ? 0 : ps.k - min_results; IdxT found_oob = 0; for (uint32_t query_ix = 0; query_ix < ps.num_queries; query_ix++) { for (uint32_t k = 0; k < ps.k; k++) { auto flat_i = query_ix * ps.k + k; auto found_ix = indices_ivf_pq[flat_i]; if (found_ix == ivf_pq::kOutOfBoundsRecord<IdxT>) { found_oob++; continue; } ASSERT_NE(found_ix, ivf::kInvalidRecord<IdxT>) << "got an invalid record at query_ix = " << query_ix << ", k = " << k << " (distance = " << distances_ivf_pq[flat_i] << ")"; ASSERT_LT(found_ix, ps.num_db_vecs) << "got an impossible index = " << found_ix << " at query_ix = " << query_ix << ", k = " << k << " (distance = " << distances_ivf_pq[flat_i] << ")"; } } ASSERT_LE(found_oob, max_oob) << "got too many records out-of-bounds (see ivf_pq::kOutOfBoundsRecord<IdxT>)."; if (found_oob > 0) { RAFT_LOG_WARN( "Got %zu results out-of-bounds because of large top-k (%zu) and small n_probes (%u) and " "small DB size/n_lists ratio (%zu / %u)", size_t(found_oob), size_t(ps.k), ps.search_params.n_probes, size_t(ps.num_db_vecs), ps.index_params.n_lists); } } void SetUp() override // NOLINT { gen_data(); calc_ref(); } void TearDown() override // NOLINT { cudaGetLastError(); resource::sync_stream(handle_); database.resize(0, stream_); search_queries.resize(0, stream_); } private: raft::resources handle_; rmm::cuda_stream_view stream_; ivf_pq_inputs ps; // NOLINT rmm::device_uvector<DataT> database; // NOLINT rmm::device_uvector<DataT> search_queries; // NOLINT std::vector<IdxT> indices_ref; // NOLINT std::vector<EvalT> distances_ref; // NOLINT }; template <typename EvalT, typename DataT, typename IdxT> class ivf_pq_filter_test : 
public ::testing::TestWithParam<ivf_pq_inputs> { public: ivf_pq_filter_test() : stream_(resource::get_cuda_stream(handle_)), ps(::testing::TestWithParam<ivf_pq_inputs>::GetParam()), database(0, stream_), search_queries(0, stream_) { } void gen_data() { database.resize(size_t{ps.num_db_vecs} * size_t{ps.dim}, stream_); search_queries.resize(size_t{ps.num_queries} * size_t{ps.dim}, stream_); raft::random::RngState r(1234ULL); if constexpr (std::is_same<DataT, float>{}) { raft::random::uniform( handle_, r, database.data(), ps.num_db_vecs * ps.dim, DataT(0.1), DataT(2.0)); raft::random::uniform( handle_, r, search_queries.data(), ps.num_queries * ps.dim, DataT(0.1), DataT(2.0)); } else { raft::random::uniformInt( handle_, r, database.data(), ps.num_db_vecs * ps.dim, DataT(1), DataT(20)); raft::random::uniformInt( handle_, r, search_queries.data(), ps.num_queries * ps.dim, DataT(1), DataT(20)); } resource::sync_stream(handle_); } void calc_ref() { size_t queries_size = size_t{ps.num_queries} * size_t{ps.k}; rmm::device_uvector<EvalT> distances_naive_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_naive_dev(queries_size, stream_); naive_knn<EvalT, DataT, IdxT>(handle_, distances_naive_dev.data(), indices_naive_dev.data(), search_queries.data(), database.data() + test_ivf_sample_filter::offset * ps.dim, ps.num_queries, ps.num_db_vecs - test_ivf_sample_filter::offset, ps.dim, ps.k, ps.index_params.metric); raft::linalg::addScalar(indices_naive_dev.data(), indices_naive_dev.data(), IdxT(test_ivf_sample_filter::offset), queries_size, stream_); distances_ref.resize(queries_size); update_host(distances_ref.data(), distances_naive_dev.data(), queries_size, stream_); indices_ref.resize(queries_size); update_host(indices_ref.data(), indices_naive_dev.data(), queries_size, stream_); resource::sync_stream(handle_); } auto build_only() { auto ipams = ps.index_params; ipams.add_data_on_build = true; auto index_view = raft::make_device_matrix_view<DataT, 
IdxT>(database.data(), ps.num_db_vecs, ps.dim); return ivf_pq::build<DataT, IdxT>(handle_, ipams, index_view); } template <typename BuildIndex> void run(BuildIndex build_index) { index<IdxT> index = build_index(); double compression_ratio = static_cast<double>(ps.dim * 8) / static_cast<double>(index.pq_dim() * index.pq_bits()); size_t queries_size = ps.num_queries * ps.k; std::vector<IdxT> indices_ivf_pq(queries_size); std::vector<EvalT> distances_ivf_pq(queries_size); rmm::device_uvector<EvalT> distances_ivf_pq_dev(queries_size, stream_); rmm::device_uvector<IdxT> indices_ivf_pq_dev(queries_size, stream_); auto query_view = raft::make_device_matrix_view<DataT, uint32_t>(search_queries.data(), ps.num_queries, ps.dim); auto inds_view = raft::make_device_matrix_view<IdxT, uint32_t>( indices_ivf_pq_dev.data(), ps.num_queries, ps.k); auto dists_view = raft::make_device_matrix_view<EvalT, uint32_t>( distances_ivf_pq_dev.data(), ps.num_queries, ps.k); // Create Bitset filter auto removed_indices = raft::make_device_vector<IdxT, int64_t>(handle_, test_ivf_sample_filter::offset); thrust::sequence( resource::get_thrust_policy(handle_), thrust::device_pointer_cast(removed_indices.data_handle()), thrust::device_pointer_cast(removed_indices.data_handle() + test_ivf_sample_filter::offset)); resource::sync_stream(handle_); raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset( handle_, removed_indices.view(), ps.num_db_vecs); ivf_pq::search_with_filtering<DataT, IdxT>( handle_, ps.search_params, index, query_view, inds_view, dists_view, raft::neighbors::filtering::bitset_filter(removed_indices_bitset.view())); update_host(distances_ivf_pq.data(), distances_ivf_pq_dev.data(), queries_size, stream_); update_host(indices_ivf_pq.data(), indices_ivf_pq_dev.data(), queries_size, stream_); resource::sync_stream(handle_); // A very conservative lower bound on recall double min_recall = static_cast<double>(ps.search_params.n_probes) / 
static_cast<double>(ps.index_params.n_lists); // Using a heuristic to lower the required recall due to code-packing errors min_recall = std::min(std::erfc(0.05 * compression_ratio / std::max(min_recall, 0.5)), min_recall); // Use explicit per-test min recall value if provided. min_recall = ps.min_recall.value_or(min_recall); ASSERT_TRUE(eval_neighbours(indices_ref, indices_ivf_pq, distances_ref, distances_ivf_pq, ps.num_queries, ps.k, 0.0001 * compression_ratio, min_recall)) << ps; } void SetUp() override // NOLINT { gen_data(); calc_ref(); } void TearDown() override // NOLINT { cudaGetLastError(); resource::sync_stream(handle_); database.resize(0, stream_); search_queries.resize(0, stream_); } private: raft::resources handle_; rmm::cuda_stream_view stream_; ivf_pq_inputs ps; // NOLINT rmm::device_uvector<DataT> database; // NOLINT rmm::device_uvector<DataT> search_queries; // NOLINT std::vector<IdxT> indices_ref; // NOLINT std::vector<EvalT> distances_ref; // NOLINT }; /* Test cases */ using test_cases_t = std::vector<ivf_pq_inputs>; // concatenate parameter sets for different type template <typename T> auto operator+(const std::vector<T>& a, const std::vector<T>& b) -> std::vector<T> { std::vector<T> res = a; res.insert(res.end(), b.begin(), b.end()); return res; } inline auto defaults() -> test_cases_t { return {ivf_pq_inputs{}}; } template <typename B, typename A, typename F> auto map(const std::vector<A>& xs, F f) -> std::vector<B> { std::vector<B> ys(xs.size()); std::transform(xs.begin(), xs.end(), ys.begin(), f); return ys; } inline auto with_dims(const std::vector<uint32_t>& dims) -> test_cases_t { return map<ivf_pq_inputs>(dims, [](uint32_t d) { ivf_pq_inputs x; x.dim = d; return x; }); } /** These will surely trigger the fastest kernel available. 
*/ inline auto small_dims() -> test_cases_t { return with_dims({1, 2, 3, 4, 5, 8, 15, 16, 17}); } inline auto small_dims_per_cluster() -> test_cases_t { return map<ivf_pq_inputs>(small_dims(), [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); y.index_params.codebook_kind = ivf_pq::codebook_gen::PER_CLUSTER; return y; }); } inline auto big_dims() -> test_cases_t { // with_dims({512, 513, 1023, 1024, 1025, 2048, 2049, 2050, 2053, 6144, 8192, 12288, 16384}); auto xs = with_dims({512, 513, 1023, 1024, 1025, 2048, 2049, 2050, 2053, 6144}); return map<ivf_pq_inputs>(xs, [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); uint32_t pq_len = 2; y.index_params.pq_dim = div_rounding_up_safe(x.dim, pq_len); // This comes from pure experimentation, also the recall depens a lot on pq_len. y.min_recall = 0.48 + 0.028 * std::log2(x.dim); return y; }); } /** These will surely trigger no-smem-lut kernel. */ inline auto big_dims_moderate_lut() -> test_cases_t { return map<ivf_pq_inputs>(big_dims(), [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); uint32_t pq_len = 2; y.index_params.pq_dim = round_up_safe(div_rounding_up_safe(x.dim, pq_len), 4u); y.index_params.pq_bits = 6; y.search_params.lut_dtype = CUDA_R_16F; y.min_recall = 0.69; return y; }); } /** Some of these should trigger no-basediff kernel. */ inline auto big_dims_small_lut() -> test_cases_t { return map<ivf_pq_inputs>(big_dims(), [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); uint32_t pq_len = 8; y.index_params.pq_dim = round_up_safe(div_rounding_up_safe(x.dim, pq_len), 4u); y.index_params.pq_bits = 6; y.search_params.lut_dtype = CUDA_R_8U; y.min_recall = 0.21; return y; }); } /** * A minimal set of tests to check various enum-like parameters. 
*/ inline auto enum_variety() -> test_cases_t { test_cases_t xs; #define ADD_CASE(f) \ do { \ xs.push_back({}); \ ([](ivf_pq_inputs & x) f)(xs[xs.size() - 1]); \ } while (0); ADD_CASE({ x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_CLUSTER; x.min_recall = 0.86; }); ADD_CASE({ x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_SUBSPACE; x.min_recall = 0.86; }); ADD_CASE({ x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_CLUSTER; x.index_params.pq_bits = 4; x.min_recall = 0.79; }); ADD_CASE({ x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_CLUSTER; x.index_params.pq_bits = 5; x.min_recall = 0.83; }); ADD_CASE({ x.index_params.pq_bits = 6; x.min_recall = 0.84; }); ADD_CASE({ x.index_params.pq_bits = 7; x.min_recall = 0.85; }); ADD_CASE({ x.index_params.pq_bits = 8; x.min_recall = 0.86; }); ADD_CASE({ x.index_params.force_random_rotation = true; x.min_recall = 0.86; }); ADD_CASE({ x.index_params.force_random_rotation = false; x.min_recall = 0.86; }); ADD_CASE({ x.search_params.lut_dtype = CUDA_R_32F; x.min_recall = 0.86; }); ADD_CASE({ x.search_params.lut_dtype = CUDA_R_16F; x.min_recall = 0.86; }); ADD_CASE({ x.search_params.lut_dtype = CUDA_R_8U; x.min_recall = 0.84; }); ADD_CASE({ x.search_params.internal_distance_dtype = CUDA_R_32F; x.min_recall = 0.86; }); ADD_CASE({ x.search_params.internal_distance_dtype = CUDA_R_16F; x.search_params.lut_dtype = CUDA_R_16F; x.min_recall = 0.86; }); return xs; } inline auto enum_variety_l2() -> test_cases_t { return map<ivf_pq_inputs>(enum_variety(), [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); y.index_params.metric = distance::DistanceType::L2Expanded; return y; }); } inline auto enum_variety_ip() -> test_cases_t { return map<ivf_pq_inputs>(enum_variety(), [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); if (y.min_recall.has_value()) { if (y.search_params.lut_dtype == CUDA_R_8U) { // InnerProduct score is signed, // thus we're forced to used signed 8-bit representation, // thus we have one 
bit less precision y.min_recall = y.min_recall.value() * 0.90; } else { // In other cases it seems to perform a little bit better, still worse than L2 y.min_recall = y.min_recall.value() * 0.94; } } y.index_params.metric = distance::DistanceType::InnerProduct; return y; }); } inline auto enum_variety_l2sqrt() -> test_cases_t { return map<ivf_pq_inputs>(enum_variety(), [](const ivf_pq_inputs& x) { ivf_pq_inputs y(x); y.index_params.metric = distance::DistanceType::L2SqrtExpanded; return y; }); } /** * Try different number of n_probes, some of which may trigger the non-fused version of the search * kernel. */ inline auto var_n_probes() -> test_cases_t { ivf_pq_inputs dflt; std::vector<uint32_t> xs; for (auto x = dflt.index_params.n_lists; x >= 1; x /= 2) { xs.push_back(x); } return map<ivf_pq_inputs>(xs, [](uint32_t n_probes) { ivf_pq_inputs x; x.search_params.n_probes = n_probes; return x; }); } /** * Try different number of nearest neighbours. * Values smaller than 32 test if the code behaves well when Capacity (== 32) does not change, * but `k <= Capacity` changes. * * Values between `32 and ivf_pq::detail::kMaxCapacity` test various instantiations of the * main kernel (Capacity-templated) * * Values above ivf_pq::detail::kMaxCapacity should trigger the non-fused version of the kernel * (manage_local_topk = false). * * Also we test here various values that are close-but-not-power-of-two to catch any problems * related to rounding/alignment. * * Note, we cannot control explicitly which instance of the search kernel to choose, hence it's * important to try a variety of different values of `k` to make sure all paths are triggered. * * Set the log level to DEBUG (5) or above to inspect the selected kernel instances. 
*/ inline auto var_k() -> test_cases_t { return map<ivf_pq_inputs, uint32_t>( {1, 2, 3, 5, 8, 15, 16, 32, 63, 65, 127, 128, 256, 257, 1023, 2048, 2049}, [](uint32_t k) { ivf_pq_inputs x; x.k = k; // when there's not enough data, try more cluster probes x.search_params.n_probes = max(x.search_params.n_probes, min(x.index_params.n_lists, k)); return x; }); } /** * Cases brought up from downstream projects. */ inline auto special_cases() -> test_cases_t { test_cases_t xs; #define ADD_CASE(f) \ do { \ xs.push_back({}); \ ([](ivf_pq_inputs & x) f)(xs[xs.size() - 1]); \ } while (0); ADD_CASE({ x.num_db_vecs = 1183514; x.dim = 100; x.num_queries = 10000; x.k = 10; x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_SUBSPACE; x.index_params.pq_dim = 10; x.index_params.pq_bits = 8; x.index_params.n_lists = 1024; x.search_params.n_probes = 50; }); ADD_CASE({ x.num_db_vecs = 10000; x.dim = 16; x.num_queries = 500; x.k = 128; x.index_params.metric = distance::DistanceType::L2Expanded; x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_SUBSPACE; x.index_params.pq_bits = 8; x.index_params.n_lists = 100; x.search_params.n_probes = 100; }); ADD_CASE({ x.num_db_vecs = 10000; x.dim = 16; x.num_queries = 500; x.k = 129; x.index_params.metric = distance::DistanceType::L2Expanded; x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_SUBSPACE; x.index_params.pq_bits = 8; x.index_params.n_lists = 100; x.search_params.n_probes = 100; }); ADD_CASE({ x.num_db_vecs = 4335; x.dim = 4; x.num_queries = 100000; x.k = 12; x.index_params.metric = distance::DistanceType::L2Expanded; x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_SUBSPACE; x.index_params.pq_dim = 2; x.index_params.pq_bits = 8; x.index_params.n_lists = 69; x.search_params.n_probes = 69; }); ADD_CASE({ x.num_db_vecs = 4335; x.dim = 4; x.num_queries = 100000; x.k = 12; x.index_params.metric = distance::DistanceType::L2Expanded; x.index_params.codebook_kind = ivf_pq::codebook_gen::PER_CLUSTER; 
x.index_params.pq_dim = 2; x.index_params.pq_bits = 8; x.index_params.n_lists = 69; x.search_params.n_probes = 69; }); return xs; } /* Test instantiations */ #define TEST_BUILD_SEARCH(type) \ TEST_P(type, build_search) /* NOLINT */ \ { \ this->run([this]() { return this->build_only(); }); \ } #define TEST_BUILD_EXTEND_SEARCH(type) \ TEST_P(type, build_extend_search) /* NOLINT */ \ { \ this->run([this]() { return this->build_2_extends(); }); \ } #define TEST_BUILD_SERIALIZE_SEARCH(type) \ TEST_P(type, build_serialize_search) /* NOLINT */ \ { \ this->run([this]() { return this->build_serialize(); }); \ } #define INSTANTIATE(type, vals) \ INSTANTIATE_TEST_SUITE_P(IvfPq, type, ::testing::ValuesIn(vals)); /* NOLINT */ } // namespace raft::neighbors::ivf_pq
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/knn.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/device_mdspan.hpp> #include <raft/core/logger.hpp> #include <raft/distance/distance_types.hpp> #include <raft/neighbors/brute_force.cuh> #include <rmm/device_buffer.hpp> #include <gtest/gtest.h> #include <cstddef> #include <iostream> #include <vector> namespace raft::neighbors::brute_force { struct KNNInputs { std::vector<std::vector<float>> input; int k; std::vector<int> labels; }; template <typename IdxT> RAFT_KERNEL build_actual_output( int* output, int n_rows, int k, const int* idx_labels, const IdxT* indices) { int element = threadIdx.x + blockDim.x * blockIdx.x; if (element >= n_rows * k) return; output[element] = idx_labels[indices[element]]; } RAFT_KERNEL build_expected_output(int* output, int n_rows, int k, const int* labels) { int row = threadIdx.x + blockDim.x * blockIdx.x; if (row >= n_rows) return; int cur_label = labels[row]; for (int i = 0; i < k; i++) { output[row * k + i] = cur_label; } } template <typename T, typename IdxT> class KNNTest : public ::testing::TestWithParam<KNNInputs> { public: KNNTest() : params_(::testing::TestWithParam<KNNInputs>::GetParam()), stream(resource::get_cuda_stream(handle)), actual_labels_(0, stream), expected_labels_(0, stream), input_(0, stream), search_data_(0, stream), indices_(0, stream), distances_(0, stream), search_labels_(0, stream) { 
} protected: void testBruteForce() { // #if (RAFT_ACTIVE_LEVEL >= RAFT_LEVEL_DEBUG) raft::print_device_vector("Input array: ", input_.data(), rows_ * cols_, std::cout); std::cout << "K: " << k_ << std::endl; raft::print_device_vector("Labels array: ", search_labels_.data(), rows_, std::cout); // #endif std::vector<device_matrix_view<const T, IdxT, row_major>> index = { make_device_matrix_view((const T*)(input_.data()), rows_, cols_)}; auto search = raft::make_device_matrix_view<const T, IdxT, row_major>( (const T*)(search_data_.data()), rows_, cols_); auto indices = raft::make_device_matrix_view<IdxT, IdxT, row_major>(indices_.data(), rows_, k_); auto distances = raft::make_device_matrix_view<T, IdxT, row_major>(distances_.data(), rows_, k_); auto metric = raft::distance::DistanceType::L2Unexpanded; knn(handle, index, search, indices, distances, metric, std::make_optional<IdxT>(0)); build_actual_output<<<raft::ceildiv(rows_ * k_, 32), 32, 0, stream>>>( actual_labels_.data(), rows_, k_, search_labels_.data(), indices_.data()); build_expected_output<<<raft::ceildiv(rows_ * k_, 32), 32, 0, stream>>>( expected_labels_.data(), rows_, k_, search_labels_.data()); ASSERT_TRUE(devArrMatch( expected_labels_.data(), actual_labels_.data(), rows_ * k_, raft::Compare<int>(), stream)); } void SetUp() override { rows_ = params_.input.size(); cols_ = params_.input[0].size(); k_ = params_.k; actual_labels_.resize(rows_ * k_, stream); expected_labels_.resize(rows_ * k_, stream); input_.resize(rows_ * cols_, stream); search_data_.resize(rows_ * cols_, stream); indices_.resize(rows_ * k_, stream); distances_.resize(rows_ * k_, stream); search_labels_.resize(rows_, stream); RAFT_CUDA_TRY( cudaMemsetAsync(actual_labels_.data(), 0, actual_labels_.size() * sizeof(int), stream)); RAFT_CUDA_TRY( cudaMemsetAsync(expected_labels_.data(), 0, expected_labels_.size() * sizeof(int), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(input_.data(), 0, input_.size() * sizeof(float), stream)); RAFT_CUDA_TRY( 
cudaMemsetAsync(search_data_.data(), 0, search_data_.size() * sizeof(float), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(indices_.data(), 0, indices_.size() * sizeof(IdxT), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(distances_.data(), 0, distances_.size() * sizeof(float), stream)); RAFT_CUDA_TRY( cudaMemsetAsync(search_labels_.data(), 0, search_labels_.size() * sizeof(int), stream)); std::vector<float> row_major_input; for (std::size_t i = 0; i < params_.input.size(); ++i) { for (std::size_t j = 0; j < params_.input[i].size(); ++j) { row_major_input.push_back(params_.input[i][j]); } } rmm::device_buffer input_d = rmm::device_buffer(row_major_input.data(), row_major_input.size() * sizeof(float), stream); float* input_ptr = static_cast<float*>(input_d.data()); rmm::device_buffer labels_d = rmm::device_buffer(params_.labels.data(), params_.labels.size() * sizeof(int), stream); int* labels_ptr = static_cast<int*>(labels_d.data()); raft::copy(input_.data(), input_ptr, rows_ * cols_, stream); raft::copy(search_data_.data(), input_ptr, rows_ * cols_, stream); raft::copy(search_labels_.data(), labels_ptr, rows_, stream); resource::sync_stream(handle, stream); } private: raft::resources handle; cudaStream_t stream; KNNInputs params_; int rows_; int cols_; rmm::device_uvector<float> input_; rmm::device_uvector<float> search_data_; rmm::device_uvector<IdxT> indices_; rmm::device_uvector<float> distances_; int k_; rmm::device_uvector<int> search_labels_; rmm::device_uvector<int> actual_labels_; rmm::device_uvector<int> expected_labels_; }; const std::vector<KNNInputs> inputs = { // 2D {{ {2.7810836, 2.550537003}, {1.465489372, 2.362125076}, {3.396561688, 4.400293529}, {1.38807019, 1.850220317}, {3.06407232, 3.005305973}, {7.627531214, 2.759262235}, {5.332441248, 2.088626775}, {6.922596716, 1.77106367}, {8.675418651, -0.242068655}, {7.673756466, 3.508563011}, }, 2, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1}}}; typedef KNNTest<float, int> KNNTestFint32_t; TEST_P(KNNTestFint32_t, BruteForce) { 
this->testBruteForce(); } typedef KNNTest<float, uint32_t> KNNTestFuint32_t; TEST_P(KNNTestFuint32_t, BruteForce) { this->testBruteForce(); } INSTANTIATE_TEST_CASE_P(KNNTest, KNNTestFint32_t, ::testing::ValuesIn(inputs)); INSTANTIATE_TEST_CASE_P(KNNTest, KNNTestFuint32_t, ::testing::ValuesIn(inputs)); } // namespace raft::neighbors::brute_force
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/ann_utils.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/device_mdarray.hpp> // raft::make_device_matrix #include <raft/core/resource/cuda_stream.hpp> #include <raft/distance/distance_types.hpp> #include <raft/matrix/copy.cuh> #include <raft/matrix/detail/select_k.cuh> #include <raft/spatial/knn/detail/ann_utils.cuh> #include <raft/util/cuda_utils.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/mr/device/device_memory_resource.hpp> #include <raft_internal/neighbors/naive_knn.cuh> #include "../test_utils.cuh" #include <gtest/gtest.h> #include <iostream> namespace raft::neighbors { struct print_dtype { cudaDataType_t value; }; inline auto operator<<(std::ostream& os, const print_dtype& p) -> std::ostream& { switch (p.value) { case CUDA_R_16F: os << "CUDA_R_16F"; break; case CUDA_C_16F: os << "CUDA_C_16F"; break; case CUDA_R_16BF: os << "CUDA_R_16BF"; break; case CUDA_C_16BF: os << "CUDA_C_16BF"; break; case CUDA_R_32F: os << "CUDA_R_32F"; break; case CUDA_C_32F: os << "CUDA_C_32F"; break; case CUDA_R_64F: os << "CUDA_R_64F"; break; case CUDA_C_64F: os << "CUDA_C_64F"; break; case CUDA_R_4I: os << "CUDA_R_4I"; break; case CUDA_C_4I: os << "CUDA_C_4I"; break; case CUDA_R_4U: os << "CUDA_R_4U"; break; case CUDA_C_4U: os << "CUDA_C_4U"; break; case CUDA_R_8I: os << "CUDA_R_8I"; break; case CUDA_C_8I: os << "CUDA_C_8I"; break; case CUDA_R_8U: os << "CUDA_R_8U"; break; 
case CUDA_C_8U: os << "CUDA_C_8U"; break; case CUDA_R_16I: os << "CUDA_R_16I"; break; case CUDA_C_16I: os << "CUDA_C_16I"; break; case CUDA_R_16U: os << "CUDA_R_16U"; break; case CUDA_C_16U: os << "CUDA_C_16U"; break; case CUDA_R_32I: os << "CUDA_R_32I"; break; case CUDA_C_32I: os << "CUDA_C_32I"; break; case CUDA_R_32U: os << "CUDA_R_32U"; break; case CUDA_C_32U: os << "CUDA_C_32U"; break; case CUDA_R_64I: os << "CUDA_R_64I"; break; case CUDA_C_64I: os << "CUDA_C_64I"; break; case CUDA_R_64U: os << "CUDA_R_64U"; break; case CUDA_C_64U: os << "CUDA_C_64U"; break; default: RAFT_FAIL("unreachable code"); } return os; } struct print_metric { raft::distance::DistanceType value; }; inline auto operator<<(std::ostream& os, const print_metric& p) -> std::ostream& { switch (p.value) { case raft::distance::L2Expanded: os << "distance::L2Expanded"; break; case raft::distance::L2SqrtExpanded: os << "distance::L2SqrtExpanded"; break; case raft::distance::CosineExpanded: os << "distance::CosineExpanded"; break; case raft::distance::L1: os << "distance::L1"; break; case raft::distance::L2Unexpanded: os << "distance::L2Unexpanded"; break; case raft::distance::L2SqrtUnexpanded: os << "distance::L2SqrtUnexpanded"; break; case raft::distance::InnerProduct: os << "distance::InnerProduct"; break; case raft::distance::Linf: os << "distance::Linf"; break; case raft::distance::Canberra: os << "distance::Canberra"; break; case raft::distance::LpUnexpanded: os << "distance::LpUnexpanded"; break; case raft::distance::CorrelationExpanded: os << "distance::CorrelationExpanded"; break; case raft::distance::JaccardExpanded: os << "distance::JaccardExpanded"; break; case raft::distance::HellingerExpanded: os << "distance::HellingerExpanded"; break; case raft::distance::Haversine: os << "distance::Haversine"; break; case raft::distance::BrayCurtis: os << "distance::BrayCurtis"; break; case raft::distance::JensenShannon: os << "distance::JensenShannon"; break; case 
raft::distance::HammingUnexpanded: os << "distance::HammingUnexpanded"; break; case raft::distance::KLDivergence: os << "distance::KLDivergence"; break; case raft::distance::RusselRaoExpanded: os << "distance::RusselRaoExpanded"; break; case raft::distance::DiceExpanded: os << "distance::DiceExpanded"; break; case raft::distance::Precomputed: os << "distance::Precomputed"; break; default: RAFT_FAIL("unreachable code"); } return os; } template <typename IdxT, typename DistT, typename CompareDist> struct idx_dist_pair { IdxT idx; DistT dist; CompareDist eq_compare; auto operator==(const idx_dist_pair<IdxT, DistT, CompareDist>& a) const -> bool { if (idx == a.idx) return true; if (eq_compare(dist, a.dist)) return true; return false; } idx_dist_pair(IdxT x, DistT y, CompareDist op) : idx(x), dist(y), eq_compare(op) {} }; /** Calculate recall value using only neighbor indices */ template <typename T> auto calc_recall(const std::vector<T>& expected_idx, const std::vector<T>& actual_idx, size_t rows, size_t cols) { size_t match_count = 0; size_t total_count = static_cast<size_t>(rows) * static_cast<size_t>(cols); for (size_t i = 0; i < rows; ++i) { for (size_t k = 0; k < cols; ++k) { size_t idx_k = i * cols + k; // row major assumption! auto act_idx = actual_idx[idx_k]; for (size_t j = 0; j < cols; ++j) { size_t idx = i * cols + j; // row major assumption! 
auto exp_idx = expected_idx[idx]; if (act_idx == exp_idx) { match_count++; break; } } } } return std::make_tuple( static_cast<double>(match_count) / static_cast<double>(total_count), match_count, total_count); } template <typename T> auto eval_recall(const std::vector<T>& expected_idx, const std::vector<T>& actual_idx, size_t rows, size_t cols, double eps, double min_recall) -> testing::AssertionResult { auto [actual_recall, match_count, total_count] = calc_recall(expected_idx, actual_idx, rows, cols); double error_margin = (actual_recall - min_recall) / std::max(1.0 - min_recall, eps); RAFT_LOG_INFO("Recall = %f (%zu/%zu), the error is %2.1f%% %s the threshold (eps = %f).", actual_recall, match_count, total_count, std::abs(error_margin * 100.0), error_margin < 0 ? "above" : "below", eps); if (actual_recall < min_recall - eps) { return testing::AssertionFailure() << "actual recall (" << actual_recall << ") is lower than the minimum expected recall (" << min_recall << "); eps = " << eps << ". "; } return testing::AssertionSuccess(); } /** Overload of calc_recall to account for distances */ template <typename T, typename DistT> auto calc_recall(const std::vector<T>& expected_idx, const std::vector<T>& actual_idx, const std::vector<DistT>& expected_dist, const std::vector<DistT>& actual_dist, size_t rows, size_t cols, double eps) { size_t match_count = 0; size_t total_count = static_cast<size_t>(rows) * static_cast<size_t>(cols); for (size_t i = 0; i < rows; ++i) { for (size_t k = 0; k < cols; ++k) { size_t idx_k = i * cols + k; // row major assumption! auto act_idx = actual_idx[idx_k]; auto act_dist = actual_dist[idx_k]; for (size_t j = 0; j < cols; ++j) { size_t idx = i * cols + j; // row major assumption! 
auto exp_idx = expected_idx[idx]; auto exp_dist = expected_dist[idx]; idx_dist_pair exp_kvp(exp_idx, exp_dist, raft::CompareApprox<DistT>(eps)); idx_dist_pair act_kvp(act_idx, act_dist, raft::CompareApprox<DistT>(eps)); if (exp_kvp == act_kvp) { match_count++; break; } } } } return std::make_tuple( static_cast<double>(match_count) / static_cast<double>(total_count), match_count, total_count); } /** same as eval_recall, but in case indices do not match, * then check distances as well, and accept match if actual dist is equal to expected_dist */ template <typename T, typename DistT> auto eval_neighbours(const std::vector<T>& expected_idx, const std::vector<T>& actual_idx, const std::vector<DistT>& expected_dist, const std::vector<DistT>& actual_dist, size_t rows, size_t cols, double eps, double min_recall) -> testing::AssertionResult { auto [actual_recall, match_count, total_count] = calc_recall(expected_idx, actual_idx, expected_dist, actual_dist, rows, cols, eps); double error_margin = (actual_recall - min_recall) / std::max(1.0 - min_recall, eps); RAFT_LOG_INFO("Recall = %f (%zu/%zu), the error is %2.1f%% %s the threshold (eps = %f).", actual_recall, match_count, total_count, std::abs(error_margin * 100.0), error_margin < 0 ? "above" : "below", eps); if (actual_recall < min_recall - eps) { return testing::AssertionFailure() << "actual recall (" << actual_recall << ") is lower than the minimum expected recall (" << min_recall << "); eps = " << eps << ". 
"; } return testing::AssertionSuccess(); } template <typename T, typename DistT, typename IdxT> auto eval_distances(raft::resources const& handle, const T* x, // dataset, n_rows * n_cols const T* queries, // n_queries * n_cols const IdxT* neighbors, // n_queries * k const DistT* distances, // n_queries *k size_t n_rows, size_t n_cols, size_t n_queries, uint32_t k, raft::distance::DistanceType metric, double eps) -> testing::AssertionResult { // for each vector, we calculate the actual distance to the k neighbors for (size_t i = 0; i < n_queries; i++) { auto y = raft::make_device_matrix<T, IdxT>(handle, k, n_cols); auto naive_dist = raft::make_device_matrix<DistT, IdxT>(handle, 1, k); raft::matrix::copy_rows<T, IdxT>( handle, make_device_matrix_view<const T, IdxT>(x, k, n_cols), y.view(), make_device_vector_view<const IdxT, IdxT>(neighbors + i * k, k)); dim3 block_dim(16, 32, 1); auto grid_y = static_cast<uint16_t>(std::min<size_t>(raft::ceildiv<size_t>(k, block_dim.y), 32768)); dim3 grid_dim(raft::ceildiv<size_t>(n_rows, block_dim.x), grid_y, 1); naive_distance_kernel<DistT, T, IdxT> <<<grid_dim, block_dim, 0, resource::get_cuda_stream(handle)>>>( naive_dist.data_handle(), queries + i * n_cols, y.data_handle(), 1, k, n_cols, metric); if (!devArrMatch(distances + i * k, naive_dist.data_handle(), naive_dist.size(), CompareApprox<float>(eps))) { std::cout << n_rows << "x" << n_cols << ", " << k << std::endl; std::cout << "query " << i << std::endl; print_vector(" indices", neighbors + i * k, k, std::cout); print_vector("n dist", distances + i * k, k, std::cout); print_vector("c dist", naive_dist.data_handle(), naive_dist.size(), std::cout); return testing::AssertionFailure(); } } return testing::AssertionSuccess(); } } // namespace raft::neighbors
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/tiled_knn.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "./ann_utils.cuh" #include "./knn_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/device_mdspan.hpp> #include <raft/core/logger.hpp> #include <raft/distance/distance.cuh> // raft::distance::pairwise_distance #include <raft/distance/distance_types.hpp> #include <raft/linalg/transpose.cuh> #include <raft/matrix/init.cuh> #include <raft/neighbors/brute_force.cuh> #include <raft/neighbors/detail/knn_brute_force.cuh> // raft::neighbors::detail::brute_force_knn_impl #include <raft/neighbors/detail/selection_faiss.cuh> // raft::neighbors::detail::select_k #include <rmm/device_buffer.hpp> #include <gtest/gtest.h> #include <cstddef> #include <iostream> #include <vector> namespace raft::neighbors::brute_force { struct TiledKNNInputs { int num_queries; int num_db_vecs; int dim; int k; int row_tiles; int col_tiles; raft::distance::DistanceType metric; bool row_major; }; std::ostream& operator<<(std::ostream& os, const TiledKNNInputs& input) { return os << "num_queries:" << input.num_queries << " num_vecs:" << input.num_db_vecs << " dim:" << input.dim << " k:" << input.k << " row_tiles:" << input.row_tiles << " col_tiles:" << input.col_tiles << " metric:" << print_metric{input.metric} << " row_major:" << input.row_major; } template <typename T> class TiledKNNTest : public ::testing::TestWithParam<TiledKNNInputs> { public: 
TiledKNNTest() : stream_(resource::get_cuda_stream(handle_)), params_(::testing::TestWithParam<TiledKNNInputs>::GetParam()), database(params_.num_db_vecs * params_.dim, stream_), search_queries(params_.num_queries * params_.dim, stream_), raft_indices_(params_.num_queries * params_.k, stream_), raft_distances_(params_.num_queries * params_.k, stream_), ref_indices_(params_.num_queries * params_.k, stream_), ref_distances_(params_.num_queries * params_.k, stream_) { raft::matrix::fill( handle_, raft::make_device_matrix_view(database.data(), params_.num_db_vecs, params_.dim), T{0.0}); raft::matrix::fill( handle_, raft::make_device_matrix_view(search_queries.data(), params_.num_queries, params_.dim), T{0.0}); raft::matrix::fill( handle_, raft::make_device_matrix_view(raft_indices_.data(), params_.num_queries, params_.k), 0); raft::matrix::fill( handle_, raft::make_device_matrix_view(raft_distances_.data(), params_.num_queries, params_.k), T{0.0}); raft::matrix::fill( handle_, raft::make_device_matrix_view(ref_indices_.data(), params_.num_queries, params_.k), 0); raft::matrix::fill( handle_, raft::make_device_matrix_view(ref_distances_.data(), params_.num_queries, params_.k), T{0.0}); } protected: void testBruteForce() { float metric_arg = 3.0; // calculate the naive knn, by calculating the full pairwise distances and doing a k-select rmm::device_uvector<T> temp_distances(num_db_vecs * num_queries, stream_); rmm::device_uvector<char> workspace(0, stream_); distance::pairwise_distance(handle_, search_queries.data(), database.data(), temp_distances.data(), num_queries, num_db_vecs, dim, workspace, metric, params_.row_major, metric_arg); // setting the 'isRowMajor' flag in the pairwise distances api, not only sets // the inputs as colmajor - but also the output. 
this means we have to transpose in this // case auto temp_dist = temp_distances.data(); rmm::device_uvector<T> temp_row_major_dist(num_db_vecs * num_queries, stream_); if (!params_.row_major) { raft::linalg::transpose( handle_, temp_dist, temp_row_major_dist.data(), num_queries, num_db_vecs, stream_); temp_dist = temp_row_major_dist.data(); } raft::neighbors::detail::select_k<int, T>(temp_dist, nullptr, num_queries, num_db_vecs, ref_distances_.data(), ref_indices_.data(), raft::distance::is_min_close(metric), k_, stream_); if ((params_.row_tiles == 0) && (params_.col_tiles == 0)) { std::vector<T*> input{database.data()}; std::vector<size_t> sizes{static_cast<size_t>(num_db_vecs)}; neighbors::detail::brute_force_knn_impl<size_t, int, T>(handle_, input, sizes, dim, const_cast<T*>(search_queries.data()), num_queries, raft_indices_.data(), raft_distances_.data(), k_, params_.row_major, params_.row_major, nullptr, metric, metric_arg); } else { neighbors::detail::tiled_brute_force_knn(handle_, search_queries.data(), database.data(), num_queries, num_db_vecs, dim, k_, raft_distances_.data(), raft_indices_.data(), metric, metric_arg, params_.row_tiles, params_.col_tiles); } // verify. 
ASSERT_TRUE(raft::spatial::knn::devArrMatchKnnPair(ref_indices_.data(), raft_indices_.data(), ref_distances_.data(), raft_distances_.data(), num_queries, k_, float(0.001), stream_, true)); // Also test out the 'index' api - where we can use precomputed norms if (params_.row_major) { auto idx = raft::neighbors::brute_force::build<T>(handle_, raft::make_device_matrix_view<const T, int64_t>( database.data(), params_.num_db_vecs, params_.dim), metric, metric_arg); auto query_view = raft::make_device_matrix_view<const T, int64_t>( search_queries.data(), params_.num_queries, params_.dim); raft::neighbors::brute_force::search<T, int>( handle_, idx, query_view, raft::make_device_matrix_view<int, int64_t>( raft_indices_.data(), params_.num_queries, params_.k), raft::make_device_matrix_view<T, int64_t>( raft_distances_.data(), params_.num_queries, params_.k)); ASSERT_TRUE(raft::spatial::knn::devArrMatchKnnPair(ref_indices_.data(), raft_indices_.data(), ref_distances_.data(), raft_distances_.data(), num_queries, k_, float(0.001), stream_, true)); // also test out the batch api. 
First get new reference results (all k, up to a certain // max size) auto all_size = std::min(params_.num_db_vecs, 1024); auto all_indices = raft::make_device_matrix<int, int64_t>(handle_, num_queries, all_size); auto all_distances = raft::make_device_matrix<T, int64_t>(handle_, num_queries, all_size); raft::neighbors::brute_force::search<T, int>( handle_, idx, query_view, all_indices.view(), all_distances.view()); int64_t offset = 0; auto query = make_batch_k_query<T, int>(handle_, idx, query_view, k_); for (auto batch : *query) { auto batch_size = batch.batch_size(); auto indices = raft::make_device_matrix<int, int64_t>(handle_, num_queries, batch_size); auto distances = raft::make_device_matrix<T, int64_t>(handle_, num_queries, batch_size); matrix::slice_coordinates<int64_t> coords{0, offset, num_queries, offset + batch_size}; matrix::slice(handle_, raft::make_const_mdspan(all_indices.view()), indices.view(), coords); matrix::slice( handle_, raft::make_const_mdspan(all_distances.view()), distances.view(), coords); ASSERT_TRUE(raft::spatial::knn::devArrMatchKnnPair(indices.data_handle(), batch.indices().data_handle(), distances.data_handle(), batch.distances().data_handle(), num_queries, batch_size, float(0.001), stream_, true)); offset += batch_size; if (offset + batch_size > all_size) break; } // also test out with variable batch sizes offset = 0; int64_t batch_size = k_; query = make_batch_k_query<T, int>(handle_, idx, query_view, batch_size); for (auto it = query->begin(); it != query->end(); it.advance(batch_size)) { // batch_size could be less than requested (in the case of final batch). handle. 
ASSERT_TRUE(it->indices().extent(1) <= batch_size); batch_size = it->indices().extent(1); auto indices = raft::make_device_matrix<int, int64_t>(handle_, num_queries, batch_size); auto distances = raft::make_device_matrix<T, int64_t>(handle_, num_queries, batch_size); matrix::slice_coordinates<int64_t> coords{0, offset, num_queries, offset + batch_size}; matrix::slice(handle_, raft::make_const_mdspan(all_indices.view()), indices.view(), coords); matrix::slice( handle_, raft::make_const_mdspan(all_distances.view()), distances.view(), coords); ASSERT_TRUE(raft::spatial::knn::devArrMatchKnnPair(indices.data_handle(), it->indices().data_handle(), distances.data_handle(), it->distances().data_handle(), num_queries, batch_size, float(0.001), stream_, true)); offset += batch_size; if (offset + batch_size > all_size) break; batch_size += 2; } } } void SetUp() override { num_queries = params_.num_queries; num_db_vecs = params_.num_db_vecs; dim = params_.dim; k_ = params_.k; metric = params_.metric; unsigned long long int seed = 1234ULL; raft::random::RngState r(seed); // JensenShannon distance requires positive values T min_val = metric == raft::distance::DistanceType::JensenShannon ? 
T(0.0) : T(-1.0); uniform(handle_, r, database.data(), num_db_vecs * dim, min_val, T(1.0)); uniform(handle_, r, search_queries.data(), num_queries * dim, min_val, T(1.0)); } private: raft::resources handle_; cudaStream_t stream_ = 0; TiledKNNInputs params_; int num_queries; int num_db_vecs; int dim; rmm::device_uvector<T> database; rmm::device_uvector<T> search_queries; rmm::device_uvector<int> raft_indices_; rmm::device_uvector<T> raft_distances_; rmm::device_uvector<int> ref_indices_; rmm::device_uvector<T> ref_distances_; int k_; raft::distance::DistanceType metric; }; const std::vector<TiledKNNInputs> random_inputs = { {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::L2Expanded, true}, {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::L2Unexpanded, true}, {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::L2SqrtExpanded, true}, {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::L2SqrtUnexpanded, true}, {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::L1, true}, {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::Linf, true}, {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::InnerProduct, true}, {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::CorrelationExpanded, true}, {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::CosineExpanded, true}, {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::LpUnexpanded, true}, {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::JensenShannon, true}, {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::L2SqrtExpanded, true}, // BrayCurtis isn't currently supported by pairwise_distance api // {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::BrayCurtis}, {256, 512, 16, 8, 16, 8, raft::distance::DistanceType::Canberra, true}, {10000, 40000, 32, 30, 512, 1024, raft::distance::DistanceType::L2Expanded, true}, {345, 1023, 16, 128, 512, 1024, raft::distance::DistanceType::CosineExpanded, true}, {789, 20516, 64, 256, 512, 4096, raft::distance::DistanceType::L2SqrtExpanded, 
true}, // Test where the final column tile has < K items: {4, 12, 32, 6, 4, 8, raft::distance::DistanceType::L2Expanded, true}, // Test where passing column_tiles < K {1, 40, 32, 30, 1, 8, raft::distance::DistanceType::L2Expanded, true}, // Passing tile sizes of 0 means to use brute_force_knn_impl (instead of the // tiled_brute_force_knn api). {1000, 500000, 128, 128, 0, 0, raft::distance::DistanceType::L2Expanded, true}, {1000, 500000, 128, 128, 0, 0, raft::distance::DistanceType::L2Expanded, false}, {1000, 5000, 128, 128, 0, 0, raft::distance::DistanceType::LpUnexpanded, true}, {1000, 5000, 128, 128, 0, 0, raft::distance::DistanceType::L2SqrtExpanded, false}, {1000, 5000, 128, 128, 0, 0, raft::distance::DistanceType::InnerProduct, false}}; typedef TiledKNNTest<float> TiledKNNTestF; TEST_P(TiledKNNTestF, BruteForce) { this->testBruteForce(); } INSTANTIATE_TEST_CASE_P(TiledKNNTest, TiledKNNTestF, ::testing::ValuesIn(random_inputs)); } // namespace raft::neighbors::brute_force
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/spatial_data.h
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <vector> namespace raft { namespace spatial { // Latitude and longitude coordinates of 51 US states / territories std::vector<float> spatial_data = { 63.588753, -154.493062, 32.318231, -86.902298, 35.20105, -91.831833, 34.048928, -111.093731, 36.778261, -119.417932, 39.550051, -105.782067, 41.603221, -73.087749, 38.905985, -77.033418, 38.910832, -75.52767, 27.664827, -81.515754, 32.157435, -82.907123, 19.898682, -155.665857, 41.878003, -93.097702, 44.068202, -114.742041, 40.633125, -89.398528, 40.551217, -85.602364, 39.011902, -98.484246, 37.839333, -84.270018, 31.244823, -92.145024, 42.407211, -71.382437, 39.045755, -76.641271, 45.253783, -69.445469, 44.314844, -85.602364, 46.729553, -94.6859, 37.964253, -91.831833, 32.354668, -89.398528, 46.879682, -110.362566, 35.759573, -79.0193, 47.551493, -101.002012, 41.492537, -99.901813, 43.193852, -71.572395, 40.058324, -74.405661, 34.97273, -105.032363, 38.80261, -116.419389, 43.299428, -74.217933, 40.417287, -82.907123, 35.007752, -97.092877, 43.804133, -120.554201, 41.203322, -77.194525, 18.220833, -66.590149, 41.580095, -71.477429, 33.836081, -81.163725, 43.969515, -99.901813, 35.517491, -86.580447, 31.968599, -99.901813, 39.32098, -111.093731, 37.431573, -78.656894, 44.558803, -72.577841, 47.751074, -120.740139, 43.78444, -88.787868, 38.597626, -80.454903, 43.075968, -107.290284}; }; // namespace spatial }; // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/knn_utils.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <memory> #include "../test_utils.cuh" #include <gtest/gtest.h> #include <raft/util/cudart_utils.hpp> namespace raft::spatial::knn { template <typename IdxT, typename DistT, typename compareDist> struct idx_dist_pair { IdxT idx; DistT dist; compareDist eq_compare; bool operator==(const idx_dist_pair<IdxT, DistT, compareDist>& a) const { if (idx == a.idx) return true; if (eq_compare(dist, a.dist)) return true; return false; } idx_dist_pair(IdxT x, DistT y, compareDist op) : idx(x), dist(y), eq_compare(op) {} }; template <typename T, typename DistT> testing::AssertionResult devArrMatchKnnPair(const T* expected_idx, const T* actual_idx, const DistT* expected_dist, const DistT* actual_dist, size_t rows, size_t cols, const DistT eps, cudaStream_t stream = 0, bool sort_inputs = false) { size_t size = rows * cols; std::unique_ptr<T[]> exp_idx_h(new T[size]); std::unique_ptr<T[]> act_idx_h(new T[size]); std::unique_ptr<DistT[]> exp_dist_h(new DistT[size]); std::unique_ptr<DistT[]> act_dist_h(new DistT[size]); raft::update_host<T>(exp_idx_h.get(), expected_idx, size, stream); raft::update_host<T>(act_idx_h.get(), actual_idx, size, stream); raft::update_host<DistT>(exp_dist_h.get(), expected_dist, size, stream); raft::update_host<DistT>(act_dist_h.get(), actual_dist, size, stream); RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); for (size_t i(0); i < rows; ++i) { 
std::vector<std::pair<DistT, T>> actual; std::vector<std::pair<DistT, T>> expected; for (size_t j(0); j < cols; ++j) { auto idx = i * cols + j; // row major assumption! auto exp_idx = exp_idx_h.get()[idx]; auto act_idx = act_idx_h.get()[idx]; auto exp_dist = exp_dist_h.get()[idx]; auto act_dist = act_dist_h.get()[idx]; actual.push_back(std::make_pair(act_dist, act_idx)); expected.push_back(std::make_pair(exp_dist, exp_idx)); } if (sort_inputs) { // inputs could be unsorted here, sort for comparison std::sort(actual.begin(), actual.end()); std::sort(expected.begin(), expected.end()); } for (size_t j(0); j < cols; ++j) { auto act = actual[j]; auto exp = expected[j]; idx_dist_pair exp_kvp(exp.second, exp.first, raft::CompareApprox<DistT>(eps)); idx_dist_pair act_kvp(act.second, act.first, raft::CompareApprox<DistT>(eps)); if (!(exp_kvp == act_kvp)) { return testing::AssertionFailure() << "actual=" << act_kvp.idx << "," << act_kvp.dist << "!=" << "expected" << exp_kvp.idx << "," << exp_kvp.dist << " @" << i << "," << j; } } } return testing::AssertionSuccess(); } } // namespace raft::spatial::knn
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/epsilon_neighborhood.cu
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <memory> #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/random/make_blobs.cuh> #include <raft/spatial/knn/epsilon_neighborhood.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> namespace raft { namespace spatial { namespace knn { template <typename T, typename IdxT> struct EpsInputs { IdxT n_row, n_col, n_centers, n_batches; T eps; }; template <typename T, typename IdxT> ::std::ostream& operator<<(::std::ostream& os, const EpsInputs<T, IdxT>& p) { return os; } template <typename T, typename IdxT> class EpsNeighTest : public ::testing::TestWithParam<EpsInputs<T, IdxT>> { protected: EpsNeighTest() : data(0, resource::get_cuda_stream(handle)), adj(0, resource::get_cuda_stream(handle)), labels(0, resource::get_cuda_stream(handle)), vd(0, resource::get_cuda_stream(handle)) { } void SetUp() override { auto stream = resource::get_cuda_stream(handle); param = ::testing::TestWithParam<EpsInputs<T, IdxT>>::GetParam(); data.resize(param.n_row * param.n_col, stream); labels.resize(param.n_row, stream); batchSize = param.n_row / param.n_batches; adj.resize(param.n_row * batchSize, stream); vd.resize(batchSize + 1, stream); RAFT_CUDA_TRY(cudaMemsetAsync(vd.data(), 0, vd.size() * sizeof(IdxT), stream)); random::make_blobs<T, IdxT>(data.data(), 
labels.data(), param.n_row, param.n_col, param.n_centers, stream, true, nullptr, nullptr, T(0.01), false); } const raft::resources handle; EpsInputs<T, IdxT> param; cudaStream_t stream = 0; rmm::device_uvector<T> data; rmm::device_uvector<bool> adj; rmm::device_uvector<IdxT> labels, vd; IdxT batchSize; }; // class EpsNeighTest const std::vector<EpsInputs<float, int>> inputsfi = { {15000, 16, 5, 1, 2.f}, {14000, 16, 5, 1, 2.f}, {15000, 17, 5, 1, 2.f}, {14000, 17, 5, 1, 2.f}, {15000, 18, 5, 1, 2.f}, {14000, 18, 5, 1, 2.f}, {15000, 32, 5, 1, 2.f}, {14000, 32, 5, 1, 2.f}, {20000, 10000, 10, 1, 2.f}, {20000, 10000, 10, 2, 2.f}, }; typedef EpsNeighTest<float, int> EpsNeighTestFI; TEST_P(EpsNeighTestFI, Result) { for (int i = 0; i < param.n_batches; ++i) { RAFT_CUDA_TRY(cudaMemsetAsync(adj.data(), 0, sizeof(bool) * param.n_row * batchSize, stream)); RAFT_CUDA_TRY(cudaMemsetAsync(vd.data(), 0, sizeof(int) * (batchSize + 1), stream)); auto adj_view = make_device_matrix_view<bool, int>(adj.data(), param.n_row, batchSize); auto vd_view = make_device_vector_view<int, int>(vd.data(), batchSize + 1); auto x_view = make_device_matrix_view<float, int>(data.data(), param.n_row, param.n_col); auto y_view = make_device_matrix_view<float, int>( data.data() + (i * batchSize * param.n_col), batchSize, param.n_col); eps_neighbors_l2sq<float, int, int>( handle, x_view, y_view, adj_view, vd_view, param.eps * param.eps); ASSERT_TRUE(raft::devArrMatch( param.n_row / param.n_centers, vd.data(), batchSize, raft::Compare<int>(), stream)); } } INSTANTIATE_TEST_CASE_P(EpsNeighTests, EpsNeighTestFI, ::testing::ValuesIn(inputsfi)); }; // namespace knn }; // namespace spatial }; // namespace raft
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/selection.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <gtest/gtest.h> #include <numeric> #include <raft/core/resource/cuda_stream.hpp> #include <raft/neighbors/detail/selection_faiss.cuh> #include <raft/neighbors/detail/selection_faiss_helpers.cuh> // kFaissMax #include <raft/random/rng.cuh> #include <raft/util/cudart_utils.hpp> #include "../test_utils.cuh" #include <raft/sparse/detail/utils.h> #include <raft/spatial/knn/knn.cuh> namespace raft::spatial::selection { using namespace raft; using namespace raft::sparse; struct SelectTestSpec { int n_inputs; int input_len; int k; int select_min; bool use_index_input = true; }; std::ostream& operator<<(std::ostream& os, const SelectTestSpec& ss) { os << "spec{size: " << ss.input_len << "*" << ss.n_inputs << ", k: " << ss.k; os << (ss.select_min ? 
"; min}" : "; max}"); return os; } template <typename IdxT> auto gen_simple_ids(int n_inputs, int input_len, const raft::resources& handle) -> std::vector<IdxT> { std::vector<IdxT> out(n_inputs * input_len); auto s = resource::get_cuda_stream(handle); rmm::device_uvector<IdxT> out_d(out.size(), s); iota_fill(out_d.data(), IdxT(n_inputs), IdxT(input_len), s); update_host(out.data(), out_d.data(), out.size(), s); s.synchronize(); return out; } template <typename KeyT, typename IdxT> struct SelectInOutSimple { public: bool not_supported = false; SelectInOutSimple(std::shared_ptr<raft::resources> handle, const SelectTestSpec& spec, const std::vector<KeyT>& in_dists, const std::vector<KeyT>& out_dists, const std::vector<IdxT>& out_ids) : in_dists_(in_dists), in_ids_(gen_simple_ids<IdxT>(spec.n_inputs, spec.input_len, *handle.get())), out_dists_(out_dists), out_ids_(out_ids), handle_(handle) { } auto get_in_dists() -> std::vector<KeyT>& { return in_dists_; } auto get_in_ids() -> std::vector<IdxT>& { return in_ids_; } auto get_out_dists() -> std::vector<KeyT>& { return out_dists_; } auto get_out_ids() -> std::vector<IdxT>& { return out_ids_; } private: std::shared_ptr<raft::resources> handle_; std::vector<KeyT> in_dists_; std::vector<IdxT> in_ids_; std::vector<KeyT> out_dists_; std::vector<IdxT> out_ids_; }; template <typename KeyT, typename IdxT> struct SelectInOutComputed { public: bool not_supported = false; SelectInOutComputed(std::shared_ptr<raft::resources> handle, const SelectTestSpec& spec, knn::SelectKAlgo algo, const std::vector<KeyT>& in_dists, const std::optional<std::vector<IdxT>>& in_ids = std::nullopt) : handle_(handle), in_dists_(in_dists), in_ids_(in_ids.value_or(gen_simple_ids<IdxT>(spec.n_inputs, spec.input_len, *handle.get()))), out_dists_(spec.n_inputs * spec.k), out_ids_(spec.n_inputs * spec.k) { // check if the size is supported by the algorithm switch (algo) { case knn::SelectKAlgo::WARP_SORT: if (spec.k > 
raft::matrix::detail::select::warpsort::kMaxCapacity) { not_supported = true; return; } break; case knn::SelectKAlgo::FAISS: if (spec.k > raft::neighbors::detail::kFaissMaxK<IdxT, KeyT>()) { not_supported = true; return; } break; default: break; } auto stream = resource::get_cuda_stream(*handle_); rmm::device_uvector<KeyT> in_dists_d(in_dists_.size(), stream); rmm::device_uvector<IdxT> in_ids_d(in_ids_.size(), stream); rmm::device_uvector<KeyT> out_dists_d(out_dists_.size(), stream); rmm::device_uvector<IdxT> out_ids_d(out_ids_.size(), stream); update_device(in_dists_d.data(), in_dists_.data(), in_dists_.size(), stream); update_device(in_ids_d.data(), in_ids_.data(), in_ids_.size(), stream); raft::spatial::knn::select_k<IdxT, KeyT>(in_dists_d.data(), spec.use_index_input ? in_ids_d.data() : nullptr, spec.n_inputs, spec.input_len, out_dists_d.data(), out_ids_d.data(), spec.select_min, spec.k, stream, algo); update_host(out_dists_.data(), out_dists_d.data(), out_dists_.size(), stream); update_host(out_ids_.data(), out_ids_d.data(), out_ids_.size(), stream); interruptible::synchronize(stream); auto p = topk_sort_permutation(out_dists_, out_ids_, spec.k, spec.select_min); apply_permutation(out_dists_, p); apply_permutation(out_ids_, p); } auto get_in_dists() -> std::vector<KeyT>& { return in_dists_; } auto get_in_ids() -> std::vector<IdxT>& { return in_ids_; } auto get_out_dists() -> std::vector<KeyT>& { return out_dists_; } auto get_out_ids() -> std::vector<IdxT>& { return out_ids_; } private: std::shared_ptr<raft::resources> handle_; std::vector<KeyT> in_dists_; std::vector<IdxT> in_ids_; std::vector<KeyT> out_dists_; std::vector<IdxT> out_ids_; auto topk_sort_permutation(const std::vector<KeyT>& vec, const std::vector<IdxT>& inds, int k, bool select_min) -> std::vector<IdxT> { std::vector<IdxT> p(vec.size()); std::iota(p.begin(), p.end(), 0); if (select_min) { std::sort(p.begin(), p.end(), [&vec, &inds, k](IdxT i, IdxT j) { const IdxT ik = i / k; const IdxT jk = j / 
k; if (ik == jk) { if (vec[i] == vec[j]) { return inds[i] < inds[j]; } return vec[i] < vec[j]; } return ik < jk; }); } else { std::sort(p.begin(), p.end(), [&vec, &inds, k](IdxT i, IdxT j) { const IdxT ik = i / k; const IdxT jk = j / k; if (ik == jk) { if (vec[i] == vec[j]) { return inds[i] < inds[j]; } return vec[i] > vec[j]; } return ik < jk; }); } return p; } template <typename T> void apply_permutation(std::vector<T>& vec, const std::vector<IdxT>& p) { for (auto i = IdxT(vec.size()) - 1; i > 0; i--) { auto j = p[i]; while (j > i) j = p[j]; std::swap(vec[j], vec[i]); } } }; template <typename InOut> using Params = std::tuple<SelectTestSpec, knn::SelectKAlgo, InOut, std::shared_ptr<raft::resources>>; template <typename KeyT, typename IdxT, template <typename, typename> typename ParamsReader> class SelectionTest : public testing::TestWithParam<typename ParamsReader<KeyT, IdxT>::ParamsIn> { protected: std::shared_ptr<raft::resources> handle_; const SelectTestSpec spec; const knn::SelectKAlgo algo; typename ParamsReader<KeyT, IdxT>::InOut ref; SelectInOutComputed<KeyT, IdxT> res; public: explicit SelectionTest(Params<typename ParamsReader<KeyT, IdxT>::InOut> ps) : handle_(std::get<3>(ps)), spec(std::get<0>(ps)), algo(std::get<1>(ps)), ref(std::get<2>(ps)), res(handle_, spec, algo, ref.get_in_dists(), ref.get_in_ids()) { } explicit SelectionTest(typename ParamsReader<KeyT, IdxT>::ParamsIn ps) : SelectionTest(ParamsReader<KeyT, IdxT>::read(ps)) { } SelectionTest() : SelectionTest(testing::TestWithParam<typename ParamsReader<KeyT, IdxT>::ParamsIn>::GetParam()) { } void run() { if (ref.not_supported || res.not_supported) { GTEST_SKIP(); } ASSERT_TRUE(hostVecMatch(ref.get_out_dists(), res.get_out_dists(), Compare<KeyT>())); // If the dists (keys) are the same, different corresponding ids may end up in the selection due // to non-deterministic nature of some implementations. 
auto& in_ids = ref.get_in_ids(); auto& in_dists = ref.get_in_dists(); auto compare_ids = [&in_ids, &in_dists](const IdxT& i, const IdxT& j) { if (i == j) return true; auto ix_i = size_t(std::find(in_ids.begin(), in_ids.end(), i) - in_ids.begin()); auto ix_j = size_t(std::find(in_ids.begin(), in_ids.end(), j) - in_ids.begin()); if (ix_i >= in_ids.size() || ix_j >= in_ids.size()) return false; auto dist_i = in_dists[ix_i]; auto dist_j = in_dists[ix_j]; if (dist_i == dist_j) return true; std::cout << "ERROR: ref[" << ix_i << "] = " << dist_i << " != " << "res[" << ix_j << "] = " << dist_j << std::endl; return false; }; ASSERT_TRUE(hostVecMatch(ref.get_out_ids(), res.get_out_ids(), compare_ids)); } }; template <typename KeyT, typename IdxT> struct params_simple { using InOut = SelectInOutSimple<KeyT, IdxT>; using Inputs = std::tuple<SelectTestSpec, std::vector<KeyT>, std::vector<KeyT>, std::vector<IdxT>>; using Handle = std::shared_ptr<raft::resources>; using ParamsIn = std::tuple<Inputs, knn::SelectKAlgo, Handle>; static auto read(ParamsIn ps) -> Params<InOut> { auto ins = std::get<0>(ps); auto algo = std::get<1>(ps); auto handle = std::get<2>(ps); return std::make_tuple( std::get<0>(ins), algo, SelectInOutSimple<KeyT, IdxT>( handle, std::get<0>(ins), std::get<1>(ins), std::get<2>(ins), std::get<3>(ins)), handle); } }; auto inputs_simple_f = testing::Values( params_simple<float, int>::Inputs( {5, 5, 5, true, true}, {5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0, 1.0, 4.0, 5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0}, {1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0}, {4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 3, 0, 1, 4, 2, 4, 2, 1, 3, 0, 0, 2, 1, 4, 3}), params_simple<float, int>::Inputs( {5, 5, 3, true, true}, {5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0, 1.0, 4.0, 5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0}, {1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 
1.0, 2.0, 3.0, 1.0, 2.0, 3.0}, {4, 3, 2, 0, 1, 2, 3, 0, 1, 4, 2, 1, 0, 2, 1}), params_simple<float, int>::Inputs( {5, 5, 5, true, false}, {5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0, 1.0, 4.0, 5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0}, {1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0}, {4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 3, 0, 1, 4, 2, 4, 2, 1, 3, 0, 0, 2, 1, 4, 3}), params_simple<float, int>::Inputs( {5, 5, 3, true, false}, {5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0, 1.0, 4.0, 5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0}, {1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0}, {4, 3, 2, 0, 1, 2, 3, 0, 1, 4, 2, 1, 0, 2, 1}), params_simple<float, int>::Inputs( {5, 7, 3, true, true}, {5.0, 4.0, 3.0, 2.0, 1.3, 7.5, 19.0, 9.0, 2.0, 3.0, 3.0, 5.0, 6.0, 4.0, 2.0, 3.0, 5.0, 1.0, 4.0, 1.0, 1.0, 5.0, 7.0, 2.5, 4.0, 7.0, 8.0, 8.0, 1.0, 3.0, 2.0, 5.0, 4.0, 1.1, 1.2}, {1.3, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 1.0, 1.0, 2.5, 4.0, 5.0, 1.0, 1.1, 1.2}, {4, 3, 2, 1, 2, 3, 3, 5, 6, 2, 3, 0, 0, 5, 6}), params_simple<float, int>::Inputs( {1, 7, 3, true, true}, {2.0, 3.0, 5.0, 1.0, 4.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {3, 5, 6}), params_simple<float, int>::Inputs( {1, 7, 3, false, false}, {2.0, 3.0, 5.0, 1.0, 4.0, 1.0, 1.0}, {5.0, 4.0, 3.0}, {2, 4, 1}), params_simple<float, int>::Inputs( {1, 7, 3, false, true}, {2.0, 3.0, 5.0, 9.0, 4.0, 9.0, 9.0}, {9.0, 9.0, 9.0}, {3, 5, 6}), params_simple<float, int>::Inputs( {1, 130, 5, false, true}, {19, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 4, 4, 2, 3, 2, 3, 2, 3, 2, 3, 2, 20}, {20, 19, 18, 17, 
16}, {129, 0, 117, 116, 115}), params_simple<float, int>::Inputs( {1, 130, 15, false, true}, {19, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 4, 4, 2, 3, 2, 3, 2, 3, 2, 3, 2, 20}, {20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6}, {129, 0, 117, 116, 115, 114, 113, 112, 111, 110, 109, 108, 107, 106, 105})); typedef SelectionTest<float, int, params_simple> SimpleFloatInt; TEST_P(SimpleFloatInt, Run) { run(); } INSTANTIATE_TEST_CASE_P(SelectionTest, SimpleFloatInt, testing::Combine(inputs_simple_f, testing::Values(knn::SelectKAlgo::FAISS, knn::SelectKAlgo::RADIX_8_BITS, knn::SelectKAlgo::RADIX_11_BITS, knn::SelectKAlgo::WARP_SORT), testing::Values(std::make_shared<raft::resources>()))); template <knn::SelectKAlgo RefAlgo> struct with_ref { template <typename KeyT, typename IdxT> struct params_random { using InOut = SelectInOutComputed<KeyT, IdxT>; using Handle = std::shared_ptr<raft::resources>; using ParamsIn = std::tuple<SelectTestSpec, knn::SelectKAlgo, Handle>; static auto read(ParamsIn ps) -> Params<InOut> { auto spec = std::get<0>(ps); auto algo = std::get<1>(ps); auto handle = std::get<2>(ps); std::vector<KeyT> dists(spec.input_len * spec.n_inputs); { auto s = resource::get_cuda_stream(*handle); rmm::device_uvector<KeyT> dists_d(spec.input_len * spec.n_inputs, s); raft::random::RngState r(42); normal(*(handle.get()), r, dists_d.data(), dists_d.size(), KeyT(10.0), KeyT(100.0)); update_host(dists.data(), dists_d.data(), dists_d.size(), s); s.synchronize(); } return std::make_tuple( spec, algo, SelectInOutComputed<KeyT, IdxT>(handle, spec, RefAlgo, dists), handle); } }; }; auto inputs_random_longlist = testing::Values(SelectTestSpec{1, 130, 15, 
false}, SelectTestSpec{1, 128, 15, false}, SelectTestSpec{20, 700, 1, true}, SelectTestSpec{20, 700, 2, true}, SelectTestSpec{20, 700, 3, true}, SelectTestSpec{20, 700, 4, true}, SelectTestSpec{20, 700, 5, true}, SelectTestSpec{20, 700, 6, true}, SelectTestSpec{20, 700, 7, true}, SelectTestSpec{20, 700, 8, true}, SelectTestSpec{20, 700, 9, true}, SelectTestSpec{20, 700, 10, true, false}, SelectTestSpec{20, 700, 11, true}, SelectTestSpec{20, 700, 12, true}, SelectTestSpec{20, 700, 16, true}, SelectTestSpec{100, 1700, 17, true}, SelectTestSpec{100, 1700, 31, true, false}, SelectTestSpec{100, 1700, 32, false}, SelectTestSpec{100, 1700, 33, false}, SelectTestSpec{100, 1700, 63, false}, SelectTestSpec{100, 1700, 64, false, false}, SelectTestSpec{100, 1700, 65, false}, SelectTestSpec{100, 1700, 255, true}, SelectTestSpec{100, 1700, 256, true}, SelectTestSpec{100, 1700, 511, false}, SelectTestSpec{100, 1700, 512, true}, SelectTestSpec{100, 1700, 1023, false, false}, SelectTestSpec{100, 1700, 1024, true}, SelectTestSpec{100, 1700, 1700, true}); auto inputs_random_largesize = testing::Values(SelectTestSpec{100, 100000, 1, true}, SelectTestSpec{100, 100000, 2, true}, SelectTestSpec{100, 100000, 3, true, false}, SelectTestSpec{100, 100000, 7, true}, SelectTestSpec{100, 100000, 16, true}, SelectTestSpec{100, 100000, 31, true}, SelectTestSpec{100, 100000, 32, true, false}, SelectTestSpec{100, 100000, 60, true}, SelectTestSpec{100, 100000, 100, true, false}, SelectTestSpec{100, 100000, 200, true}, SelectTestSpec{100000, 100, 100, false}, SelectTestSpec{1, 100000000, 1, true}, SelectTestSpec{1, 100000000, 16, false, false}, SelectTestSpec{1, 100000000, 64, false}, SelectTestSpec{1, 100000000, 128, true, false}, SelectTestSpec{1, 100000000, 256, false, false}); auto inputs_random_largek = testing::Values(SelectTestSpec{100, 100000, 1000, true}, SelectTestSpec{100, 100000, 2000, false}, SelectTestSpec{100, 100000, 100000, true, false}, SelectTestSpec{100, 100000, 2048, false}, 
SelectTestSpec{100, 100000, 1237, true}); typedef SelectionTest<float, int, with_ref<knn::SelectKAlgo::FAISS>::params_random> ReferencedRandomFloatInt; TEST_P(ReferencedRandomFloatInt, Run) { run(); } INSTANTIATE_TEST_CASE_P(SelectionTest, ReferencedRandomFloatInt, testing::Combine(inputs_random_longlist, testing::Values(knn::SelectKAlgo::RADIX_8_BITS, knn::SelectKAlgo::RADIX_11_BITS, knn::SelectKAlgo::WARP_SORT), testing::Values(std::make_shared<raft::resources>()))); typedef SelectionTest<double, size_t, with_ref<knn::SelectKAlgo::FAISS>::params_random> ReferencedRandomDoubleSizeT; TEST_P(ReferencedRandomDoubleSizeT, Run) { run(); } INSTANTIATE_TEST_CASE_P(SelectionTest, ReferencedRandomDoubleSizeT, testing::Combine(inputs_random_longlist, testing::Values(knn::SelectKAlgo::RADIX_8_BITS, knn::SelectKAlgo::RADIX_11_BITS, knn::SelectKAlgo::WARP_SORT), testing::Values(std::make_shared<raft::resources>()))); typedef SelectionTest<double, int, with_ref<knn::SelectKAlgo::FAISS>::params_random> ReferencedRandomDoubleInt; TEST_P(ReferencedRandomDoubleInt, LargeSize) { run(); } INSTANTIATE_TEST_CASE_P(SelectionTest, ReferencedRandomDoubleInt, testing::Combine(inputs_random_largesize, testing::Values(knn::SelectKAlgo::WARP_SORT), testing::Values(std::make_shared<raft::resources>()))); /** TODO: Fix test failure in RAFT CI * * SelectionTest/ReferencedRandomFloatSizeT.LargeK/0 * Indicices do not match! 
ref[91628] = 131.359 != res[36504] = 158.438 * Actual: false (actual=36504 != expected=91628 @38999; * * SelectionTest/ReferencedRandomFloatSizeT.LargeK/1 * ERROR: ref[57977] = 58.9079 != res[21973] = 54.9354 * Actual: false (actual=21973 != expected=57977 @107999; * */ typedef SelectionTest<float, size_t, with_ref<knn::SelectKAlgo::RADIX_11_BITS>::params_random> ReferencedRandomFloatSizeT; TEST_P(ReferencedRandomFloatSizeT, LargeK) { run(); } INSTANTIATE_TEST_CASE_P(SelectionTest, ReferencedRandomFloatSizeT, testing::Combine(inputs_random_largek, testing::Values(knn::SelectKAlgo::FAISS), testing::Values(std::make_shared<raft::resources>()))); } // namespace raft::spatial::selection
0
rapidsai_public_repos/raft/cpp/test
rapidsai_public_repos/raft/cpp/test/neighbors/fused_l2_knn.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "./knn_utils.cuh" #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/device_mdspan.hpp> #include <raft/distance/distance_types.hpp> #include <raft/neighbors/brute_force.cuh> #include <raft/random/rng.cuh> #include <raft/spatial/knn/knn.cuh> #include <raft/distance/distance.cuh> #include <rmm/device_buffer.hpp> #include <gtest/gtest.h> #include <cstddef> #include <iostream> #include <vector> namespace raft { namespace spatial { namespace knn { struct FusedL2KNNInputs { int num_queries; int num_db_vecs; int dim; int k; raft::distance::DistanceType metric_; }; template <typename T> class FusedL2KNNTest : public ::testing::TestWithParam<FusedL2KNNInputs> { public: FusedL2KNNTest() : stream_(resource::get_cuda_stream(handle_)), params_(::testing::TestWithParam<FusedL2KNNInputs>::GetParam()), database(params_.num_db_vecs * params_.dim, stream_), search_queries(params_.num_queries * params_.dim, stream_), raft_indices_(params_.num_queries * params_.k, stream_), raft_distances_(params_.num_queries * params_.k, stream_), ref_indices_(params_.num_queries * params_.k, stream_), ref_distances_(params_.num_queries * params_.k, stream_) { RAFT_CUDA_TRY(cudaMemsetAsync(database.data(), 0, database.size() * sizeof(T), stream_)); RAFT_CUDA_TRY( cudaMemsetAsync(search_queries.data(), 0, search_queries.size() * sizeof(T), stream_)); RAFT_CUDA_TRY( 
cudaMemsetAsync(raft_indices_.data(), 0, raft_indices_.size() * sizeof(int64_t), stream_)); RAFT_CUDA_TRY( cudaMemsetAsync(raft_distances_.data(), 0, raft_distances_.size() * sizeof(T), stream_)); RAFT_CUDA_TRY( cudaMemsetAsync(ref_indices_.data(), 0, ref_indices_.size() * sizeof(int64_t), stream_)); RAFT_CUDA_TRY( cudaMemsetAsync(ref_distances_.data(), 0, ref_distances_.size() * sizeof(T), stream_)); } protected: void testBruteForce() { // calculate the naive knn, by calculating the full pairwise distances and doing a k-select rmm::device_uvector<T> temp_distances(num_db_vecs * num_queries, stream_); distance::pairwise_distance( handle_, raft::make_device_matrix_view<T, int32_t>(search_queries.data(), num_queries, dim), raft::make_device_matrix_view<T, int32_t>(database.data(), num_db_vecs, dim), raft::make_device_matrix_view<T, int32_t>(temp_distances.data(), num_queries, num_db_vecs), metric); spatial::knn::select_k<int64_t, T>(temp_distances.data(), nullptr, num_queries, num_db_vecs, ref_distances_.data(), ref_indices_.data(), true, k_, stream_); auto index_view = raft::make_device_matrix_view<const T, int64_t>(database.data(), num_db_vecs, dim); auto query_view = raft::make_device_matrix_view<const T, int64_t>(search_queries.data(), num_queries, dim); auto out_indices_view = raft::make_device_matrix_view<int64_t, int64_t>(raft_indices_.data(), num_queries, k_); auto out_dists_view = raft::make_device_matrix_view<T, int64_t>(raft_distances_.data(), num_queries, k_); raft::neighbors::brute_force::fused_l2_knn( handle_, index_view, query_view, out_indices_view, out_dists_view, metric); // verify. 
ASSERT_TRUE(devArrMatchKnnPair(ref_indices_.data(), raft_indices_.data(), ref_distances_.data(), raft_distances_.data(), num_queries, k_, float(0.001), stream_)); } void SetUp() override { num_queries = params_.num_queries; num_db_vecs = params_.num_db_vecs; dim = params_.dim; k_ = params_.k; metric = params_.metric_; unsigned long long int seed = 1234ULL; raft::random::RngState r(seed); uniform(handle_, r, database.data(), num_db_vecs * dim, T(-1.0), T(1.0)); uniform(handle_, r, search_queries.data(), num_queries * dim, T(-1.0), T(1.0)); } private: raft::resources handle_; cudaStream_t stream_ = 0; FusedL2KNNInputs params_; int num_queries; int num_db_vecs; int dim; rmm::device_uvector<T> database; rmm::device_uvector<T> search_queries; rmm::device_uvector<int64_t> raft_indices_; rmm::device_uvector<T> raft_distances_; rmm::device_uvector<int64_t> ref_indices_; rmm::device_uvector<T> ref_distances_; int k_; raft::distance::DistanceType metric; }; const std::vector<FusedL2KNNInputs> inputs = { {100, 1000, 16, 10, raft::distance::DistanceType::L2Expanded}, {256, 256, 30, 10, raft::distance::DistanceType::L2Expanded}, {1000, 10000, 16, 10, raft::distance::DistanceType::L2Expanded}, {100, 1000, 16, 50, raft::distance::DistanceType::L2Expanded}, {20, 10000, 16, 10, raft::distance::DistanceType::L2Expanded}, {1000, 10000, 16, 50, raft::distance::DistanceType::L2Expanded}, {1000, 10000, 32, 50, raft::distance::DistanceType::L2Expanded}, {10000, 40000, 32, 30, raft::distance::DistanceType::L2Expanded}, // L2 unexpanded {100, 1000, 16, 10, raft::distance::DistanceType::L2Unexpanded}, {1000, 10000, 16, 10, raft::distance::DistanceType::L2Unexpanded}, {100, 1000, 16, 50, raft::distance::DistanceType::L2Unexpanded}, {20, 10000, 16, 50, raft::distance::DistanceType::L2Unexpanded}, {1000, 10000, 16, 50, raft::distance::DistanceType::L2Unexpanded}, {1000, 10000, 32, 50, raft::distance::DistanceType::L2Unexpanded}, {10000, 40000, 32, 30, 
raft::distance::DistanceType::L2Unexpanded}, }; typedef FusedL2KNNTest<float> FusedL2KNNTestF; TEST_P(FusedL2KNNTestF, FusedBruteForce) { this->testBruteForce(); } INSTANTIATE_TEST_CASE_P(FusedL2KNNTest, FusedL2KNNTestF, ::testing::ValuesIn(inputs)); } // namespace knn } // namespace spatial } // namespace raft
0
rapidsai_public_repos/raft/cpp/test/neighbors
rapidsai_public_repos/raft/cpp/test/neighbors/ann_cagra/test_uint8_t_uint32_t.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "../ann_cagra.cuh" namespace raft::neighbors::cagra { typedef AnnCagraTest<float, std::uint8_t, std::uint32_t> AnnCagraTestU8_U32; TEST_P(AnnCagraTestU8_U32, AnnCagra) { this->testCagra(); } typedef AnnCagraSortTest<float, std::uint8_t, std::uint32_t> AnnCagraSortTestU8_U32; TEST_P(AnnCagraSortTestU8_U32, AnnCagraSort) { this->testCagraSort(); } typedef AnnCagraFilterTest<float, std::uint8_t, std::uint32_t> AnnCagraFilterTestU8_U32; TEST_P(AnnCagraFilterTestU8_U32, AnnCagraSort) { this->testCagraFilter(); this->testCagraRemoved(); } INSTANTIATE_TEST_CASE_P(AnnCagraTest, AnnCagraTestU8_U32, ::testing::ValuesIn(inputs)); INSTANTIATE_TEST_CASE_P(AnnCagraSortTest, AnnCagraSortTestU8_U32, ::testing::ValuesIn(inputs)); INSTANTIATE_TEST_CASE_P(AnnCagraFilterTest, AnnCagraFilterTestU8_U32, ::testing::ValuesIn(inputs)); } // namespace raft::neighbors::cagra
0
rapidsai_public_repos/raft/cpp/test/neighbors
rapidsai_public_repos/raft/cpp/test/neighbors/ann_cagra/test_float_uint32_t.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "../ann_cagra.cuh" namespace raft::neighbors::cagra { typedef AnnCagraTest<float, float, std::uint32_t> AnnCagraTestF_U32; TEST_P(AnnCagraTestF_U32, AnnCagra) { this->testCagra(); } typedef AnnCagraSortTest<float, float, std::uint32_t> AnnCagraSortTestF_U32; TEST_P(AnnCagraSortTestF_U32, AnnCagraSort) { this->testCagraSort(); } typedef AnnCagraFilterTest<float, float, std::uint32_t> AnnCagraFilterTestF_U32; TEST_P(AnnCagraFilterTestF_U32, AnnCagraFilter) { this->testCagraFilter(); this->testCagraRemoved(); } INSTANTIATE_TEST_CASE_P(AnnCagraTest, AnnCagraTestF_U32, ::testing::ValuesIn(inputs)); INSTANTIATE_TEST_CASE_P(AnnCagraSortTest, AnnCagraSortTestF_U32, ::testing::ValuesIn(inputs)); INSTANTIATE_TEST_CASE_P(AnnCagraFilterTest, AnnCagraFilterTestF_U32, ::testing::ValuesIn(inputs)); } // namespace raft::neighbors::cagra
0
rapidsai_public_repos/raft/cpp/test/neighbors
rapidsai_public_repos/raft/cpp/test/neighbors/ann_cagra/test_float_int64_t.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "../ann_cagra.cuh" #include "search_kernel_uint64_t.cuh" namespace raft::neighbors::cagra { typedef AnnCagraTest<float, float, std::int64_t> AnnCagraTestF_I64; TEST_P(AnnCagraTestF_I64, AnnCagra) { this->testCagra(); } INSTANTIATE_TEST_CASE_P(AnnCagraTest, AnnCagraTestF_I64, ::testing::ValuesIn(inputs)); } // namespace raft::neighbors::cagra
0