repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_cagra/test_int8_t_uint32_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "../ann_cagra.cuh"
namespace raft::neighbors::cagra {

// CAGRA ANN tests specialized for int8_t data with uint32_t index type
// (float distances). The fixtures, `inputs`, and test bodies come from
// ../ann_cagra.cuh; this translation unit only instantiates them.
// Note: `using` aliases instead of `typedef`, matching the ivf_pq test TUs.
using AnnCagraTestI8_U32 = AnnCagraTest<float, std::int8_t, std::uint32_t>;
TEST_P(AnnCagraTestI8_U32, AnnCagra) { this->testCagra(); }

using AnnCagraSortTestI8_U32 = AnnCagraSortTest<float, std::int8_t, std::uint32_t>;
TEST_P(AnnCagraSortTestI8_U32, AnnCagraSort) { this->testCagraSort(); }

using AnnCagraFilterTestI8_U32 = AnnCagraFilterTest<float, std::int8_t, std::uint32_t>;
TEST_P(AnnCagraFilterTestI8_U32, AnnCagraFilter)
{
  // Exercise both the filtered-search and the removed-items variants.
  this->testCagraFilter();
  this->testCagraRemoved();
}

INSTANTIATE_TEST_CASE_P(AnnCagraTest, AnnCagraTestI8_U32, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AnnCagraSortTest, AnnCagraSortTestI8_U32, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AnnCagraFilterTest, AnnCagraFilterTestI8_U32, ::testing::ValuesIn(inputs));

}  // namespace raft::neighbors::cagra
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_cagra/search_kernel_uint64_t.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/neighbors/sample_filter_types.hpp> // none_cagra_sample_filter
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
namespace raft::neighbors::cagra::detail {
namespace multi_cta_search {
// Declarations of the explicit instantiations of the multi-CTA CAGRA search
// kernel selector. `extern template` suppresses implicit instantiation in
// translation units that include this header; the matching definitions are
// compiled in separate .cu files.
#define instantiate_kernel_selection(                                                         \
  TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T)                   \
  extern template void                                                                        \
  select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>(   \
    raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset,                   \
    raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph,                        \
    INDEX_T* const topk_indices_ptr,                                                          \
    DISTANCE_T* const topk_distances_ptr,                                                     \
    const DATA_T* const queries_ptr,                                                          \
    const uint32_t num_queries,                                                               \
    const INDEX_T* dev_seed_ptr,                                                              \
    uint32_t* const num_executed_iterations,                                                  \
    uint32_t topk,                                                                            \
    uint32_t block_size,                                                                      \
    uint32_t result_buffer_size,                                                              \
    uint32_t smem_size,                                                                       \
    int64_t hash_bitlen,                                                                      \
    INDEX_T* hashmap_ptr,                                                                     \
    uint32_t num_cta_per_query,                                                               \
    uint32_t num_random_samplings,                                                            \
    uint64_t rand_xor_mask,                                                                   \
    uint32_t num_seeds,                                                                       \
    size_t itopk_size,                                                                        \
    size_t search_width,                                                                      \
    size_t min_iterations,                                                                    \
    size_t max_iterations,                                                                    \
    SAMPLE_FILTER_T sample_filter,                                                            \
    cudaStream_t stream);

// Instantiations for float data / uint64_t indices over the supported
// (team size, max dataset dim) combinations.
instantiate_kernel_selection(
  32, 1024, float, uint64_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  8, 128, float, uint64_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  16, 256, float, uint64_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  32, 512, float, uint64_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
#undef instantiate_kernel_selection
}  // namespace multi_cta_search
namespace single_cta_search {
// Declarations of the explicit instantiations of the single-CTA CAGRA search
// kernel selector. `extern template` suppresses implicit instantiation in
// translation units that include this header; the matching definitions are
// compiled in separate .cu files.
#define instantiate_single_cta_select_and_run(                                                \
  TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T)                   \
  extern template void                                                                        \
  select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>(   \
    raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset,                   \
    raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph,                        \
    INDEX_T* const topk_indices_ptr,                                                          \
    DISTANCE_T* const topk_distances_ptr,                                                     \
    const DATA_T* const queries_ptr,                                                          \
    const uint32_t num_queries,                                                               \
    const INDEX_T* dev_seed_ptr,                                                              \
    uint32_t* const num_executed_iterations,                                                  \
    uint32_t topk,                                                                            \
    uint32_t num_itopk_candidates,                                                            \
    uint32_t block_size,                                                                      \
    uint32_t smem_size,                                                                       \
    int64_t hash_bitlen,                                                                      \
    INDEX_T* hashmap_ptr,                                                                     \
    size_t small_hash_bitlen,                                                                 \
    size_t small_hash_reset_interval,                                                         \
    uint32_t num_random_samplings,                                                            \
    uint64_t rand_xor_mask,                                                                   \
    uint32_t num_seeds,                                                                       \
    size_t itopk_size,                                                                        \
    size_t search_width,                                                                      \
    size_t min_iterations,                                                                    \
    size_t max_iterations,                                                                    \
    SAMPLE_FILTER_T sample_filter,                                                            \
    cudaStream_t stream);

// Instantiations for float data / uint64_t indices over the supported
// (team size, max dataset dim) combinations.
instantiate_single_cta_select_and_run(
  32, 1024, float, uint64_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_single_cta_select_and_run(
  8, 128, float, uint64_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_single_cta_select_and_run(
  16, 256, float, uint64_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_single_cta_select_and_run(
  32, 512, float, uint64_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
// Undefine the helper macro so it does not leak into every translation unit
// that includes this header (mirrors the #undef in multi_cta_search above;
// it was previously missing here).
#undef instantiate_single_cta_select_and_run
}  // namespace single_cta_search
} // namespace raft::neighbors::cagra::detail | 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_ivf_pq/test_uint8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../ann_ivf_pq.cuh"
namespace raft::neighbors::ivf_pq {
// IVF-PQ tests for <float distances, uint8_t data, int64_t indices>.
// The fixture and the TEST_BUILD_* / INSTANTIATE macros come from ../ann_ivf_pq.cuh.
using f32_u08_i64 = ivf_pq_test<float, uint8_t, int64_t>;
TEST_BUILD_SEARCH(f32_u08_i64)
TEST_BUILD_EXTEND_SEARCH(f32_u08_i64)
// Parameter set: small dims-per-cluster cases plus an enumeration of config varieties.
INSTANTIATE(f32_u08_i64, small_dims_per_cluster() + enum_variety());
} // namespace raft::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_ivf_pq/test_filter_int8_t_int64_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#undef RAFT_EXPLICIT_INSTANTIATE_ONLY // Enable instantiation of search with filter
#include "../ann_ivf_pq.cuh"
namespace raft::neighbors::ivf_pq {
// IVF-PQ filtered-search tests for <float, int8_t, int64_t>.
// RAFT_EXPLICIT_INSTANTIATE_ONLY is #undef'd above so the filtered-search
// templates can be instantiated in this translation unit.
using f32_i08_i64_filter = ivf_pq_filter_test<float, int8_t, int64_t>;
TEST_BUILD_SEARCH(f32_i08_i64_filter)
INSTANTIATE(f32_i08_i64_filter, big_dims());
} // namespace raft::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_ivf_pq/test_float_uint32_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// XXX: the uint32_t instance is not compiled in libraft.so. So we allow
// instantiating the template here.
//
// TODO: consider removing this test or consider adding an instantiation to the
// library.
#undef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "../ann_ivf_pq.cuh"
namespace raft::neighbors::ivf_pq {
// IVF-PQ tests for the uint32_t index type. As the comment above notes, this
// instance is not precompiled in libraft, so RAFT_EXPLICIT_INSTANTIATE_ONLY is
// #undef'd to allow instantiation in this translation unit.
using f32_f32_u32 = ivf_pq_test<float, float, uint32_t>;
using f32_f32_u32_filter = ivf_pq_filter_test<float, float, uint32_t>;
TEST_BUILD_SEARCH(f32_f32_u32)
TEST_BUILD_SERIALIZE_SEARCH(f32_f32_u32)
INSTANTIATE(f32_f32_u32, defaults() + var_n_probes() + var_k() + special_cases());
// Filtered-search variant runs only the default parameter set.
TEST_BUILD_SEARCH(f32_f32_u32_filter)
INSTANTIATE(f32_f32_u32_filter, defaults());
} // namespace raft::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_ivf_pq/test_int8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../ann_ivf_pq.cuh"
namespace raft::neighbors::ivf_pq {
// IVF-PQ tests for <float distances, int8_t data, int64_t indices>, including
// serialize/deserialize round-trip coverage.
using f32_i08_i64 = ivf_pq_test<float, int8_t, int64_t>;
TEST_BUILD_SEARCH(f32_i08_i64)
TEST_BUILD_SERIALIZE_SEARCH(f32_i08_i64)
INSTANTIATE(f32_i08_i64, defaults() + big_dims() + var_k());
} // namespace raft::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_ivf_pq/test_float_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../ann_ivf_pq.cuh"
namespace raft::neighbors::ivf_pq {
// IVF-PQ tests for <float, float, int64_t>: extend-and-search plus
// serialize/deserialize round-trip coverage.
using f32_f32_i64 = ivf_pq_test<float, float, int64_t>;
TEST_BUILD_EXTEND_SEARCH(f32_f32_i64)
TEST_BUILD_SERIALIZE_SEARCH(f32_f32_i64)
INSTANTIATE(f32_f32_i64, defaults() + small_dims() + big_dims_moderate_lut());
} // namespace raft::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_ivf_pq/test_filter_float_int64_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#undef RAFT_EXPLICIT_INSTANTIATE_ONLY // Enable instantiation of search with filter
#include "../ann_ivf_pq.cuh"
namespace raft::neighbors::ivf_pq {
// IVF-PQ filtered-search tests for <float, float, int64_t>.
// RAFT_EXPLICIT_INSTANTIATE_ONLY is #undef'd above so the filtered-search
// templates can be instantiated in this translation unit.
using f32_f32_i64_filter = ivf_pq_filter_test<float, float, int64_t>;
TEST_BUILD_SEARCH(f32_f32_i64_filter)
INSTANTIATE(f32_f32_i64_filter, defaults() + big_dims_moderate_lut());
} // namespace raft::neighbors::ivf_pq
} // namespace raft::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_nn_descent/test_uint8_t_uint32_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "../ann_nn_descent.cuh"
namespace raft::neighbors::experimental::nn_descent {

// NN-Descent ANN tests for <float distances, uint8_t data, uint32_t indices>.
// The fixture and `inputs` come from ../ann_nn_descent.cuh.
// Note: `using` alias instead of `typedef`, matching the ivf_pq test TUs.
using AnnNNDescentTestUI8_U32 = AnnNNDescentTest<float, uint8_t, std::uint32_t>;
TEST_P(AnnNNDescentTestUI8_U32, AnnNNDescent) { this->testNNDescent(); }
INSTANTIATE_TEST_CASE_P(AnnNNDescentTest, AnnNNDescentTestUI8_U32, ::testing::ValuesIn(inputs));

}  // namespace raft::neighbors::experimental::nn_descent
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_nn_descent/test_float_uint32_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "../ann_nn_descent.cuh"
namespace raft::neighbors::experimental::nn_descent {

// NN-Descent ANN tests for <float distances, float data, uint32_t indices>.
// The fixture and `inputs` come from ../ann_nn_descent.cuh.
// Note: `using` alias instead of `typedef`, matching the ivf_pq test TUs.
using AnnNNDescentTestF_U32 = AnnNNDescentTest<float, float, std::uint32_t>;
TEST_P(AnnNNDescentTestF_U32, AnnNNDescent) { this->testNNDescent(); }
INSTANTIATE_TEST_CASE_P(AnnNNDescentTest, AnnNNDescentTestF_U32, ::testing::ValuesIn(inputs));

}  // namespace raft::neighbors::experimental::nn_descent
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_nn_descent/test_int8_t_uint32_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "../ann_nn_descent.cuh"
namespace raft::neighbors::experimental::nn_descent {

// NN-Descent ANN tests for <float distances, int8_t data, uint32_t indices>.
// The fixture and `inputs` come from ../ann_nn_descent.cuh.
// Note: `using` alias instead of `typedef`, matching the ivf_pq test TUs.
using AnnNNDescentTestI8_U32 = AnnNNDescentTest<float, int8_t, std::uint32_t>;
TEST_P(AnnNNDescentTestI8_U32, AnnNNDescent) { this->testNNDescent(); }
INSTANTIATE_TEST_CASE_P(AnnNNDescentTest, AnnNNDescentTestI8_U32, ::testing::ValuesIn(inputs));

}  // namespace raft::neighbors::experimental::nn_descent
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_ivf_flat/test_uint8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "../ann_ivf_flat.cuh"
namespace raft::neighbors::ivf_flat {

// IVF-Flat tests for <float distances, uint8_t data, int64_t indices>.
// The fixture and `inputs` come from ../ann_ivf_flat.cuh.
// Note: `using` alias instead of `typedef`, matching the ivf_pq test TUs.
using AnnIVFFlatTestF_uint8 = AnnIVFFlatTest<float, uint8_t, std::int64_t>;
TEST_P(AnnIVFFlatTestF_uint8, AnnIVFFlat) { this->testIVFFlat(); }
INSTANTIATE_TEST_CASE_P(AnnIVFFlatTest, AnnIVFFlatTestF_uint8, ::testing::ValuesIn(inputs));

}  // namespace raft::neighbors::ivf_flat
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_ivf_flat/test_int8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "../ann_ivf_flat.cuh"
namespace raft::neighbors::ivf_flat {

// IVF-Flat tests for <float distances, int8_t data, int64_t indices>.
// The fixture and `inputs` come from ../ann_ivf_flat.cuh.
// Note: `using` alias instead of `typedef`, matching the ivf_pq test TUs.
using AnnIVFFlatTestF_int8 = AnnIVFFlatTest<float, int8_t, std::int64_t>;
TEST_P(AnnIVFFlatTestF_int8, AnnIVFFlat) { this->testIVFFlat(); }
INSTANTIATE_TEST_CASE_P(AnnIVFFlatTest, AnnIVFFlatTestF_int8, ::testing::ValuesIn(inputs));

}  // namespace raft::neighbors::ivf_flat
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_ivf_flat/test_float_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "../ann_ivf_flat.cuh"
namespace raft::neighbors::ivf_flat {

// IVF-Flat tests for <float distances, float data, int64_t indices>; this
// specialization additionally exercises the list-data packer.
// Note: `using` alias instead of `typedef`, matching the ivf_pq test TUs.
using AnnIVFFlatTestF = AnnIVFFlatTest<float, float, std::int64_t>;
TEST_P(AnnIVFFlatTestF, AnnIVFFlat)
{
  this->testIVFFlat();
  this->testPacker();
}
INSTANTIATE_TEST_CASE_P(AnnIVFFlatTest, AnnIVFFlatTestF, ::testing::ValuesIn(inputs));

}  // namespace raft::neighbors::ivf_flat
| 0 |
rapidsai_public_repos/raft/cpp/test/neighbors | rapidsai_public_repos/raft/cpp/test/neighbors/ann_ivf_flat/test_filter_float_int64_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#undef RAFT_EXPLICIT_INSTANTIATE_ONLY // Enable instantiation of search with filter
#include "../ann_ivf_flat.cuh"
namespace raft::neighbors::ivf_flat {

// IVF-Flat filtered-search tests for <float, float, int64_t>.
// RAFT_EXPLICIT_INSTANTIATE_ONLY is #undef'd above so search-with-filter
// can be instantiated in this translation unit.
// Note: `using` alias instead of `typedef`, matching the ivf_pq test TUs.
using AnnIVFFlatFilterTestF = AnnIVFFlatTest<float, float, std::int64_t>;
TEST_P(AnnIVFFlatFilterTestF, AnnIVFFlatFilter) { this->testFilter(); }
INSTANTIATE_TEST_CASE_P(AnnIVFFlatTest, AnnIVFFlatFilterTestF, ::testing::ValuesIn(inputs));

}  // namespace raft::neighbors::ivf_flat
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/entropy.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"

#include <raft/core/interruptible.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/entropy.cuh>
#include <raft/util/cudart_utils.hpp>

#include <rmm/device_uvector.hpp>

#include <gtest/gtest.h>

#include <algorithm>
#include <iostream>
#include <random>
#include <vector>
namespace raft {
namespace stats {
// Parameters for one entropy test case.
struct entropyParam {
  int nElements;       // number of labels to generate
  int lowerLabelRange; // inclusive lower bound of the generated label values
  int upperLabelRange; // inclusive upper bound of the generated label values
  double tolerance;    // allowed absolute error vs the host-computed reference
};
// test fixture class
// Test fixture: generates random labels, computes a host-side ("golden")
// entropy and the GPU result via raft::stats::entropy; the TEST_P body
// compares the two within the configured tolerance.
template <typename T>
class entropyTest : public ::testing::TestWithParam<entropyParam> {
 protected:
  entropyTest() : stream(resource::get_cuda_stream(handle)) {}

  void SetUp() override
  {
    // Fetch the test parameters.
    params          = ::testing::TestWithParam<entropyParam>::GetParam();
    nElements       = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;

    // Generate random integer labels in [lowerLabelRange, upperLabelRange].
    std::vector<int> arr1(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });

    // Golden reference, step 1: bincount of the labels. A std::vector replaces
    // the previous raw malloc'd buffer, which was never freed (memory leak).
    const int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
    std::vector<int> bincount(numUniqueClasses, 0);
    for (int i = 0; i < nElements; ++i) {
      ++bincount[arr1[i] - lowerLabelRange];
    }

    // Golden reference, step 2: aggregate entropy,
    // -sum_i p_i * log(p_i) with p_i = count_i / nElements.
    for (int i = 0; i < numUniqueClasses; ++i) {
      if (bincount[i]) {
        truthEntropy += -1 * (double(bincount[i]) / double(nElements)) *
                        (log(double(bincount[i])) - log(double(nElements)));
      }
    }

    // Copy the labels to the GPU and run the CUDA implementation.
    rmm::device_uvector<T> clusterArray(nElements, stream);
    raft::update_device(clusterArray.data(), arr1.data(), nElements, stream);
    raft::interruptible::synchronize(stream);
    computedEntropy =
      raft::stats::entropy(handle,
                           raft::make_device_vector_view<const T>(clusterArray.data(), nElements),
                           lowerLabelRange,
                           upperLabelRange);
  }

  raft::resources handle;
  entropyParam params;                 // current test parameters
  T lowerLabelRange, upperLabelRange;  // inclusive label range
  int nElements          = 0;          // number of labels
  double truthEntropy    = 0;          // host-side reference result
  double computedEntropy = 0;          // GPU result under test
  cudaStream_t stream    = 0;
};
// setting test parameter values
// Test parameter sets: {nElements, lowerLabelRange, upperLabelRange, tolerance}.
// NOTE(review): the six configurations are listed twice — presumably to run
// each case two times with fresh random labels; confirm the duplication is
// intentional.
const std::vector<entropyParam> inputs = {{199, 1, 10, 0.000001},
                                          {200, 15, 100, 0.000001},
                                          {100, 1, 20, 0.000001},
                                          {10, 1, 10, 0.000001},
                                          {198, 1, 100, 0.000001},
                                          {300, 3, 99, 0.000001},
                                          {199, 1, 10, 0.000001},
                                          {200, 15, 100, 0.000001},
                                          {100, 1, 20, 0.000001},
                                          {10, 1, 10, 0.000001},
                                          {198, 1, 100, 0.000001},
                                          {300, 3, 99, 0.000001}};
// writing the test suite
// Instantiate the entropy test suite for int labels and compare the GPU
// result with the host reference within the per-case tolerance.
// Note: `using` alias instead of `typedef` (modern idiom).
using entropyTestClass = entropyTest<int>;
TEST_P(entropyTestClass, Result) { ASSERT_NEAR(computedEntropy, truthEntropy, params.tolerance); }
INSTANTIATE_TEST_CASE_P(entropy, entropyTestClass, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/trustworthiness.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/distance.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/stats/trustworthiness_score.cuh>
#include <vector>
namespace raft {
namespace stats {
// Fixture that computes the trustworthiness score of a fixed embedding
// (50 samples: 30 original features -> 8 embedded features) and exposes the
// result through the `score` member for the TEST_F below to check.
class TrustworthinessScoreTest : public ::testing::Test {
 public:
  // Device buffers start empty; they are sized in basicTest() once the
  // host-side fixture data below is available.
  TrustworthinessScoreTest()
    : d_X(0, resource::get_cuda_stream(handle)), d_X_embedded(0, resource::get_cuda_stream(handle))
  {
  }

 protected:
  // Uploads the hard-coded dataset and its embedding to the device and runs
  // trustworthiness_score once, storing the result in `score`.
  void basicTest()
  {
    // Original high-dimensional input: 50 samples x 30 features, row-major.
    std::vector<float> X = {
      5.6142087,   8.59787,     -4.382763,   -3.6452143,  -5.8816037,  -0.6330313,  4.6920023,
      -0.79210913, 0.6106314,   2.1210914,   5.919943,    -8.43784,    -6.4819884,  0.41001374,
      -6.1052523,  -4.0825715,  -5.314755,   -2.834671,   5.751696,    -6.5012555,  -0.4719201,
      -7.53353,    7.6789393,   -1.4959852,  -5.5977287,  -9.564147,   1.2902534,   3.559834,
      -6.7659483,  8.265964,    4.595404,    9.133477,    -6.1553917,  -6.319754,   -2.9039452,
      4.4150834,   -3.094395,   -4.426273,   9.584571,    -5.64133,    6.6209483,   7.4044604,
      3.9620576,   5.639907,    10.33007,    -0.8792053,  5.143776,    -7.464049,   1.2448754,
      -5.6300974,  5.4518576,   4.119535,    6.749645,    7.627064,    -7.2298336,  1.9681473,
      -6.9083176,  6.404673,    0.07186685,  9.0994835,   8.51037,     -8.986389,   0.40534487,
      2.115397,    4.086756,    1.2284287,   -2.6272132,  0.06527536,  -9.587425,   -7.206078,
      7.864875,    7.4397306,   -6.9233336,  -2.6643622,  3.3466153,   7.0408177,   -3.6069896,
      -9.971769,   4.4075623,   7.9063697,   2.559074,    4.323717,    1.6867131,   -1.1576937,
      -9.893141,   -3.251416,   -7.4889135,  -4.0588717,  -2.73338,    -7.4852257,  3.4460473,
      9.759119,    -5.4680476,  -4.722435,   -8.032619,   -1.4598992,  4.227361,    3.135568,
      1.1950601,   1.1982028,   6.998856,    -6.131138,   -6.6921015,  0.5361224,   -7.1213965,
      -5.6104236,  -7.2212887,  -2.2710054,  8.544764,    -6.0254574,  1.4582269,   -5.5587835,
      8.031556,    -0.26328218, -5.2591386,  -9.262641,   2.8691363,   5.299787,    -9.209455,
      8.523085,    5.180329,    10.655528,   -5.7171874,  -6.7739563,  -3.6306462,  4.067106,
      -1.5912259,  -3.2345476,  8.042973,    -3.6364832,  4.1242137,   9.886953,    5.4743724,
      6.3058076,   9.369645,    -0.5175337,  4.9859877,   -7.879498,   1.358422,    -4.147944,
      3.8984218,   5.894656,    6.4903927,   8.702036,    -8.023722,   2.802145,    -7.748032,
      5.8461113,   -0.34215945, 11.298865,   1.4107164,   -9.949621,   -1.6257563,  -10.655836,
      2.4528909,   1.1570255,   5.170669,    2.8398793,   7.1838694,   9.088459,    2.631155,
      3.964414,    2.8769252,   0.04198391,  -0.16993195, 3.6747139,   -2.8377378,  6.1782537,
      10.759618,   -4.5642614,  -8.522967,   0.8614642,   6.623416,    -1.029324,   5.5488334,
      -7.804511,   2.128833,    7.9042315,   7.789576,    -2.7944536,  0.72271067,  -10.511495,
      -0.78634536, -10.661714,  2.9376361,   1.9148129,   6.22859,     0.26264945,  8.028384,
      6.8743043,   0.9351067,   7.0690722,   4.2846055,   1.4134506,   -0.18144785, 5.2778087,
      -1.7140163,  9.217541,    8.602799,    -2.6537218,  -7.8377395,  1.1244944,   5.4540544,
      -0.38506773, 3.9885726,   -10.76455,   1.4440702,   9.136163,    6.664117,    -5.7046547,
      8.038592,    -9.229767,   -0.2799413,  3.6064725,   4.187257,    1.0516582,   -2.0707326,
      -0.7615968,  -8.561018,   -3.7831352,  10.300297,   5.332594,    -6.5880876,  -4.2508664,
      1.7985519,   5.7226253,   -4.1223383,  -9.6697855,  1.4885283,   7.524974,    1.7206005,
      4.890457,    3.7264557,   0.4428284,   -9.922455,   -4.250455,   -6.4410596,  -2.107994,
      -1.4109765,  -6.1325397,  0.32883006,  6.0489736,   7.7257385,   -8.281174,   1.0129383,
      -10.792166,  8.378851,    10.802716,   9.848448,    -9.188757,   1.3151443,   1.9971865,
      -2.521849,   4.3268294,   -7.775683,   -2.2902298,  3.0824065,   -7.17559,    9.6100855,
      7.3965735,   -10.476525,  5.895973,    -3.6974669,  -7.6688933,  1.7354839,   -7.4045196,
      -1.7992063,  -4.0394845,  5.2471714,   -2.250571,   2.528036,    -8.343515,   -2.2374575,
      -10.019771,  0.73371273,  3.1853926,   2.7994921,   2.6637669,   7.620401,    7.515571,
      0.68636256,  5.834537,    4.650282,    -1.0362619,  0.4461701,   3.7870514,   -4.1340904,
      7.202998,    9.736904,    -3.005512,   -8.920467,   1.1228397,   6.2598724,   1.2812365,
      4.5442104,   -8.791537,   0.92113096,  8.464749,    8.359035,    -4.3923397,  1.2252625,
      -10.1986475, -1.4409319,  -10.013967,  3.9071581,   1.683064,    4.877419,    1.6570637,
      9.559105,    7.3546534,   0.36635467,  5.220211,    4.6303267,   0.6601065,   0.16149978,
      3.8818731,   -3.4438233,  8.42085,     8.659159,    -3.0935583,  -8.039611,   2.3060374,
      5.134666,    1.0458113,   6.0190983,   -9.143728,   0.99048865,  9.210842,    6.670241,
      -5.9614363,  0.8747396,   7.078824,    8.067469,    -10.314754,  0.45977542,  -9.28306,
      9.1838665,   9.318644,    7.189082,    -11.092555,  1.0320464,   3.882163,    0.10953151,
      7.9029684,   -6.9068265,  -1.3526366,  5.3996363,   -8.430931,   11.452577,   6.39663,
      -11.090514,  4.6662245,   -3.1268113,  -8.357452,   2.2276728,   -10.357126,  -0.9291848,
      -3.4193344,  3.1289792,   -2.5030103,  6.772719,    11.457757,   -4.2125936,  -6.684548,
      -4.7611327,  3.6960156,   -2.3030636,  -3.0591488,  10.452471,   -4.1267314,  5.66614,
      7.501461,    5.072407,    6.636537,    8.990381,    -0.2559256,  4.737867,    -6.2149944,
      2.535682,    -5.5484023,  5.7113924,   3.4742818,   7.9915137,   7.0052586,   -7.156467,
      1.4354781,   -8.286235,   5.7523417,   -2.4175215,  9.678009,    0.05066403,  -9.645226,
      -2.2658763,  -9.518178,   4.493372,    2.3232365,   2.1659086,   0.42507997,  8.360246,
      8.23535,     2.6878164,   5.236947,    3.4924245,   -0.6089895,  0.8884741,   4.359464,
      -4.6073823,  7.83441,     8.958755,    -3.4690795,  -9.182282,   1.2478025,   5.6311107,
      -1.2408862,  3.6316886,   -8.684654,   2.1078515,   7.2813864,   7.9265943,   -3.6135032,
      0.4571511,   8.493568,    10.496853,   -7.432897,   0.8625995,   -9.607528,   7.2899456,
      8.83158,     8.908199,    -10.300263,  1.1451302,   3.7871468,   -0.97040755, 5.7664757,
      -8.9688,     -2.146672,   5.9641485,   -6.2908535,  10.126465,   6.1553903,   -12.066902,
      6.301596,    -5.0419583,  -8.228695,   2.4879954,   -8.918582,   -3.7434099,  -4.1593685,
      3.7431836,   -1.1704745,  0.5524103,   9.109399,    9.571567,    -11.209955,  1.2462777,
      -9.554555,   9.091726,    11.477966,   7.630937,    -10.450911,  1.9205878,   5.358983,
      -0.44546837, 6.7611346,   -9.74753,    -0.5939732,  3.8892255,   -6.437991,   10.294727,
      5.6723895,   -10.7883,    6.192348,    -5.293862,   -10.811491,  1.0194173,   -7.074576,
      -3.192368,   -2.5231771,  4.2791643,   -0.53309685, 0.501366,    9.636625,    7.710316,
      -6.4219728,  1.0975566,   -8.218886,   6.9011984,   9.873679,    8.903804,    -9.316832,
      1.2404599,   4.9039655,   1.2272617,   4.541515,    -5.2753224,  -3.2196746,  3.1303136,
      -7.285681,   9.041425,    5.6417427,   -9.93667,    5.7548947,   -5.113397,   -8.544622,
      4.182665,    -7.7709813,  -3.2810235,  -3.312072,   3.8900535,   -2.0604856,  6.709082,
      -8.461194,   1.2666026,   4.8770437,   2.6955879,   3.0340345,   -1.1614609,  -3.536341,
      -7.090382,   -5.36146,    9.072544,    6.4554095,   -4.4728956,  -1.88395,    3.1095037,
      8.782348,    -3.316743,   -8.65248,    1.6802986,   8.186188,    2.1783829,   4.931278,
      4.158475,    1.4033595,   -11.320101,  -3.7084908,  -6.740436,   -2.5555193,  -1.0451177,
      -6.5569925,  0.82810307,  8.505919,    8.332857,    -9.488569,   -0.21588463, -8.056692,
      8.493993,    7.6401625,   8.812983,    -9.377281,   2.4369764,   3.1766508,   0.6300803,
      5.6666765,   -7.913654,   -0.42301777, 4.506412,    -7.8954244,  10.904591,   5.042256,
      -9.626183,   8.347351,    -3.605006,   -7.923387,   1.1024277,   -8.705793,   -2.5151258,
      -2.5066147,  4.0515003,   -2.060757,   6.2635093,   8.286584,    -6.0509276,  -6.76452,
      -3.1158175,  1.6578803,   -1.4608748,  -1.24211,    8.151246,    -4.2970877,  6.093071,
      7.4911637,   4.51018,     4.8425875,   9.211085,    -2.4386222,  4.5830803,   -5.6079445,
      2.3713675,   -4.0707507,  3.1787417,   5.462342,    6.915912,    6.3928423,   -7.2970796,
      5.0112796,   -9.140893,   4.9990606,   0.38391754,  7.7088532,   1.9340848,   8.18833,
      8.16617,     -9.42086,    -0.3388326,  -9.659727,   8.243045,    8.099073,    8.439428,
      -7.038694,   2.1077902,   3.3866816,   -1.9975324,  7.4972878,   -7.2525196,  -1.553731,
      4.08758,     -6.6922374,  9.50525,     4.026735,    -9.243538,   7.2740564,   -3.9319072,
      -6.3228955,  1.6693478,   -7.923119,   -3.7423058,  -2.2813146,  5.3469067,   -1.8285407,
      3.3118162,   8.826356,    -4.4641976,  -6.4751124,  -9.200089,   -2.519147,   4.225298,
      2.4105988,   -0.4344186,  0.53441775,  5.2836394,   -8.2816105,  -4.996147,   -1.6870759,
      -7.8543897,  -3.9788852,  -7.0346904,  -3.1289773,  7.4567637,   -5.6227813,  1.0709786,
      -8.866012,   8.427324,    -1.1755563,  -5.789216,   -8.197835,   5.3342214,   6.0646234,
      -6.8975716,  7.717031,    3.480355,    8.312151,    -3.6645212,  -3.0976524,  -8.090359,
      -1.9176173,  2.4257212,   1.9700835,   0.4098958,   2.1341088,   7.652741,    -9.9595585,
      -5.989757,   0.10119354,  -7.935407,   -5.792786,   -5.22783,    -4.318978,   5.414037,
      -6.4621663,  1.670883,    -6.9224787,  8.696932,    -2.0214002,  -6.6681314,  -8.326418,
      4.9049683,   5.4442496,   -6.403739,   7.5822453,   7.0972915,   -9.072851,   -0.23897195,
      1.7662339,   5.3096304,   1.983179,    -2.222645,   -0.34700772, -9.094717,   -6.107907,
      9.525174,    8.1550665,   -5.6940084,  -4.1636486,  1.7360662,   8.528821,    -3.7299833,
      -9.341266,   2.608542,    9.108706,    0.7978509,   4.2488184,   2.454484,    0.9446999,
      -10.106636,  -3.8973773,  -6.6566644,  -4.5647273,  -0.99837756, -6.568582,   9.324853,
      -7.9020953,  2.0910501,   2.2896829,   1.6790711,   1.3159255,   -3.5258796,  1.8898442,
      -8.105812,   -4.924962,   8.771129,    7.1202874,   -5.991957,   -3.4106019,  2.4450088,
      7.796387,    -3.055946,   -7.8971434,  1.9856719,   9.001636,    1.8511922,   3.019749,
      3.1227696,   0.4822102,   -10.021213,  -3.530504,   -6.225959,   -3.0029628,  -1.7881511,
      -7.3879776,  1.3925704,   9.499782,    -3.7318087,  -3.7074296,  -7.7466836,  -1.5284524,
      4.0535855,   3.112011,    0.10340207,  -0.5429599,  6.67026,     -9.155924,   -4.924038,
      0.64248866,  -10.0103655, -3.2742946,  -4.850029,   -3.6707063,  8.586258,    -5.855605,
      4.906918,    -6.7813993,  7.9938135,   -2.5473144,  -5.688948,   -7.822478,   2.1421318,
      4.66659,     -9.701272,   9.549149,    0.8998125,   -8.651497,   -0.56899565, -8.639817,
      2.3088377,   2.1264515,   3.2764478,   2.341989,    8.594338,    8.630639,    2.8440373,
      6.2043204,   4.433932,    0.6320018,   -1.8179281,  5.09452,     -1.5741565,  8.153934,
      8.744339,    -3.6945698,  -8.883078,   1.5329908,   5.2745943,   0.44716078,  4.8809066,
      -7.9594903,  1.134374,    9.233994,    6.5528665,   -4.520542,   9.477355,    -8.622195,
      -0.23191702, 2.0485356,   3.9379985,   1.5916302,   -1.4516805,  -0.0843819,  -7.8554378,
      -5.88308,    7.999766,    6.2572145,   -5.585321,   -4.0097756,  0.42382592,  6.160884,
      -3.631315,   -8.333449,   2.770595,    7.8495173,   3.3331623,   4.940415,    3.6207345,
      -0.037517,   -11.034698,  -3.185103,   -6.614664,   -3.2177854,  -2.0792234,  -6.8879867,
      7.821685,    -8.455084,   1.0784642,   4.0033927,   2.7343264,   2.6052725,   -4.1224284,
      -0.89305353, -6.8267674,  -4.9715133,  8.880253,    5.6994023,   -5.9695024,  -4.9181266,
      1.3017995,   7.972617,    -3.9452884,  -10.424556,  2.4504194,   6.21529,     0.93840516,
      4.2070026,   6.159839,    0.91979957,  -8.706724,   -4.317946,   -6.6823545,  -3.0388,
      -2.464262,   -7.3716645,  1.3926703,   6.544412,    -5.6251183,  -5.122411,   -8.622049,
      -2.3905911,  3.9138813,   1.9779967,   -0.05011125, 0.13310997,  7.229751,    -9.742043,
      -8.08724,    1.2426697,   -7.9230795,  -3.3162494,  -7.129571,   -3.5488048,  7.4701195,
      -5.2357526,  0.5917681,   -6.272206,   6.342328,    -2.909731,   -4.991607,   -8.845513,
      3.3228495,   7.033246,    -7.8180246,  8.214469,    6.3910093,   9.185153,    -6.20472,
      -7.713809,   -3.8481297,  3.5579286,   0.7078448,   -3.2893546,  7.384514,    -4.448121,
      3.0104196,   9.492943,    8.024847,    4.9114385,   9.965594,    -3.014036,   5.182494,
      -5.8806014,  2.5312455,   -5.9926524,  4.474469,    6.3717875,   6.993105,    6.493093,
      -8.935534,   3.004074,    -8.055647,   8.315765,    -1.3026813,  8.250377,    0.02606229,
      6.8508425,   9.655665,    -7.0116496,  -0.41060972, -10.049198,  7.897801,    6.7791023,
      8.3362,      -9.821014,   2.491157,    3.5160472,   -1.6228812,  7.398063,    -8.769123,
      -3.1743705,  3.2827861,   -6.497855,   10.831924,   5.2761307,   -9.704417,   4.3817043,
      -3.9841619,  -8.111647,   1.1883026,   -8.115312,   -2.9240117,  -5.8879666,  4.20928,
      -0.3587938,  6.935672,    -10.177582,  0.48819053,  3.1250648,   2.9306343,   3.082544,
      -3.477687,   -1.3768549,  -7.4922366,  -3.756631,   10.039836,   3.6670392,   -5.9761434,
      -4.4728765,  3.244255,    7.027899,    -2.3806512,  -10.4100685, 1.605716,    7.7953773,
      0.5408159,   1.7156523,   3.824097,    -1.0604783,  -10.142124,  -5.246805,   -6.5283823,
      -4.579547,   -2.42714,    -6.709197,   2.7782338,   7.33353,     -6.454507,   -2.9929368,
      -7.8362985,  -2.695445,   2.4900775,   1.6682367,   0.4641757,   -1.0495365,  6.9631333,
      -9.291356,   -8.23837,    -0.34263706, -8.275113,   -2.8454232,  -5.0864096,  -2.681942,
      7.5450225,   -6.2517986,  0.06810654,  -6.470652,   4.9042645,   -1.8369255,  -6.6937943,
      -7.9625087,  2.8510258,   6.180508,    -8.282598,   7.919079,    1.4897474,   6.7217417,
      -4.2459426,  -4.114431,   -8.375707,   -2.143264,   5.6972933,   1.5574739,   0.39375135,
      1.7930849,   5.1737595,   -7.826241,   -5.160268,   -0.80433255, -7.839536,   -5.2620406,
      -5.4643164,  -3.185536,   6.620315,    -7.065227,   1.0524757,   -6.125088,   5.7126627,
      -1.6161644,  -3.852159,   -9.164279,   2.7005782,   5.946544,    -8.468236,   8.2145405,
      1.1035942,   6.590157,    -4.0461283,  -4.8090615,  -7.6702685,  -2.1121511,  5.1147075,
      1.6128504,   2.0064135,   1.0544407,   6.0038295,   -7.8282537,  -4.801278,   0.32349443,
      -8.0649805,  -4.372714,   -5.61336,    -5.21394,    8.176595,    -5.4753284,  1.7800134,
      -8.267283,   7.2133374,   -0.16594432, -6.317046,   -9.490406,   4.1261597,   5.473317,
      -7.7551675,  7.007468,    7.478628,    -8.801905,   0.10975724,  3.5478222,   4.797803,
      1.3825226,   -3.357369,   0.99262005,  -6.94877,    -5.4781394,  9.632604,    5.7492557,
      -5.9014316,  -3.1632116,  2.340859,    8.708098,    -3.1255999,  -8.848661,   4.5612836,
      8.455157,    0.73460823,  4.112301,    4.392744,    -0.30759293, -6.8036823,  -3.0331545,
      -8.269506,   -2.82415,    -0.9411246,  -5.993506,   2.1618164,   -8.716055,   -0.7432543,
      -10.255819,  3.095418,    2.5131428,   4.752442,    0.9907621,   7.8279433,   7.85814,
      0.50430876,  5.2840405,   4.457291,    0.03330028,  -0.40692952, 3.9244103,   -2.117118,
      7.6977615,   8.759009,    -4.2157164,  -9.136053,   3.247858,    4.668686,    0.76162136,
      5.3833632,   -9.231471,   0.44309422,  8.380872,    6.7211227,   -3.091507,   2.173508,
      -9.038242,   -1.3666698,  -9.819077,   0.37825826,  2.3898845,   4.2440815,   1.9161536,
      7.24787,     6.9124637,   1.6238527,   5.1140285,   3.1935842,   1.02845,     -1.1273454,
      5.638998,    -2.497932,   8.342559,    8.586319,    -2.9069402,  -7.6387944,  3.5975037,
      4.4115705,   0.41506064,  4.9078383,   -9.68327,    1.8159529,   9.744613,    8.40622,
      -4.495336,   9.244892,    -8.789869,   1.3158468,   4.018167,    3.3922846,   2.652022,
      -2.7495477,  0.2528986,   -8.268324,   -6.004913,   10.428784,   6.6580734,   -5.537176,
      -1.7177434,  2.7504628,   6.7735,      -2.4454272,  -9.998361,   2.9483433,   6.8266654,
      2.3787718,   4.472637,    2.5871701,   0.7355365,   -7.7027745,  -4.1879907,  -7.172832,
      -4.1843605,  -0.03646783, -5.419406,   6.958486,    11.011111,   -7.1821184,  -7.956423,
      -3.408451,   4.6850276,   -2.348787,   -4.398289,   6.9787564,   -3.8324208,  5.967827,
      8.433518,    4.660108,    5.5657144,   9.964243,    -1.3515275,  6.404833,    -6.4805903,
      2.4379845,   -6.0816774,  1.752272,    5.3771873,   6.9613523,   6.9788294,   -6.3894596,
      3.7521114,   -6.8034263,  6.4458385,   -0.7233525,  10.512529,   4.362273,    9.231461,
      -6.3382263,  -7.659,      -3.461823,   4.71463,     0.17817476,  -3.685746,   7.2962036,
      -4.6489477,  5.218017,    11.546999,   4.7218375,   6.8498397,   9.281103,    -3.900459,
      6.844054,    -7.0886965,  -0.05019227, -8.233724,   5.5808983,   6.374517,    8.321048,
      7.969449,    -7.3478637,  1.4917561,   -8.003144,   4.780668,    -1.1981848,  7.753739,
      2.0260844,   -8.880096,   -3.4258451,  -7.141975,   1.9637157,   1.814725,    5.311151,
      1.4831505,   7.8483663,   7.257948,    1.395786,    6.417756,    5.376912,    0.59505713,
      0.00062552,  3.6634305,   -4.159713,   7.3571978,   10.966816,   -2.5419605,  -8.466229,
      1.904205,    5.6338267,   -0.52567476, 5.59736,     -8.361799,   0.5009981,   8.460681,
      7.3891273,   -3.5272243,  5.0552278,   9.921456,    -7.69693,    -7.286378,   -1.9198836,
      3.1666567,   -2.5832257,  -2.2445817,  9.888111,    -5.076563,   5.677401,    7.497946,
      5.662994,    5.414262,    8.566503,    -2.5530663,  7.1032815,   -6.0612082,  1.3419591,
      -4.9595256,  4.3377542,   4.3790717,   6.793512,    8.383502,    -7.1278043,  3.3240774,
      -9.379446,   6.838661,    -0.81241214, 8.694813,    0.79141915,  7.632467,    8.575382,
      -8.533798,   0.28954387,  -7.5675836,  5.8653326,   8.97235,     7.1649346,   -10.575289,
      0.9359381,   5.02381,     -0.5609511,  5.543464,    -7.69131,    -2.1792977,  2.4729247,
      -6.1917787,  10.373678,   7.6549597,   -8.809486,   5.5657206,   -3.3169382,  -8.042887,
      2.0874746,   -7.079005,   -3.33398,    -3.6843317,  4.0172358,   -2.0754814,  1.1726758,
      7.4618697,   6.9483604,   -8.469206,   0.7401797,   -10.318176,  8.384557,    10.5476265,
      9.146971,    -9.250223,   0.6290606,   4.4941425,   -0.7514017,  7.2271705,   -8.309598,
      -1.4761636,  4.0140634,   -6.021102,   9.132852,    5.6610966,   -11.249811,  8.359293,
      -1.9445792,  -7.7393436,  -0.3931331,  -8.824441,   -2.5995944,  -2.5714035,  4.140213,
      -3.6863053,  5.517265,    9.020411,    -4.9286127,  -7.871219,   -3.7446704,  2.5179656,
      -1.4543481,  -2.2703636,  7.010597,    -3.6436229,  6.753862,    7.4129915,   7.1406755,
      5.653706,    9.5445175,   0.15698843,  4.761813,    -7.698002,   1.6870106,   -4.5410123,
      4.171763,    5.3747005,   6.341021,    7.456738,    -8.231657,   2.763487,    -9.208167,
      6.676799,    -1.1957736,  10.062605,   4.0975976,   7.312957,    -2.4981596,  -2.9658387,
      -8.150425,   -2.1075552,  2.64375,     1.6636052,   1.1483809,   0.09276015,  5.8556347,
      -7.8481026,  -5.9913163,  -0.02840613, -9.937289,   -1.0486673,  -5.2340155,  -3.83912,
      7.7165728,   -8.409944,   0.80863273,  -6.9119215,  7.5712357,   0.36031485,  -6.056131,
      -8.470033,   1.8678337,   3.0121377,   -7.3096333,  8.205484,    5.262654,    8.774514,
      -4.7603083,  -7.2096143,  -4.437014,   3.6080024,   -1.624254,   -4.2787876,  8.880863,
      -4.8984556,  5.1782074,   9.944454,    3.911282,    3.5396595,   8.867042,    -1.2006199,
      5.393288,    -5.6455317,  0.7829499,   -4.0338907,  2.479272,    6.5080743,   8.582535,
      7.0097537,   -6.9823785,  3.984318,    -7.225381,   5.3135114,   -1.0391048,  8.951443,
      -0.70119005, -8.510742,   -0.42949116, -10.9224825, 2.8176029,   1.6800792,   5.778404,
      1.7269998,   7.1975236,   7.7258267,   2.7632928,   5.3399253,   3.4650044,   0.01971426,
      -1.6468811,  4.114996,    -1.5110453,  6.8689218,   8.269899,    -3.1568048,  -7.0344677,
      1.2911975,   5.950357,    0.19028673,  4.657226,    -8.199647,   2.246055,    8.989509,
      5.3101015,   -4.2400866};
    // Low-dimensional embedding of X: 50 samples x 8 features, row-major.
    std::vector<float> X_embedded = {
      -0.41849962, -0.53906363, 0.46958843,  -0.35832694, -0.23779503, -0.29751351, -0.01072748,
      -0.21353109, -0.54769957, -0.55086273, 0.37093949,  -0.12714292, -0.06639574, -0.36098689,
      -0.13060696, -0.07362658, -1.01205945, -0.39285606, 0.2864089,   -0.32031146, -0.19595343,
      0.08900568,  -0.04813879, -0.06563424, -0.42655188, -0.69014251, 0.51459783,  -0.1942696,
      -0.07767916, -0.6119386,  0.04813685,  -0.22557008, -0.56890118, -0.60293794, 0.43429622,
      -0.09240723, -0.00624062, -0.25800395, -0.1886092,  0.01655941,  -0.01961523, -0.14147359,
      0.41414487,  -0.8512944,  -0.61199242, -0.18586016, 0.14024924,  -0.41635606, -0.02890144,
      0.1065347,   0.39700791,  -1.14060664, -0.95313865, 0.14416681,  0.17306046,  -0.53189689,
      -0.98987544, -0.67918193, 0.41787854,  -0.20878236, -0.06612862, 0.03502904,  -0.03765266,
      -0.0980606,  -0.00971657, 0.29432917,  0.36575687,  -1.1645509,  -0.89094597, 0.03718805,
      0.2310573,   -0.38345811, -0.10401925, -0.10653082, 0.38469055,  -0.88302094, -0.80197543,
      0.03548668,  0.02775662,  -0.54374295, 0.03379983,  0.00923623,  0.29320273,  -1.05263519,
      -0.93360096, 0.03778313,  0.12360487,  -0.56437284, 0.0644429,   0.33432651,  0.36450726,
      -1.22978747, -0.83822101, -0.18796451, 0.34888434,  -0.3801491,  -0.45327303, -0.59747899,
      0.39697698,  -0.15616602, -0.06159166, -0.40301991, -0.11725303, -0.11913263, -0.12406619,
      -0.11227967, 0.43083835,  -0.90535849, -0.81646025, 0.10012121,  -0.0141237,  -0.63747931,
      0.04805023,  0.34190539,  0.50725192,  -1.17861414, -0.74641538, -0.09333111, 0.27992678,
      -0.56214809, 0.04970971,  0.36249384,  0.57705611,  -1.16913795, -0.69849908, 0.10957897,
      0.27983218,  -0.62088525, 0.0410459,   0.23973398,  0.40960434,  -1.14183664, -0.83321381,
      0.02149482,  0.21720445,  -0.49869928, -0.95655465, -0.51680422, 0.45761383,  -0.08351214,
      -0.12151554, 0.00819737,  -0.20813803, -0.01055793, 0.25319234,  0.36154974,  0.1822421,
      -1.15837133, -0.92209691, -0.0501582,  0.08535917,  -0.54003763, -1.08675635, -1.04009593,
      0.09408128,  0.07009826,  -0.01762833, -0.19180447, -0.18029785, -0.20342001, 0.04034991,
      0.1814747,   0.36906669,  -1.13532007, -0.8852452,  0.0782818,   0.16825101,  -0.50301319,
      -0.29128098, -0.65341312, 0.51484352,  -0.38758236, -0.22531103, -0.55021971, 0.10804344,
      -0.3521522,  -0.38849035, -0.74110794, 0.53761131,  -0.25142813, -0.1118066,  -0.47453368,
      0.06347904,  -0.23796193, -1.02682328, -0.47594091, 0.39515916,  -0.2782529,  -0.16566519,
      0.08063579,  0.00810116,  -0.06213913, -1.059654,   -0.62496334, 0.53698546,  -0.11806234,
      0.00356161,  0.11513405,  -0.14213292, 0.04102662,  -0.36622161, -0.73686272, 0.48323864,
      -0.27338892, -0.14203401, -0.41736352, 0.03332564,  -0.21907479, -0.06396769, 0.01831361,
      0.46263444,  -1.01878166, -0.86486858, 0.17622118,  -0.01249686, -0.74530888, -0.9354887,
      -0.5027945,  0.38170099,  -0.15547098, 0.00677824,  -0.04677663, -0.13541745, 0.07253501,
      -0.97933143, -0.58001202, 0.48235369,  -0.18836913, -0.02430783, 0.07572441,  -0.08101331,
      0.00630076,  -0.16881248, -0.67989182, 0.46083611,  -0.43910736, -0.29321918, -0.38735861,
      0.07669903,  -0.29749861, -0.40047669, -0.56722462, 0.33168188,  -0.13118173, -0.06672747,
      -0.56856316, -0.26269144, -0.14236671, 0.10651901,  0.4962585,   0.38848072,  -1.06653547,
      -0.64079332, -0.47378591, 0.43195483,  -0.04856951, -0.9840439,  -0.70610428, 0.34028092,
      -0.2089237,  -0.05382041, 0.01625874,  -0.02080803, -0.12535211, -0.04146428, -1.24533033,
      0.48944879,  0.0578458,   0.26708388,  -0.90321028, 0.35377088,  -0.36791429, -0.35382384,
      -0.52748734, 0.42854419,  -0.31744713, -0.19174226, -0.39073724, -0.03258846, -0.19978228,
      -0.36185205, -0.57412046, 0.43681973,  -0.25414538, -0.12904905, -0.46334973, -0.03123853,
      -0.11303604, -0.87073672, -0.45441297, 0.41825858,  -0.25303507, -0.21845073, 0.10248682,
      -0.11045569, -0.10002795, -0.00572806, 0.16519061,  0.42651513,  -1.11417019, -0.83789682,
      0.02995787,  0.16843079,  -0.53874511, 0.03056994,  0.17877036,  0.49632853,  -1.03276777,
      -0.74778616, -0.03971953, 0.10907949,  -0.67385727, -0.9523471,  -0.56550741, 0.40409449,
      -0.2703723,  -0.10175014, 0.13605487,  -0.06306008, -0.01768126, -0.4749442,  -0.56964815,
      0.39389887,  -0.19248079, -0.04161081, -0.38728487, -0.20341556, -0.12656988, -0.35949609,
      -0.46137866, 0.28798422,  -0.06603147, -0.04363992, -0.60343552, -0.23565227, -0.10242701,
      -0.06792886, 0.09689897,  0.33259571,  -0.98854214, -0.84444433, 0.00673901,  0.13457057,
      -0.43145794, -0.51500046, -0.50821936, 0.38000089,  0.0132636,   0.0580942,   -0.40157595,
      -0.11967677, 0.02549113,  -0.10350953, 0.22918226,  0.40411913,  -1.05619383, -0.71218503,
      -0.02197581, 0.26422262,  -0.34765676, 0.06601537,  0.21712676,  0.34723559,  -1.20982027,
      -0.95646334, 0.00793948,  0.27620381,  -0.43475035, -0.67326003, -0.6137197,  0.43724492,
      -0.17666136, -0.06591748, -0.18937394, -0.07400128, -0.06881691, -0.5201112,  -0.61088628,
      0.4225319,   -0.18969463, -0.06921366, -0.33993208, -0.06990873, -0.10288513, -0.70659858,
      -0.56003648, 0.46628812,  -0.16090363, -0.0185108,  -0.1431348,  -0.1128775,  -0.0078648,
      -0.02323332, 0.04292452,  0.39291084,  -0.94897962, -0.63863206, -0.16546988, 0.23698957,
      -0.30633628};

    auto stream = resource::get_cuda_stream(handle);

    // Size the device buffers and copy the host fixtures over.
    d_X.resize(X.size(), stream);
    d_X_embedded.resize(X_embedded.size(), stream);
    raft::update_device(d_X.data(), X.data(), X.size(), stream);
    raft::update_device(d_X_embedded.data(), X_embedded.data(), X_embedded.size(), stream);

    auto n_sample            = 50;
    auto n_features_origin   = 30;
    auto n_features_embedded = 8;

    // euclidean test: L2 distance in the original space, neighborhood size 5.
    score = trustworthiness_score<raft::distance::DistanceType::L2SqrtUnexpanded, float>(
      handle,
      raft::make_device_matrix_view<const float>(d_X.data(), n_sample, n_features_origin),
      raft::make_device_matrix_view<const float>(
        d_X_embedded.data(), n_sample, n_features_embedded),
      5);
  }

  void SetUp() override { basicTest(); }
  void TearDown() override {}

 protected:
  raft::resources handle;                   // owns the CUDA stream used throughout
  rmm::device_uvector<float> d_X;           // device copy of X
  rmm::device_uvector<float> d_X_embedded;  // device copy of X_embedded
  double score;                             // trustworthiness score computed in basicTest()
};
typedef TrustworthinessScoreTest TrustworthinessScoreTestF;
// The expected bracket (0.9375, 0.9379) pins the score for this fixed dataset
// (presumably obtained from a reference implementation — confirm provenance).
TEST_F(TrustworthinessScoreTestF, Result) { ASSERT_TRUE(0.9375 < score && score < 0.9379); }
}; // namespace stats
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/sum.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/eltwise.cuh>
#include <raft/stats/sum.cuh>
#include <raft/util/cudart_utils.hpp>

#include <rmm/device_uvector.hpp>

#include <gtest/gtest.h>

#include <vector>
namespace raft {
namespace stats {
// Parameters for one column-sum test case.
template <typename T>
struct SumInputs {
  T tolerance;                  // comparison tolerance used by CompareApprox
  int rows, cols;               // matrix shape
  unsigned long long int seed;  // RNG seed (unused here; input is all ones)
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SumInputs<T>& dims)
{
return os;
}
// Fixture: fills a rows x cols matrix with ones, reduces each column on the
// device, and exposes the per-column sums for the TEST_Ps below (each sum
// must equal `rows`).
template <typename T>
class SumTest : public ::testing::TestWithParam<SumInputs<T>> {
 public:
  // Member-initializer list is written in declaration order (stream before
  // params) so the list matches the order initialization actually happens in.
  SumTest()
    : stream(resource::get_cuda_stream(handle)),
      params(::testing::TestWithParam<SumInputs<T>>::GetParam()),
      rows(params.rows),
      cols(params.cols),
      data(rows * cols, stream),
      sum_act(cols, stream)
  {
  }

 protected:
  void SetUp() override
  {
    int len = rows * cols;
    // Heap-allocated host buffer: `T data_h[len]` was a variable-length
    // array, which is a compiler extension (not standard C++) and can
    // overflow the stack for the larger shapes (1024 x 256 doubles = 2 MiB).
    std::vector<T> data_h(len, T(1));
    raft::update_device(data.data(), data_h.data(), len, stream);
    sum(handle,
        raft::make_device_matrix_view<const T>(data.data(), rows, cols),
        raft::make_device_vector_view(sum_act.data(), cols));
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;                // provides the CUDA stream
  cudaStream_t stream;                   // cached stream handle
  SumInputs<T> params;                   // current test configuration
  int rows, cols;                        // matrix shape
  rmm::device_uvector<T> data, sum_act;  // input matrix and per-column sums
};
// Test configurations: {tolerance, rows, cols, seed}.
const std::vector<SumInputs<float>> inputsf = {{0.05f, 1024, 32, 1234ULL},
                                               {0.05f, 1024, 256, 1234ULL}};

const std::vector<SumInputs<double>> inputsd = {{0.05, 1024, 32, 1234ULL},
                                                {0.05, 1024, 256, 1234ULL}};

// The input matrix is all ones, so every column sum must equal the row count.
typedef SumTest<float> SumTestF;
TEST_P(SumTestF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(
    float(params.rows), sum_act.data(), params.cols, raft::CompareApprox<float>(params.tolerance)));
}

typedef SumTest<double> SumTestD;
TEST_P(SumTestD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(double(params.rows),
                                sum_act.data(),
                                params.cols,
                                raft::CompareApprox<double>(params.tolerance)));
}

INSTANTIATE_TEST_CASE_P(SumTests, SumTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(SumTests, SumTestD, ::testing::ValuesIn(inputsd));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/minmax.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <limits>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/minmax.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <stdio.h>
#include <stdlib.h>
namespace raft {
namespace stats {
///@todo: need to add tests for verifying the column subsampling feature
// Parameters for one per-column min/max test case.
template <typename T>
struct MinMaxInputs {
  T tolerance;                  // comparison tolerance used by CompareApprox
  int rows, cols;               // matrix shape
  unsigned long long int seed;  // RNG seed for data and NaN mask
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MinMaxInputs<T>& dims)
{
return os;
}
// Seeds the per-column reduction results: each column's running minimum is
// set to init_val and its running maximum to -init_val, so the first real
// value observed by naiveMinMaxKernel always wins the atomic min/max.
template <typename T>
RAFT_KERNEL naiveMinMaxInitKernel(int ncols, T* globalmin, T* globalmax, T init_val)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;  // one thread per column
  if (col < ncols) {
    globalmin[col] = init_val;
    globalmax[col] = -init_val;
  }
}
// Reference reduction: one thread per matrix element, folding each value into
// its column's running min/max with atomics.
template <typename T>
RAFT_KERNEL naiveMinMaxKernel(const T* data, int nrows, int ncols, T* globalmin, T* globalmax)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  // Data is column-major (f-contiguous, see the minmax call in the fixture),
  // so consecutive tids walk down a column and tid / nrows recovers the column.
  int col = tid / nrows;
  if (col < ncols) {
    T val = data[tid];
    // NaNs are skipped, matching the semantics expected of raft::stats::minmax.
    if (!isnan(val)) {
      raft::myAtomicMin(&globalmin[col], val);
      raft::myAtomicMax(&globalmax[col], val);
    }
  }
}
// Host driver for the reference min/max: initializes the per-column results,
// then reduces all elements. Both launches are asynchronous on `stream`.
template <typename T>
void naiveMinMax(
  const T* data, int nrows, int ncols, T* globalmin, T* globalmax, cudaStream_t stream)
{
  const int TPB = 128;
  // Phase 1: one thread per column seeds min/max with +max / -max.
  int nblks  = raft::ceildiv(ncols, TPB);
  T init_val = std::numeric_limits<T>::max();
  naiveMinMaxInitKernel<<<nblks, TPB, 0, stream>>>(ncols, globalmin, globalmax, init_val);
  RAFT_CUDA_TRY(cudaGetLastError());
  // Phase 2: one thread per matrix element performs the atomic reduction.
  nblks = raft::ceildiv(nrows * ncols, TPB);
  naiveMinMaxKernel<<<nblks, TPB, 0, stream>>>(data, nrows, ncols, globalmin, globalmax);
  RAFT_CUDA_TRY(cudaGetLastError());
}
// Overwrites every element whose mask entry is false with `nan`, leaving the
// masked (true) entries untouched. One thread per element.
template <typename T>
RAFT_KERNEL nanKernel(T* data, const bool* mask, int len, T nan)
{
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < len && !mask[idx]) { data[idx] = nan; }
}
// Fixture: generates random data with injected NaNs, computes per-column
// min/max with both the naive reference kernels above and
// raft::stats::minmax, and exposes both results for the TEST_Ps to compare.
template <typename T>
class MinMaxTest : public ::testing::TestWithParam<MinMaxInputs<T>> {
 protected:
  // Result buffers start empty; they are sized in SetUp() once params are known.
  MinMaxTest()
    : minmax_act(0, resource::get_cuda_stream(handle)),
      minmax_ref(0, resource::get_cuda_stream(handle))
  {
  }

  void SetUp() override
  {
    auto stream = resource::get_cuda_stream(handle);
    params      = ::testing::TestWithParam<MinMaxInputs<T>>::GetParam();
    raft::random::RngState r(params.seed);
    int len = params.rows * params.cols;
    rmm::device_uvector<T> data(len, stream);
    rmm::device_uvector<bool> mask(len, stream);
    // Layout of both result buffers: [min_0..min_{cols-1}, max_0..max_{cols-1}].
    minmax_act.resize(2 * params.cols, stream);
    minmax_ref.resize(2 * params.cols, stream);
    // Standard-normal data plus a bernoulli mask controlled by nan_prob.
    normal(handle, r, data.data(), len, (T)0.0, (T)1.0);
    T nan_prob = 0.01;
    bernoulli(handle, r, mask.data(), len, nan_prob);
    const int TPB = 256;
    // Replace entries whose mask is false with NaN; both implementations are
    // expected to ignore NaNs, so their results must still agree.
    nanKernel<<<raft::ceildiv(len, TPB), TPB, 0, stream>>>(
      data.data(), mask.data(), len, std::numeric_limits<T>::quiet_NaN());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    // Reference result.
    naiveMinMax(data.data(),
                params.rows,
                params.cols,
                minmax_ref.data(),
                minmax_ref.data() + params.cols,
                stream);
    // Result under test; the input view is column-major (f-contiguous).
    raft::stats::minmax<T, int>(
      handle,
      raft::make_device_matrix_view<const T, int, raft::layout_f_contiguous>(
        data.data(), params.rows, params.cols),
      std::nullopt,
      std::nullopt,
      raft::make_device_vector_view<T, int>(minmax_act.data(), params.cols),
      raft::make_device_vector_view<T, int>(minmax_act.data() + params.cols, params.cols),
      std::nullopt);
  }

 protected:
  raft::resources handle;
  MinMaxInputs<T> params;             // current test configuration
  rmm::device_uvector<T> minmax_act;  // results from raft::stats::minmax
  rmm::device_uvector<T> minmax_ref;  // results from the naive reference
};
// Test configurations: {tolerance, rows, cols, seed}, sweeping matrix shapes
// from 1024x32 up to wide (1024x8192) and tall (8192x1024) cases.
const std::vector<MinMaxInputs<float>> inputsf = {{0.00001f, 1024, 32, 1234ULL},
                                                  {0.00001f, 1024, 64, 1234ULL},
                                                  {0.00001f, 1024, 128, 1234ULL},
                                                  {0.00001f, 1024, 256, 1234ULL},
                                                  {0.00001f, 1024, 512, 1234ULL},
                                                  {0.00001f, 1024, 1024, 1234ULL},
                                                  {0.00001f, 4096, 32, 1234ULL},
                                                  {0.00001f, 4096, 64, 1234ULL},
                                                  {0.00001f, 4096, 128, 1234ULL},
                                                  {0.00001f, 4096, 256, 1234ULL},
                                                  {0.00001f, 4096, 512, 1234ULL},
                                                  {0.00001f, 4096, 1024, 1234ULL},
                                                  {0.00001f, 8192, 32, 1234ULL},
                                                  {0.00001f, 8192, 64, 1234ULL},
                                                  {0.00001f, 8192, 128, 1234ULL},
                                                  {0.00001f, 8192, 256, 1234ULL},
                                                  {0.00001f, 8192, 512, 1234ULL},
                                                  {0.00001f, 8192, 1024, 1234ULL},
                                                  {0.00001f, 1024, 8192, 1234ULL}};

const std::vector<MinMaxInputs<double>> inputsd = {{0.0000001, 1024, 32, 1234ULL},
                                                   {0.0000001, 1024, 64, 1234ULL},
                                                   {0.0000001, 1024, 128, 1234ULL},
                                                   {0.0000001, 1024, 256, 1234ULL},
                                                   {0.0000001, 1024, 512, 1234ULL},
                                                   {0.0000001, 1024, 1024, 1234ULL},
                                                   {0.0000001, 4096, 32, 1234ULL},
                                                   {0.0000001, 4096, 64, 1234ULL},
                                                   {0.0000001, 4096, 128, 1234ULL},
                                                   {0.0000001, 4096, 256, 1234ULL},
                                                   {0.0000001, 4096, 512, 1234ULL},
                                                   {0.0000001, 4096, 1024, 1234ULL},
                                                   {0.0000001, 8192, 32, 1234ULL},
                                                   {0.0000001, 8192, 64, 1234ULL},
                                                   {0.0000001, 8192, 128, 1234ULL},
                                                   {0.0000001, 8192, 256, 1234ULL},
                                                   {0.0000001, 8192, 512, 1234ULL},
                                                   {0.0000001, 8192, 1024, 1234ULL},
                                                   {0.0000001, 1024, 8192, 1234ULL}};

// Compare reference vs. actual over the whole [mins..., maxs...] buffer.
typedef MinMaxTest<float> MinMaxTestF;
TEST_P(MinMaxTestF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(minmax_ref.data(),
                                minmax_act.data(),
                                2 * params.cols,
                                raft::CompareApprox<float>(params.tolerance)));
}

typedef MinMaxTest<double> MinMaxTestD;
TEST_P(MinMaxTestD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(minmax_ref.data(),
                                minmax_act.data(),
                                2 * params.cols,
                                raft::CompareApprox<double>(params.tolerance)));
}

INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestD, ::testing::ValuesIn(inputsd));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/adjusted_rand_index.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/adjusted_rand_index.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
namespace raft {
namespace stats {
// Parameters for one adjusted-Rand-index test case.
struct adjustedRandIndexParam {
  int nElements;        // number of points in each labeling
  int lowerLabelRange;  // inclusive lower bound of generated label values
  int upperLabelRange;  // inclusive upper bound of generated label values
  bool sameArrays;      // if true, the second labeling is a copy of the first
  double tolerance;     // allowed absolute error vs. the CPU reference
  // if this is true, then it is assumed that `sameArrays` is also true
  // further it also assumes `lowerLabelRange` and `upperLabelRange` are 0
  bool testZeroArray;
};
// Fixture: builds two host labelings, computes a CPU-reference adjusted Rand
// index from the contingency matrix, then runs the device implementation and
// stores both results for the TEST_P body to compare.
template <typename T, typename MathT = int>
class adjustedRandIndexTest : public ::testing::TestWithParam<adjustedRandIndexParam> {
 protected:
  adjustedRandIndexTest()
    : stream(resource::get_cuda_stream(handle)),
      firstClusterArray(0, stream),
      secondClusterArray(0, stream)
  {
  }
  void SetUp() override
  {
    params = ::testing::TestWithParam<adjustedRandIndexParam>::GetParam();
    nElements = params.nElements;
    firstClusterArray.resize(nElements, stream);
    secondClusterArray.resize(nElements, stream);
    RAFT_CUDA_TRY(
      cudaMemsetAsync(firstClusterArray.data(), 0, firstClusterArray.size() * sizeof(T), stream));
    RAFT_CUDA_TRY(
      cudaMemsetAsync(secondClusterArray.data(), 0, secondClusterArray.size() * sizeof(T), stream));
    if (!params.testZeroArray) {
      SetUpDifferentArrays();
    } else {
      SetupZeroArray();
    }
    // run the device implementation under test on the uploaded labelings
    computed_adjusted_rand_index = adjusted_rand_index<T, MathT>(
      handle,
      raft::make_device_vector_view<const T>(firstClusterArray.data(), nElements),
      raft::make_device_vector_view<const T>(secondClusterArray.data(), nElements));
  }
  // Generates two random labelings on the host, computes the reference ARI on
  // the CPU via the contingency matrix, and uploads the labelings to the GPU.
  void SetUpDifferentArrays()
  {
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;
    std::vector<int> arr1(nElements, 0);
    std::vector<int> arr2(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
    if (params.sameArrays) {
      arr2 = arr1;
    } else {
      std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
    }
    // build the contingency matrix (golden output) on the CPU; std::vector
    // replaces the former malloc() buffers, which were never freed (leak)
    int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
    std::vector<int> hGoldenOutput(numUniqueClasses * numUniqueClasses, 0);
    for (int i = 0; i < nElements; i++) {
      int row = arr1[i] - lowerLabelRange;
      int column = arr2[i] - lowerLabelRange;
      hGoldenOutput[row * numUniqueClasses + column] += 1;
    }
    int sumOfNijCTwo = 0;
    std::vector<int> a(numUniqueClasses, 0);  // row marginals of the contingency matrix
    std::vector<int> b(numUniqueClasses, 0);  // column marginals of the contingency matrix
    int sumOfAiCTwo = 0;
    int sumOfBiCTwo = 0;
    // sum of C(Nij, 2) over all cells, while also reducing the contingency
    // matrix along rows (into a) and columns (into b)
    for (int i = 0; i < numUniqueClasses; ++i) {
      for (int j = 0; j < numUniqueClasses; ++j) {
        int Nij = hGoldenOutput[i * numUniqueClasses + j];
        sumOfNijCTwo += ((Nij) * (Nij - 1)) / 2;
        a[i] += hGoldenOutput[i * numUniqueClasses + j];
        b[i] += hGoldenOutput[j * numUniqueClasses + i];
      }
    }
    // calculating the sum of C(a_i, 2) over every row sum and C(b_i, 2) over
    // every column sum
    for (int i = 0; i < numUniqueClasses; ++i) {
      sumOfAiCTwo += ((a[i]) * (a[i] - 1)) / 2;
      sumOfBiCTwo += ((b[i]) * (b[i] - 1)) / 2;
    }
    // ARI = (index - expected) / (max - expected); defined as 0 when the
    // denominator vanishes
    double nCTwo = double(nElements) * double(nElements - 1) / 2.0;
    double expectedIndex = (double(sumOfBiCTwo) * double(sumOfAiCTwo)) / double(nCTwo);
    double maxIndex = (double(sumOfAiCTwo) + double(sumOfBiCTwo)) / 2.0;
    double index = (double)sumOfNijCTwo;
    if (maxIndex != expectedIndex)
      truth_adjusted_rand_index = (index - expectedIndex) / (maxIndex - expectedIndex);
    else
      truth_adjusted_rand_index = 0;
    raft::update_device(firstClusterArray.data(), &arr1[0], nElements, stream);
    raft::update_device(secondClusterArray.data(), &arr2[0], nElements, stream);
  }
  // Two all-zero labelings are identical clusterings, so the expected ARI is 1.
  void SetupZeroArray()
  {
    lowerLabelRange = 0;
    upperLabelRange = 0;
    truth_adjusted_rand_index = 1.0;
  }
  raft::resources handle;
  cudaStream_t stream = 0;
  adjustedRandIndexParam params;
  T lowerLabelRange, upperLabelRange;  // label bounds used for this run
  rmm::device_uvector<T> firstClusterArray;
  rmm::device_uvector<T> secondClusterArray;
  int nElements = 0;
  double truth_adjusted_rand_index = 0;     // CPU reference result
  double computed_adjusted_rand_index = 0;  // device result under test
};
// {nElements, lowerLabelRange, upperLabelRange, sameArrays, tolerance, testZeroArray}
const std::vector<adjustedRandIndexParam> inputs = {
  {199, 1, 10, false, 0.000001, false},
  {200, 15, 100, false, 0.000001, false},
  {100, 1, 20, false, 0.000001, false},
  {10, 1, 10, false, 0.000001, false},
  {198, 1, 100, false, 0.000001, false},
  {300, 3, 99, false, 0.000001, false},
  {199, 1, 10, true, 0.000001, false},
  {200, 15, 100, true, 0.000001, false},
  {100, 1, 20, true, 0.000001, false},
  // FIXME: disabled temporarily due to flaky test
  // {10, 1, 10, true, 0.000001, false},
  {198, 1, 100, true, 0.000001, false},
  {300, 3, 99, true, 0.000001, false},
  {199, 0, 0, false, 0.000001, true},
  {200, 0, 0, false, 0.000001, true},
  {100, 0, 0, false, 0.000001, true},
  {10, 0, 0, false, 0.000001, true},
  {198, 0, 0, false, 0.000001, true},
  {300, 0, 0, false, 0.000001, true},
  {199, 0, 0, true, 0.000001, true},
  {200, 0, 0, true, 0.000001, true},
  {100, 0, 0, true, 0.000001, true},
  {10, 0, 0, true, 0.000001, true},
  {198, 0, 0, true, 0.000001, true},
  {300, 0, 0, true, 0.000001, true},
};
// Larger element counts to exercise the 64-bit math-type specialization.
const std::vector<adjustedRandIndexParam> large_inputs = {
  {2000000, 1, 1000, false, 0.000001, false},
  {2000000, 1, 1000, true, 0.000001, false},
  {2000000, 0, 0, false, 0.000001, true},
  {2000000, 0, 0, true, 0.000001, true},
};
// ARI with 32-bit internal math.
typedef adjustedRandIndexTest<int, int> ARI_ii;
TEST_P(ARI_ii, Result)
{
  ASSERT_NEAR(computed_adjusted_rand_index, truth_adjusted_rand_index, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(adjusted_rand_index, ARI_ii, ::testing::ValuesIn(inputs));
// ARI with 64-bit internal math; also instantiated with the large inputs,
// where 32-bit pair counts would overflow.
typedef adjustedRandIndexTest<int, unsigned long long> ARI_il;
TEST_P(ARI_il, Result)
{
  ASSERT_NEAR(computed_adjusted_rand_index, truth_adjusted_rand_index, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(adjusted_rand_index, ARI_il, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(adjusted_rand_index_large, ARI_il, ::testing::ValuesIn(large_inputs));
} // end namespace stats
} // end namespace raft
// ===== file: rapidsai_public_repos/raft/cpp/test/stats/weighted_mean.cu =====
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <cstdint>
#include <gtest/gtest.h>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/weighted_mean.cuh>
#include <raft/util/cuda_utils.cuh>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
namespace raft {
namespace stats {
// Parameters for the weighted-mean tests.
template <typename T>
struct WeightedMeanInputs {
  T tolerance;  // allowed absolute error vs. the naive CPU reference
  int M, N;     // matrix rows, columns
  unsigned long long int seed;  // RNG seed for data and weights
  bool along_rows;  // Used only for the weightedMean test function
  bool row_major;   // input matrix layout
};
// Pretty-printer used by gtest when a parameterized case fails.
// Now prints every field (row_major was previously omitted, leaving failing
// parameterizations ambiguous in the log).
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const WeightedMeanInputs<T>& I)
{
  return os << "{ " << I.tolerance << ", " << I.M << ", " << I.N << ", " << I.seed << ", "
            << I.along_rows << ", " << I.row_major << "}" << std::endl;
}
///// weighted row-wise mean test and support functions

// CPU reference: weighted mean of each of the M rows of an M x N matrix D,
// using the N weights in W, normalized by the total weight. Results go to R
// (length M). rowMajor selects the memory layout of D.
template <typename T>
void naiveRowWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor)
{
  const int colStride = rowMajor ? 1 : M;  // step between adjacent columns
  const int rowStride = rowMajor ? N : 1;  // step between adjacent rows
  // total weight, used to normalize every accumulated term
  T totalWeight = (T)0;
  for (int c = 0; c < N; c++) {
    totalWeight += W[c];
  }
  for (int r = 0; r < M; r++) {
    T acc = (T)0;
    for (int c = 0; c < N; c++) {
      acc += (W[c] * D[c * colStride + r * rowStride]) / totalWeight;
    }
    R[r] = acc;
  }
}
// Fixture: generates a random matrix and weight vector on the device, computes
// the naive row-weighted mean on the host, and runs row_weighted_mean on the
// device; the TEST_P body compares dexp (reference) against dact (actual).
template <typename T>
class RowWeightedMeanTest : public ::testing::TestWithParam<WeightedMeanInputs<T>> {
 protected:
  void SetUp() override
  {
    params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
    raft::random::RngState r(params.seed);
    int rows = params.M, cols = params.N, len = rows * cols;
    auto stream = resource::get_cuda_stream(handle);
    // device-side data
    din.resize(len);
    dweights.resize(cols);
    dexp.resize(rows);
    dact.resize(rows);
    // create random matrix and weights
    uniform(handle, r, din.data().get(), len, T(-1.0), T(1.0));
    uniform(handle, r, dweights.data().get(), cols, T(-1.0), T(1.0));
    // host-side data (thrust copies device -> host here)
    thrust::host_vector<T> hin = din;
    thrust::host_vector<T> hweights = dweights;
    thrust::host_vector<T> hexp(rows);
    // compute naive result & copy to GPU
    naiveRowWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols, params.row_major);
    dexp = hexp;
    auto output = raft::make_device_vector_view<T, std::uint32_t>(dact.data().get(), rows);
    auto weights =
      raft::make_device_vector_view<const T, std::uint32_t>(dweights.data().get(), cols);
    // the matrix view layout type must match params.row_major, hence the branch
    if (params.row_major) {
      auto input = raft::make_device_matrix_view<const T, std::uint32_t, raft::row_major>(
        din.data().get(), rows, cols);
      // compute result
      row_weighted_mean(handle, input, weights, output);
    } else {
      auto input = raft::make_device_matrix_view<const T, std::uint32_t, raft::col_major>(
        din.data().get(), rows, cols);
      // compute result
      row_weighted_mean(handle, input, weights, output);
    }
    // adjust tolerance to account for round-off accumulation over N terms
    params.tolerance *= params.N;
  }

 protected:
  raft::resources handle;
  WeightedMeanInputs<T> params;
  thrust::host_vector<T> hin, hweights;
  thrust::device_vector<T> din, dweights, dexp, dact;
};
///// weighted column-wise mean test and support functions

// CPU reference: weighted mean of each of the N columns of an M x N matrix D,
// using the M weights in W, normalized by the total weight. Results go to R
// (length N). rowMajor selects the memory layout of D.
template <typename T>
void naiveColWeightedMean(T* R, T* D, T* W, int M, int N, bool rowMajor)
{
  const int colStride = rowMajor ? 1 : M;  // step between adjacent columns
  const int rowStride = rowMajor ? N : 1;  // step between adjacent rows
  // total weight, used to normalize every accumulated term
  T totalWeight = (T)0;
  for (int r = 0; r < M; r++) {
    totalWeight += W[r];
  }
  for (int c = 0; c < N; c++) {
    T acc = (T)0;
    for (int r = 0; r < M; r++) {
      acc += (W[r] * D[c * colStride + r * rowStride]) / totalWeight;
    }
    R[c] = acc;
  }
}
// Fixture: generates a random matrix and weight vector on the device, computes
// the naive column-weighted mean on the host, and runs col_weighted_mean on
// the device; the TEST_P body compares dexp (reference) against dact (actual).
template <typename T>
class ColWeightedMeanTest : public ::testing::TestWithParam<WeightedMeanInputs<T>> {
 protected:
  // SetUp was implicitly private (class-default access) while the sibling
  // RowWeightedMeanTest declares it protected — made consistent here.
  void SetUp() override
  {
    params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
    raft::random::RngState r(params.seed);
    int rows = params.M, cols = params.N, len = rows * cols;
    auto stream = resource::get_cuda_stream(handle);
    // device-side data
    din.resize(len);
    dweights.resize(rows);
    dexp.resize(cols);
    dact.resize(cols);
    // create random matrix and weights
    uniform(handle, r, din.data().get(), len, T(-1.0), T(1.0));
    uniform(handle, r, dweights.data().get(), rows, T(-1.0), T(1.0));
    // host-side data (thrust copies device -> host here)
    thrust::host_vector<T> hin = din;
    thrust::host_vector<T> hweights = dweights;
    thrust::host_vector<T> hexp(cols);
    // compute naive result & copy to GPU
    naiveColWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols, params.row_major);
    dexp = hexp;
    auto output = raft::make_device_vector_view<T, std::uint32_t>(dact.data().get(), cols);
    auto weights =
      raft::make_device_vector_view<const T, std::uint32_t>(dweights.data().get(), rows);
    // the matrix view layout type must match params.row_major, hence the branch
    if (params.row_major) {
      auto input = raft::make_device_matrix_view<const T, std::uint32_t, raft::row_major>(
        din.data().get(), rows, cols);
      // compute result
      col_weighted_mean(handle, input, weights, output);
    } else {
      auto input = raft::make_device_matrix_view<const T, std::uint32_t, raft::col_major>(
        din.data().get(), rows, cols);
      // compute result
      col_weighted_mean(handle, input, weights, output);
    }
    // adjust tolerance to account for round-off accumulation over M terms
    params.tolerance *= params.M;
  }

 protected:
  raft::resources handle;
  WeightedMeanInputs<T> params;
  thrust::host_vector<T> hin, hweights;
  thrust::device_vector<T> din, dweights, dexp, dact;
};
// Fixture for the combined weighted_mean API, which selects row- or
// column-wise reduction at runtime via params.along_rows.
template <typename T>
class WeightedMeanTest : public ::testing::TestWithParam<WeightedMeanInputs<T>> {
 protected:
  void SetUp() override
  {
    params = ::testing::TestWithParam<WeightedMeanInputs<T>>::GetParam();
    raft::random::RngState r(params.seed);
    auto stream = resource::get_cuda_stream(handle);
    int rows = params.M, cols = params.N, len = rows * cols;
    // along_rows: one weight per column, one mean per row; otherwise reversed
    auto weight_size = params.along_rows ? cols : rows;
    auto mean_size = params.along_rows ? rows : cols;
    // device-side data
    din.resize(len);
    dweights.resize(weight_size);
    dexp.resize(mean_size);
    dact.resize(mean_size);
    // create random matrix and weights
    uniform(handle, r, din.data().get(), len, T(-1.0), T(1.0));
    uniform(handle, r, dweights.data().get(), weight_size, T(-1.0), T(1.0));
    // host-side data (thrust copies device -> host here)
    thrust::host_vector<T> hin = din;
    thrust::host_vector<T> hweights = dweights;
    thrust::host_vector<T> hexp(mean_size);
    // compute naive result & copy to GPU
    if (params.along_rows)
      naiveRowWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols, params.row_major);
    else
      naiveColWeightedMean(hexp.data(), hin.data(), hweights.data(), rows, cols, params.row_major);
    dexp = hexp;
    auto output = raft::make_device_vector_view<T, std::uint32_t>(dact.data().get(), mean_size);
    auto weights =
      raft::make_device_vector_view<const T, std::uint32_t>(dweights.data().get(), weight_size);
    if (params.row_major) {
      auto input = raft::make_device_matrix_view<const T, std::uint32_t, raft::row_major>(
        din.data().get(), rows, cols);
      // compute result
      weighted_mean(handle, input, weights, output, params.along_rows);
    } else {
      auto input = raft::make_device_matrix_view<const T, std::uint32_t, raft::col_major>(
        din.data().get(), rows, cols);
      // compute result
      weighted_mean(handle, input, weights, output, params.along_rows);
    }
    // adjust tolerance to account for round-off accumulation
    // NOTE(review): scaled by N even when along_rows is false, where M values
    // are accumulated per output — confirm whether M was intended there.
    params.tolerance *= params.N;
  }

 protected:
  raft::resources handle;
  WeightedMeanInputs<T> params;
  thrust::host_vector<T> hin, hweights;
  thrust::device_vector<T> din, dweights, dexp, dact;
};
////// Parameter sets and test instantiation
// Tolerances are multiples of machine epsilon; SetUp further scales them by
// the accumulation length.
static const float tolF = 128 * std::numeric_limits<float>::epsilon();
static const double tolD = 256 * std::numeric_limits<double>::epsilon();
// {tolerance, M, N, seed, along_rows, row_major}
const std::vector<WeightedMeanInputs<float>> inputsf = {{tolF, 4, 4, 1234, true, true},
                                                        {tolF, 32, 32, 1234, true, false},
                                                        {tolF, 32, 64, 1234, false, false},
                                                        {tolF, 32, 256, 1234, true, true},
                                                        {tolF, 32, 256, 1234, false, false},
                                                        {tolF, 1024, 32, 1234, true, false},
                                                        {tolF, 1024, 64, 1234, true, true},
                                                        {tolF, 1024, 128, 1234, true, false},
                                                        {tolF, 1024, 256, 1234, true, true},
                                                        {tolF, 1024, 32, 1234, false, false},
                                                        {tolF, 1024, 64, 1234, false, true},
                                                        {tolF, 1024, 128, 1234, false, false},
                                                        {tolF, 1024, 256, 1234, false, true}};
// Same coverage as inputsf, in double precision.
const std::vector<WeightedMeanInputs<double>> inputsd = {{tolD, 4, 4, 1234, true, true},
                                                         {tolD, 32, 32, 1234, true, false},
                                                         {tolD, 32, 64, 1234, false, false},
                                                         {tolD, 32, 256, 1234, true, true},
                                                         {tolD, 32, 256, 1234, false, false},
                                                         {tolD, 1024, 32, 1234, true, false},
                                                         {tolD, 1024, 64, 1234, true, true},
                                                         {tolD, 1024, 128, 1234, true, false},
                                                         {tolD, 1024, 256, 1234, true, true},
                                                         {tolD, 1024, 32, 1234, false, false},
                                                         {tolD, 1024, 64, 1234, false, true},
                                                         {tolD, 1024, 128, 1234, false, false},
                                                         {tolD, 1024, 256, 1234, false, true}};
// Row-wise: result has M elements (one mean per row).
using RowWeightedMeanTestF = RowWeightedMeanTest<float>;
TEST_P(RowWeightedMeanTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    dexp.data().get(), dact.data().get(), params.M, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestF, ::testing::ValuesIn(inputsf));
using RowWeightedMeanTestD = RowWeightedMeanTest<double>;
TEST_P(RowWeightedMeanTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    dexp.data().get(), dact.data().get(), params.M, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(RowWeightedMeanTest, RowWeightedMeanTestD, ::testing::ValuesIn(inputsd));
// Column-wise: result has N elements (one mean per column).
using ColWeightedMeanTestF = ColWeightedMeanTest<float>;
TEST_P(ColWeightedMeanTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    dexp.data().get(), dact.data().get(), params.N, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestF, ::testing::ValuesIn(inputsf));
using ColWeightedMeanTestD = ColWeightedMeanTest<double>;
TEST_P(ColWeightedMeanTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    dexp.data().get(), dact.data().get(), params.N, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ColWeightedMeanTest, ColWeightedMeanTestD, ::testing::ValuesIn(inputsd));
// Combined API: output length depends on the along_rows flag.
using WeightedMeanTestF = WeightedMeanTest<float>;
TEST_P(WeightedMeanTestF, Result)
{
  auto mean_size = params.along_rows ? params.M : params.N;
  ASSERT_TRUE(devArrMatch(
    dexp.data().get(), dact.data().get(), mean_size, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(WeightedMeanTest, WeightedMeanTestF, ::testing::ValuesIn(inputsf));
using WeightedMeanTestD = WeightedMeanTest<double>;
TEST_P(WeightedMeanTestD, Result)
{
  auto mean_size = params.along_rows ? params.M : params.N;
  ASSERT_TRUE(devArrMatch(dexp.data().get(),
                          dact.data().get(),
                          mean_size,
                          raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(WeightedMeanTest, WeightedMeanTestD, ::testing::ValuesIn(inputsd));
}; // end namespace stats
}; // end namespace raft
// ===== file: rapidsai_public_repos/raft/cpp/test/stats/v_measure.cu =====
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/homogeneity_score.cuh>
#include <raft/stats/v_measure.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
namespace raft {
namespace stats {
// parameter structure definition for one V-measure test case
struct vMeasureParam {
  int nElements;        // number of points in each labeling
  int lowerLabelRange;  // inclusive lower bound of generated label values
  int upperLabelRange;  // inclusive upper bound of generated label values
  double beta;          // homogeneity/completeness weighting in the V-measure
  bool sameArrays;      // if true, the predicted labeling copies the truth
  double tolerance;     // allowed absolute error vs. the reference
};
// test fixture class: builds random truth/predicted labelings, derives the
// reference V-measure from two homogeneity_score calls (completeness is
// homogeneity with the arguments swapped), then runs the v_measure API.
template <typename T>
class vMeasureTest : public ::testing::TestWithParam<vMeasureParam> {
 protected:
  // gtest setup hook (runs before each test body)
  void SetUp() override
  {
    // getting the parameters
    params = ::testing::TestWithParam<vMeasureParam>::GetParam();
    nElements = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;
    // generating random value test input
    std::vector<int> arr1(nElements, 0);
    std::vector<int> arr2(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
    if (params.sameArrays) {
      arr2 = arr1;
    } else {
      std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
    }
    // allocating and initializing memory to the GPU
    stream = resource::get_cuda_stream(handle);
    rmm::device_uvector<T> truthClusterArray(nElements, stream);
    rmm::device_uvector<T> predClusterArray(nElements, stream);
    raft::update_device(truthClusterArray.data(), &arr1[0], (int)nElements, stream);
    raft::update_device(predClusterArray.data(), &arr2[0], (int)nElements, stream);
    // calculating the golden output: homogeneity(truth, pred) and
    // completeness = homogeneity(pred, truth)
    double truthHomogeity, truthCompleteness;
    truthHomogeity = raft::stats::homogeneity_score(truthClusterArray.data(),
                                                    predClusterArray.data(),
                                                    nElements,
                                                    lowerLabelRange,
                                                    upperLabelRange,
                                                    stream);
    truthCompleteness = raft::stats::homogeneity_score(predClusterArray.data(),
                                                      truthClusterArray.data(),
                                                      nElements,
                                                      lowerLabelRange,
                                                      upperLabelRange,
                                                      stream);
    // beta-weighted harmonic mean, defined as 0 when both terms are 0
    if (truthCompleteness + truthHomogeity == 0.0)
      truthVMeasure = 0.0;
    else
      truthVMeasure = ((1 + params.beta) * truthHomogeity * truthCompleteness /
                       (params.beta * truthHomogeity + truthCompleteness));
    // calling the v_measure CUDA implementation
    computedVMeasure = raft::stats::v_measure(
      handle,
      raft::make_device_vector_view<const T>(truthClusterArray.data(), nElements),
      raft::make_device_vector_view<const T>(predClusterArray.data(), nElements),
      lowerLabelRange,
      upperLabelRange,
      params.beta);
  }
  // declaring the data values
  raft::resources handle;
  vMeasureParam params;
  T lowerLabelRange, upperLabelRange;
  int nElements = 0;
  double truthVMeasure = 0;     // CPU-derived reference result
  double computedVMeasure = 0;  // device result under test
  cudaStream_t stream = 0;
};
// setting test parameter values
// {nElements, lowerLabelRange, upperLabelRange, beta, sameArrays, tolerance}
const std::vector<vMeasureParam> inputs = {{199, 1, 10, 1.0, false, 0.000001},
                                           {200, 15, 100, 1.0, false, 0.000001},
                                           {100, 1, 20, 1.0, false, 0.000001},
                                           {10, 1, 10, 1.0, false, 0.000001},
                                           {198, 1, 100, 1.0, false, 0.000001},
                                           {300, 3, 99, 1.0, false, 0.000001},
                                           {199, 1, 10, 1.0, true, 0.000001},
                                           {200, 15, 100, 1.0, true, 0.000001},
                                           {100, 1, 20, 1.0, true, 0.000001},
                                           {10, 1, 10, 1.0, true, 0.000001},
                                           {198, 1, 100, 1.0, true, 0.000001},
                                           {300, 3, 99, 1.0, true, 0.000001}};
// writing the test suite
typedef vMeasureTest<int> vMeasureTestClass;
TEST_P(vMeasureTestClass, Result)
{
  ASSERT_NEAR(computedVMeasure, truthVMeasure, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(vMeasure, vMeasureTestClass, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
// ===== file: rapidsai_public_repos/raft/cpp/test/stats/contingencyMatrix.cu =====
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/interruptible.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/contingency_matrix.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace stats {
// Parameters for one contingency-matrix test case.
struct ContingencyMatrixParam {
  int nElements;        // number of label pairs
  int minClass;         // inclusive lower bound of generated labels
  int maxClass;         // inclusive upper bound of generated labels
  bool calcCardinality; // derive label bounds from the data on the device
  bool skipLabels;      // drop two label values to exercise sparse classes
  float tolerance;      // unused by the exact Compare<T> match below
};
// Fixture: builds random (y, y_hat) label pairs, a golden contingency matrix
// on the CPU, and device copies of everything; RunTest() then executes the
// device implementation and compares it element-wise to the golden matrix.
template <typename T>
class ContingencyMatrixTest : public ::testing::TestWithParam<ContingencyMatrixParam> {
 protected:
  ContingencyMatrixTest()
    : stream(resource::get_cuda_stream(handle)),
      dY(0, stream),
      dYHat(0, stream),
      dComputedOutput(0, stream),
      dGoldenOutput(0, stream)
  {
  }
  void SetUp() override
  {
    params = ::testing::TestWithParam<ContingencyMatrixParam>::GetParam();
    int numElements = params.nElements;
    int lowerLabelRange = params.minClass;
    int upperLabelRange = params.maxClass;
    std::vector<int> y(numElements, 0);
    std::vector<int> y_hat(numElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
    std::generate(y.begin(), y.end(), [&]() { return intGenerator(dre); });
    std::generate(y_hat.begin(), y_hat.end(), [&]() { return intGenerator(dre); });
    if (params.skipLabels) {
      // remove two label values from the input arrays
      int y1 = (upperLabelRange - lowerLabelRange) / 2;
      int y2 = y1 + (upperLabelRange - lowerLabelRange) / 4;
      // replacement values
      int y1_R = y1 + 1;
      int y2_R = y2 + 1;
      std::replace(y.begin(), y.end(), y1, y1_R);
      std::replace(y.begin(), y.end(), y2, y2_R);
      std::replace(y_hat.begin(), y_hat.end(), y1, y1_R);
      std::replace(y_hat.begin(), y_hat.end(), y2, y2_R);
    }
    dY.resize(numElements, stream);
    dYHat.resize(numElements, stream);
    raft::update_device(dYHat.data(), &y_hat[0], numElements, stream);
    raft::update_device(dY.data(), &y[0], numElements, stream);
    if (params.calcCardinality) {
      // derive [minLabel, maxLabel] from the device data
      raft::stats::get_input_class_cardinality(
        handle,
        raft::make_device_vector_view<const T>(dY.data(), numElements),
        raft::make_host_scalar_view(&minLabel),
        raft::make_host_scalar_view(&maxLabel));
    } else {
      minLabel = lowerLabelRange;
      maxLabel = upperLabelRange;
    }
    numUniqueClasses = maxLabel - minLabel + 1;
    dComputedOutput.resize(numUniqueClasses * numUniqueClasses, stream);
    dGoldenOutput.resize(numUniqueClasses * numUniqueClasses, stream);
    // generate golden output on CPU; the buffer is sized in ELEMENTS
    // (previously a byte count, numUniqueClasses^2 * sizeof(int), was passed
    // as the element count, over-allocating the host vector by 4x)
    std::vector<int> hGoldenOutput(numUniqueClasses * numUniqueClasses, 0);
    for (int i = 0; i < numElements; i++) {
      auto row = y[i] - minLabel;
      auto column = y_hat[i] - minLabel;
      hGoldenOutput[row * numUniqueClasses + column] += 1;
    }
    raft::update_device(
      dGoldenOutput.data(), hGoldenOutput.data(), numUniqueClasses * numUniqueClasses, stream);
    raft::interruptible::synchronize(stream);
  }
  // Runs the device contingency_matrix and checks it against the golden matrix.
  void RunTest()
  {
    int numElements = params.nElements;
    raft::stats::contingency_matrix(
      handle,
      raft::make_device_vector_view<const T>(dY.data(), numElements),
      raft::make_device_vector_view<const T>(dYHat.data(), numElements),
      raft::make_device_matrix_view(dComputedOutput.data(), numUniqueClasses, numUniqueClasses),
      std::make_optional(minLabel),
      std::make_optional(maxLabel));
    raft::interruptible::synchronize(stream);
    ASSERT_TRUE(raft::devArrMatch(dComputedOutput.data(),
                                  dGoldenOutput.data(),
                                  numUniqueClasses * numUniqueClasses,
                                  raft::Compare<T>()));
  }
  raft::resources handle;
  ContingencyMatrixParam params;
  int numUniqueClasses = -1;
  T minLabel, maxLabel;  // label bounds actually used for the matrix
  cudaStream_t stream = 0;
  rmm::device_uvector<T> dY, dYHat;
  rmm::device_uvector<int> dComputedOutput, dGoldenOutput;
};
// {nElements, minClass, maxClass, calcCardinality, skipLabels, tolerance}
const std::vector<ContingencyMatrixParam> inputs = {
  {10000, 1, 10, true, false, 0.000001},
  {10000, 1, 5000, true, false, 0.000001},
  {10000, 1, 10000, true, false, 0.000001},
  {10000, 1, 20000, true, false, 0.000001},
  {10000, 1, 10, false, false, 0.000001},
  {10000, 1, 5000, false, false, 0.000001},
  {10000, 1, 10000, false, false, 0.000001},
  {10000, 1, 20000, false, false, 0.000001},
  {100000, 1, 100, false, false, 0.000001},
  {1000000, 1, 1200, true, false, 0.000001},
  {1000000, 1, 10000, false, false, 0.000001},
  {100000, 1, 100, false, true, 0.000001},
};
typedef ContingencyMatrixTest<int> ContingencyMatrixTestS;
TEST_P(ContingencyMatrixTestS, Result) { RunTest(); }
INSTANTIATE_TEST_CASE_P(ContingencyMatrix, ContingencyMatrixTestS, ::testing::ValuesIn(inputs));
} // namespace stats
} // namespace raft
// ===== file: rapidsai_public_repos/raft/cpp/test/stats/mean_center.cu =====
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../linalg/matrix_vector_op.cuh"
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/mean.cuh>
#include <raft/stats/mean_center.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace stats {
// Parameters for one mean-center test case.
template <typename T, typename IdxType>
struct MeanCenterInputs {
  T tolerance, mean;                      // allowed error; mean of the generated data
  IdxType rows, cols;                     // matrix dimensions
  bool sample, rowMajor, bcastAlongRows;  // sample mean; layout; broadcast axis
  unsigned long long int seed;            // RNG seed
};
template <typename T, typename IdxType>
::std::ostream& operator<<(::std::ostream& os, const MeanCenterInputs<T, IdxType>& dims)
{
return os;
}
// Fixture: generates normal(mean, 1) data, computes its column/row means, runs
// mean_center on the device, and builds a reference via naiveMatVec with a
// scale of -1 (i.e. data minus broadcast mean).
template <typename T, typename IdxType>
class MeanCenterTest : public ::testing::TestWithParam<MeanCenterInputs<T, IdxType>> {
 public:
  MeanCenterTest()
    : params(::testing::TestWithParam<MeanCenterInputs<T, IdxType>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      rows(params.rows),
      cols(params.cols),
      out(rows * cols, stream),
      out_ref(rows * cols, stream),
      data(rows * cols, stream),
      meanVec(params.bcastAlongRows ? cols : rows, stream)
  {
  }

 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    auto len = rows * cols;
    // broadcasting along rows needs one mean per column, and vice versa
    auto meanVecSize = params.bcastAlongRows ? cols : rows;
    normal(handle, r, data.data(), len, params.mean, (T)1.0);
    raft::stats::mean(
      meanVec.data(), data.data(), cols, rows, params.sample, params.rowMajor, stream);
    if (params.rowMajor) {
      using layout = raft::row_major;
      mean_center(handle,
                  raft::make_device_matrix_view<const T, int, layout>(data.data(), rows, cols),
                  raft::make_device_vector_view<const T, int>(meanVec.data(), meanVecSize),
                  raft::make_device_matrix_view<T, int, layout>(out.data(), rows, cols),
                  params.bcastAlongRows);
    } else {
      using layout = raft::col_major;
      mean_center(handle,
                  raft::make_device_matrix_view<const T, int, layout>(data.data(), rows, cols),
                  raft::make_device_vector_view<const T, int>(meanVec.data(), meanVecSize),
                  raft::make_device_matrix_view<T, int, layout>(out.data(), rows, cols),
                  params.bcastAlongRows);
    }
    // reference: out_ref = data + (-1) * broadcast(meanVec)
    raft::linalg::naiveMatVec(out_ref.data(),
                              data.data(),
                              meanVec.data(),
                              cols,
                              rows,
                              params.rowMajor,
                              params.bcastAlongRows,
                              (T)-1.0,
                              stream);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  MeanCenterInputs<T, IdxType> params;
  // NOTE(review): members are int even though the params use IdxType — confirm
  // whether IdxType-sized dimensions were intended here.
  int rows, cols;
  rmm::device_uvector<T> data, meanVec, out, out_ref;
};
// {tolerance, mean, rows, cols, sample, rowMajor, bcastAlongRows, seed};
// covers all layout/broadcast combinations for 32-bit indexing.
const std::vector<MeanCenterInputs<float, int>> inputsf_i32 = {
  {0.05f, 1.f, 1024, 32, true, false, true, 1234ULL},
  {0.05f, 1.f, 1024, 64, true, false, true, 1234ULL},
  {0.05f, 1.f, 1024, 128, true, false, true, 1234ULL},
  {0.05f, -1.f, 1024, 32, false, false, true, 1234ULL},
  {0.05f, -1.f, 1024, 64, false, false, true, 1234ULL},
  {0.05f, -1.f, 1024, 128, false, false, true, 1234ULL},
  {0.05f, 1.f, 1024, 32, true, true, true, 1234ULL},
  {0.05f, 1.f, 1024, 64, true, true, true, 1234ULL},
  {0.05f, 1.f, 1024, 128, true, true, true, 1234ULL},
  {0.05f, -1.f, 1024, 32, false, true, true, 1234ULL},
  {0.05f, -1.f, 1024, 64, false, true, true, 1234ULL},
  {0.05f, -1.f, 1024, 128, false, true, true, 1234ULL},
  {0.05f, 1.f, 1024, 32, true, false, false, 1234ULL},
  {0.05f, 1.f, 1024, 64, true, false, false, 1234ULL},
  {0.05f, 1.f, 1024, 128, true, false, false, 1234ULL},
  {0.05f, -1.f, 1024, 32, false, false, false, 1234ULL},
  {0.05f, -1.f, 1024, 64, false, false, false, 1234ULL},
  {0.05f, -1.f, 1024, 128, false, false, false, 1234ULL},
  {0.05f, 1.f, 1024, 32, true, true, false, 1234ULL},
  {0.05f, 1.f, 1024, 64, true, true, false, 1234ULL},
  {0.05f, 1.f, 1024, 128, true, true, false, 1234ULL},
  {0.05f, -1.f, 1024, 32, false, true, false, 1234ULL},
  {0.05f, -1.f, 1024, 64, false, true, false, 1234ULL},
  {0.05f, -1.f, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<float, int> MeanCenterTestF_i32;
TEST_P(MeanCenterTestF_i32, Result)
{
  // NOTE(review): only the first params.cols of the rows*cols output elements
  // are compared — presumably a partial check; confirm whether a full-matrix
  // comparison was intended.
  ASSERT_TRUE(devArrMatch(
    out.data(), out_ref.data(), params.cols, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MeanCenterTests, MeanCenterTestF_i32, ::testing::ValuesIn(inputsf_i32));
// Parameter grid for the float / size_t (64-bit index) instantiation; the
// aggregate field order follows the MeanCenterInputs declaration.
const std::vector<MeanCenterInputs<float, size_t>> inputsf_i64 = {
  {0.05f, 1.f, 1024, 32, true, false, true, 1234ULL},
  {0.05f, 1.f, 1024, 64, true, false, true, 1234ULL},
  {0.05f, 1.f, 1024, 128, true, false, true, 1234ULL},
  {0.05f, -1.f, 1024, 32, false, false, true, 1234ULL},
  {0.05f, -1.f, 1024, 64, false, false, true, 1234ULL},
  {0.05f, -1.f, 1024, 128, false, false, true, 1234ULL},
  {0.05f, 1.f, 1024, 32, true, true, true, 1234ULL},
  {0.05f, 1.f, 1024, 64, true, true, true, 1234ULL},
  {0.05f, 1.f, 1024, 128, true, true, true, 1234ULL},
  {0.05f, -1.f, 1024, 32, false, true, true, 1234ULL},
  {0.05f, -1.f, 1024, 64, false, true, true, 1234ULL},
  {0.05f, -1.f, 1024, 128, false, true, true, 1234ULL},
  {0.05f, 1.f, 1024, 32, true, false, false, 1234ULL},
  {0.05f, 1.f, 1024, 64, true, false, false, 1234ULL},
  {0.05f, 1.f, 1024, 128, true, false, false, 1234ULL},
  {0.05f, -1.f, 1024, 32, false, false, false, 1234ULL},
  {0.05f, -1.f, 1024, 64, false, false, false, 1234ULL},
  {0.05f, -1.f, 1024, 128, false, false, false, 1234ULL},
  {0.05f, 1.f, 1024, 32, true, true, false, 1234ULL},
  {0.05f, 1.f, 1024, 64, true, true, false, 1234ULL},
  {0.05f, 1.f, 1024, 128, true, true, false, 1234ULL},
  {0.05f, -1.f, 1024, 32, false, true, false, 1234ULL},
  {0.05f, -1.f, 1024, 64, false, true, false, 1234ULL},
  {0.05f, -1.f, 1024, 128, false, true, false, 1234ULL}};

typedef MeanCenterTest<float, size_t> MeanCenterTestF_i64;
// NOTE(review): only the first params.cols elements of the rows*cols output
// are compared -- confirm whether the full matrix was meant to be checked.
TEST_P(MeanCenterTestF_i64, Result)
{
  ASSERT_TRUE(devArrMatch(
    out.data(), out_ref.data(), params.cols, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MeanCenterTests, MeanCenterTestF_i64, ::testing::ValuesIn(inputsf_i64));
// Parameter grid for the double / int32 instantiation; the aggregate field
// order follows the MeanCenterInputs declaration.
const std::vector<MeanCenterInputs<double, int>> inputsd_i32 = {
  {0.05, 1.0, 1024, 32, true, false, true, 1234ULL},
  {0.05, 1.0, 1024, 64, true, false, true, 1234ULL},
  {0.05, 1.0, 1024, 128, true, false, true, 1234ULL},
  {0.05, -1.0, 1024, 32, false, false, true, 1234ULL},
  {0.05, -1.0, 1024, 64, false, false, true, 1234ULL},
  {0.05, -1.0, 1024, 128, false, false, true, 1234ULL},
  {0.05, 1.0, 1024, 32, true, true, true, 1234ULL},
  {0.05, 1.0, 1024, 64, true, true, true, 1234ULL},
  {0.05, 1.0, 1024, 128, true, true, true, 1234ULL},
  {0.05, -1.0, 1024, 32, false, true, true, 1234ULL},
  {0.05, -1.0, 1024, 64, false, true, true, 1234ULL},
  {0.05, -1.0, 1024, 128, false, true, true, 1234ULL},
  {0.05, 1.0, 1024, 32, true, false, false, 1234ULL},
  {0.05, 1.0, 1024, 64, true, false, false, 1234ULL},
  {0.05, 1.0, 1024, 128, true, false, false, 1234ULL},
  {0.05, -1.0, 1024, 32, false, false, false, 1234ULL},
  {0.05, -1.0, 1024, 64, false, false, false, 1234ULL},
  {0.05, -1.0, 1024, 128, false, false, false, 1234ULL},
  {0.05, 1.0, 1024, 32, true, true, false, 1234ULL},
  {0.05, 1.0, 1024, 64, true, true, false, 1234ULL},
  {0.05, 1.0, 1024, 128, true, true, false, 1234ULL},
  {0.05, -1.0, 1024, 32, false, true, false, 1234ULL},
  {0.05, -1.0, 1024, 64, false, true, false, 1234ULL},
  {0.05, -1.0, 1024, 128, false, true, false, 1234ULL}};

typedef MeanCenterTest<double, int> MeanCenterTestD_i32;
// NOTE(review): only the first params.cols elements of the rows*cols output
// are compared -- confirm whether the full matrix was meant to be checked.
TEST_P(MeanCenterTestD_i32, Result)
{
  ASSERT_TRUE(devArrMatch(
    out.data(), out_ref.data(), params.cols, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MeanCenterTests, MeanCenterTestD_i32, ::testing::ValuesIn(inputsd_i32));
// Parameter grid for the double / size_t (64-bit index) instantiation; the
// aggregate field order follows the MeanCenterInputs declaration.
const std::vector<MeanCenterInputs<double, size_t>> inputsd_i64 = {
  {0.05, 1.0, 1024, 32, true, false, true, 1234ULL},
  {0.05, 1.0, 1024, 64, true, false, true, 1234ULL},
  {0.05, 1.0, 1024, 128, true, false, true, 1234ULL},
  {0.05, -1.0, 1024, 32, false, false, true, 1234ULL},
  {0.05, -1.0, 1024, 64, false, false, true, 1234ULL},
  {0.05, -1.0, 1024, 128, false, false, true, 1234ULL},
  {0.05, 1.0, 1024, 32, true, true, true, 1234ULL},
  {0.05, 1.0, 1024, 64, true, true, true, 1234ULL},
  {0.05, 1.0, 1024, 128, true, true, true, 1234ULL},
  {0.05, -1.0, 1024, 32, false, true, true, 1234ULL},
  {0.05, -1.0, 1024, 64, false, true, true, 1234ULL},
  {0.05, -1.0, 1024, 128, false, true, true, 1234ULL},
  {0.05, 1.0, 1024, 32, true, false, false, 1234ULL},
  {0.05, 1.0, 1024, 64, true, false, false, 1234ULL},
  {0.05, 1.0, 1024, 128, true, false, false, 1234ULL},
  {0.05, -1.0, 1024, 32, false, false, false, 1234ULL},
  {0.05, -1.0, 1024, 64, false, false, false, 1234ULL},
  {0.05, -1.0, 1024, 128, false, false, false, 1234ULL},
  {0.05, 1.0, 1024, 32, true, true, false, 1234ULL},
  {0.05, 1.0, 1024, 64, true, true, false, 1234ULL},
  {0.05, 1.0, 1024, 128, true, true, false, 1234ULL},
  {0.05, -1.0, 1024, 32, false, true, false, 1234ULL},
  {0.05, -1.0, 1024, 64, false, true, false, 1234ULL},
  {0.05, -1.0, 1024, 128, false, true, false, 1234ULL}};

typedef MeanCenterTest<double, size_t> MeanCenterTestD_i64;
// NOTE(review): only the first params.cols elements of the rows*cols output
// are compared -- confirm whether the full matrix was meant to be checked.
TEST_P(MeanCenterTestD_i64, Result)
{
  ASSERT_TRUE(devArrMatch(
    out.data(), out_ref.data(), params.cols, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MeanCenterTests, MeanCenterTestD_i64, ::testing::ValuesIn(inputsd_i64));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/mean.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/mean.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <stdio.h>
#include <stdlib.h>
namespace raft {
namespace stats {
// Test-parameter bundle for the per-column mean primitive.
template <typename T>
struct MeanInputs {
  T tolerance, mean;        // allowed absolute error; true mean of the generated data
  int rows, cols;           // matrix shape
  bool sample, rowMajor;    // sample vs. population mean; memory layout
  unsigned long long int seed;  // RNG seed for reproducibility
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MeanInputs<T>& dims)
{
return os;
}
// Fixture: draws a normal sample with known mean (stddev fixed at 1) and
// computes per-column means on the device for comparison against the truth.
template <typename T>
class MeanTest : public ::testing::TestWithParam<MeanInputs<T>> {
 public:
  MeanTest()
    : params(::testing::TestWithParam<MeanInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      rows(params.rows),
      cols(params.cols),
      data(rows * cols, stream),
      mean_act(cols, stream)
  {
  }

 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    int len = rows * cols;
    // Generate the input on the device, then run the primitive under test.
    normal(handle, r, data.data(), len, params.mean, (T)1.0);
    meanSGtest(data.data(), stream);
  }

  // Computes the per-column mean of `data` into mean_act, dispatching on the
  // requested memory layout (the two branches differ only in the layout tag).
  void meanSGtest(T* data, cudaStream_t stream)
  {
    int rows = params.rows, cols = params.cols;
    if (params.rowMajor) {
      using layout = raft::row_major;
      mean(handle,
           raft::make_device_matrix_view<const T, int, layout>(data, rows, cols),
           raft::make_device_vector_view<T, int>(mean_act.data(), cols),
           params.sample);
    } else {
      using layout = raft::col_major;
      mean(handle,
           raft::make_device_matrix_view<const T, int, layout>(data, rows, cols),
           raft::make_device_vector_view<T, int>(mean_act.data(), cols),
           params.sample);
    }
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  MeanInputs<T> params;
  int rows, cols;
  rmm::device_uvector<T> data, mean_act;  // input matrix; computed column means
};
// Note: For 1024 samples, 256 experiments, a mean of 1.0 with stddev=1.0, the
// measured mean (of a normal distribution) will fall outside of an epsilon of
// 0.15 only 4/10000 times. (epsilon of 0.1 will fail 30/100 times)
// Aggregate field order: tolerance, mean, rows, cols, sample, rowMajor, seed.
const std::vector<MeanInputs<float>> inputsf = {{0.15f, 1.f, 1024, 32, true, false, 1234ULL},
                                                {0.15f, 1.f, 1024, 64, true, false, 1234ULL},
                                                {0.15f, 1.f, 1024, 128, true, false, 1234ULL},
                                                {0.15f, 1.f, 1024, 256, true, false, 1234ULL},
                                                {0.15f, -1.f, 1024, 32, false, false, 1234ULL},
                                                {0.15f, -1.f, 1024, 64, false, false, 1234ULL},
                                                {0.15f, -1.f, 1024, 128, false, false, 1234ULL},
                                                {0.15f, -1.f, 1024, 256, false, false, 1234ULL},
                                                {0.15f, 1.f, 1024, 32, true, true, 1234ULL},
                                                {0.15f, 1.f, 1024, 64, true, true, 1234ULL},
                                                {0.15f, 1.f, 1024, 128, true, true, 1234ULL},
                                                {0.15f, 1.f, 1024, 256, true, true, 1234ULL},
                                                {0.15f, -1.f, 1024, 32, false, true, 1234ULL},
                                                {0.15f, -1.f, 1024, 64, false, true, 1234ULL},
                                                {0.15f, -1.f, 1024, 128, false, true, 1234ULL},
                                                {0.15f, -1.f, 1024, 256, false, true, 1234ULL}};

// Same grid for double precision.
const std::vector<MeanInputs<double>> inputsd = {{0.15, 1.0, 1024, 32, true, false, 1234ULL},
                                                 {0.15, 1.0, 1024, 64, true, false, 1234ULL},
                                                 {0.15, 1.0, 1024, 128, true, false, 1234ULL},
                                                 {0.15, 1.0, 1024, 256, true, false, 1234ULL},
                                                 {0.15, -1.0, 1024, 32, false, false, 1234ULL},
                                                 {0.15, -1.0, 1024, 64, false, false, 1234ULL},
                                                 {0.15, -1.0, 1024, 128, false, false, 1234ULL},
                                                 {0.15, -1.0, 1024, 256, false, false, 1234ULL},
                                                 {0.15, 1.0, 1024, 32, true, true, 1234ULL},
                                                 {0.15, 1.0, 1024, 64, true, true, 1234ULL},
                                                 {0.15, 1.0, 1024, 128, true, true, 1234ULL},
                                                 {0.15, 1.0, 1024, 256, true, true, 1234ULL},
                                                 {0.15, -1.0, 1024, 32, false, true, 1234ULL},
                                                 {0.15, -1.0, 1024, 64, false, true, 1234ULL},
                                                 {0.15, -1.0, 1024, 128, false, true, 1234ULL},
                                                 {0.15, -1.0, 1024, 256, false, true, 1234ULL}};
typedef MeanTest<float> MeanTestF;
// Every computed column mean must lie within `tolerance` of the distribution
// mean used to generate the data.
TEST_P(MeanTestF, Result)
{
  ASSERT_TRUE(
    devArrMatch(params.mean, mean_act.data(), params.cols, CompareApprox<float>(params.tolerance)));
}

typedef MeanTest<double> MeanTestD;
TEST_P(MeanTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    params.mean, mean_act.data(), params.cols, CompareApprox<double>(params.tolerance)));
}

INSTANTIATE_TEST_SUITE_P(MeanTests, MeanTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(MeanTests, MeanTestD, ::testing::ValuesIn(inputsd));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/meanvar.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/math.cuh>
#include <raft/random/rng.cuh>
#include <raft/stats/meanvar.cuh>
#include <raft/util/cudart_utils.hpp>
#include <algorithm>
namespace raft {
namespace stats {
// Test-parameter bundle for the fused mean/variance primitive, including the
// statistically derived tolerances used to validate the results.
template <typename T>
struct MeanVarInputs {
  T mean, stddev;               // parameters of the generated normal data
  int rows, cols;               // matrix shape
  bool sample, rowMajor;        // sample vs. population stats; memory layout
  unsigned long long int seed;  // RNG seed

  // Number of standard errors of slack allowed between estimate and truth.
  static const int N_SIGMAS = 6;

  // Tolerance on the column means: N_SIGMAS standard errors of the mean.
  T mean_tol() const { return T(N_SIGMAS) * stddev / sqrt(T(rows)); }

  // Tolerance on the column variances: N_SIGMAS times the standard error of
  // the sample variance, sigma^2 * sqrt(2 / (rows - 1)).
  T var_tol() const
  {
    const T dof = T(std::max(1, rows - 1));
    return T(N_SIGMAS) * stddev * stddev * sqrt(T(2.0) / dof);
  }
};
// Pretty-printer used by gtest to label parametrized test cases.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MeanVarInputs<T>& ps)
{
  return os << "rows: " << ps.rows << "; cols: " << ps.cols << "; "
            << (ps.rowMajor ? "row-major" : "col-major") << " (tolerance: mean = " << ps.mean_tol()
            << ", var = " << ps.var_tol() << ")";
}
// Fixture: generates normal data with known mean/stddev and runs the fused
// meanvar primitive, producing per-column means and variances in one pass.
template <typename T>
class MeanVarTest : public ::testing::TestWithParam<MeanVarInputs<T>> {
 public:
  MeanVarTest()
    : params(::testing::TestWithParam<MeanVarInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream),
      mean_act(params.cols, stream),
      vars_act(params.cols, stream)
  {
  }

 protected:
  void SetUp() override
  {
    random::RngState r(params.seed);
    normal(handle, r, data.data(), params.cols * params.rows, params.mean, params.stddev);
    // The two branches differ only in the compile-time layout tag.
    if (params.rowMajor) {
      using layout = raft::row_major;
      meanvar(
        handle,
        raft::make_device_matrix_view<const T, int, layout>(data.data(), params.rows, params.cols),
        raft::make_device_vector_view<T, int>(mean_act.data(), params.cols),
        raft::make_device_vector_view<T, int>(vars_act.data(), params.cols),
        params.sample);
    } else {
      using layout = raft::col_major;
      meanvar(
        handle,
        raft::make_device_matrix_view<const T, int, layout>(data.data(), params.rows, params.cols),
        raft::make_device_vector_view<T, int>(mean_act.data(), params.cols),
        raft::make_device_vector_view<T, int>(vars_act.data(), params.cols),
        params.sample);
    }
    // Ensure results are ready before the TEST_P bodies compare them.
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  MeanVarInputs<T> params;
  rmm::device_uvector<T> data, mean_act, vars_act;
};
// Aggregate field order: mean, stddev, rows, cols, sample, rowMajor, seed.
// NOTE(review): the {-1.f, 2.f, 1024, 256, false, false, ...} entry appears
// twice -- possibly intentional, confirm.
const std::vector<MeanVarInputs<float>> inputsf = {
  {1.f, 2.f, 1024, 32, true, false, 1234ULL},   {1.f, 2.f, 1024, 64, true, false, 1234ULL},
  {1.f, 2.f, 1024, 128, true, false, 1234ULL},  {1.f, 2.f, 1024, 256, true, false, 1234ULL},
  {-1.f, 2.f, 1024, 32, false, false, 1234ULL}, {-1.f, 2.f, 1024, 64, false, false, 1234ULL},
  {-1.f, 2.f, 1024, 128, false, false, 1234ULL}, {-1.f, 2.f, 1024, 256, false, false, 1234ULL},
  {-1.f, 2.f, 1024, 256, false, false, 1234ULL}, {-1.f, 2.f, 1024, 257, false, false, 1234ULL},
  {1.f, 2.f, 1024, 32, true, true, 1234ULL},    {1.f, 2.f, 1024, 64, true, true, 1234ULL},
  {1.f, 2.f, 1024, 128, true, true, 1234ULL},   {1.f, 2.f, 1024, 256, true, true, 1234ULL},
  {-1.f, 2.f, 1024, 32, false, true, 1234ULL},  {-1.f, 2.f, 1024, 64, false, true, 1234ULL},
  {-1.f, 2.f, 1024, 128, false, true, 1234ULL}, {-1.f, 2.f, 1024, 256, false, true, 1234ULL},
  {-1.f, 2.f, 1024, 257, false, true, 1234ULL}, {-1.f, 2.f, 700, 13, false, true, 1234ULL},
  {10.f, 2.f, 500000, 811, false, true, 1234ULL}};

// Double-precision grid (no odd-shaped / large cases).
const std::vector<MeanVarInputs<double>> inputsd = {{1.0, 2.0, 1024, 32, true, false, 1234ULL},
                                                    {1.0, 2.0, 1024, 64, true, false, 1234ULL},
                                                    {1.0, 2.0, 1024, 128, true, false, 1234ULL},
                                                    {1.0, 2.0, 1024, 256, true, false, 1234ULL},
                                                    {-1.0, 2.0, 1024, 32, false, false, 1234ULL},
                                                    {-1.0, 2.0, 1024, 64, false, false, 1234ULL},
                                                    {-1.0, 2.0, 1024, 128, false, false, 1234ULL},
                                                    {-1.0, 2.0, 1024, 256, false, false, 1234ULL},
                                                    {1.0, 2.0, 1024, 32, true, true, 1234ULL},
                                                    {1.0, 2.0, 1024, 64, true, true, 1234ULL},
                                                    {1.0, 2.0, 1024, 128, true, true, 1234ULL},
                                                    {1.0, 2.0, 1024, 256, true, true, 1234ULL},
                                                    {-1.0, 2.0, 1024, 32, false, true, 1234ULL},
                                                    {-1.0, 2.0, 1024, 64, false, true, 1234ULL},
                                                    {-1.0, 2.0, 1024, 128, false, true, 1234ULL},
                                                    {-1.0, 2.0, 1024, 256, false, true, 1234ULL}};
typedef MeanVarTest<float> MeanVarTestF;
// Means are checked against the generator mean within mean_tol(); variances
// against stddev^2 within var_tol() (unscaled comparison).
TEST_P(MeanVarTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    params.mean, mean_act.data(), params.cols, CompareApprox<float>(params.mean_tol()), stream));
  ASSERT_TRUE(devArrMatch(params.stddev * params.stddev,
                          vars_act.data(),
                          params.cols,
                          CompareApproxNoScaling<float>(params.var_tol()),
                          stream));
}

typedef MeanVarTest<double> MeanVarTestD;
TEST_P(MeanVarTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    params.mean, mean_act.data(), params.cols, CompareApprox<double>(params.mean_tol()), stream));
  ASSERT_TRUE(devArrMatch(params.stddev * params.stddev,
                          vars_act.data(),
                          params.cols,
                          CompareApproxNoScaling<double>(params.var_tol()),
                          stream));
}

INSTANTIATE_TEST_SUITE_P(MeanVarTests, MeanVarTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(MeanVarTests, MeanVarTestD, ::testing::ValuesIn(inputsd));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/kl_divergence.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/kl_divergence.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
namespace raft {
namespace stats {
// parameter structure definition
// Test parameters: number of PDF entries and allowed absolute error.
struct klDivergenceParam {
  int nElements;
  double tolerance;
};
// test fixture class
// Fixture: fills two random vectors with values in [0, 1) (note: they are not
// normalized to sum to 1, so they are "PDFs" only in name), computes the KL
// divergence on the host as the golden value, then runs the device primitive.
template <typename DataT>
class klDivergenceTest : public ::testing::TestWithParam<klDivergenceParam> {
 protected:
  // the constructor
  void SetUp() override
  {
    // getting the parameters
    params = ::testing::TestWithParam<klDivergenceParam>::GetParam();

    stream = resource::get_cuda_stream(handle);

    nElements = params.nElements;

    // generating random value test input
    std::vector<DataT> h_modelPDF(nElements, 0);
    std::vector<DataT> h_candidatePDF(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_real_distribution<DataT> realGenerator(0.0, 1.0);

    std::generate(h_modelPDF.begin(), h_modelPDF.end(), [&]() { return realGenerator(dre); });
    std::generate(
      h_candidatePDF.begin(), h_candidatePDF.end(), [&]() { return realGenerator(dre); });

    // allocating and initializing memory to the GPU
    rmm::device_uvector<DataT> d_modelPDF(nElements, stream);
    rmm::device_uvector<DataT> d_candidatePDF(nElements, stream);
    RAFT_CUDA_TRY(cudaMemset(d_modelPDF.data(), 0, d_modelPDF.size() * sizeof(DataT)));
    RAFT_CUDA_TRY(cudaMemset(d_candidatePDF.data(), 0, d_candidatePDF.size() * sizeof(DataT)));

    raft::update_device(d_modelPDF.data(), &h_modelPDF[0], (int)nElements, stream);
    raft::update_device(d_candidatePDF.data(), &h_candidatePDF[0], (int)nElements, stream);

    // generating the golden output
    // Zero model entries contribute nothing (lim p->0 of p*log(p/q) = 0).
    // NOTE(review): a candidate entry of exactly 0 with a nonzero model entry
    // would yield +inf here; uniform_real_distribution(0.0, 1.0) can produce
    // 0.0, so this relies on that being vanishingly unlikely -- confirm.
    for (int i = 0; i < nElements; ++i) {
      if (h_modelPDF[i] == 0.0)
        truthklDivergence += 0;

      else
        truthklDivergence += h_modelPDF[i] * log(h_modelPDF[i] / h_candidatePDF[i]);
    }

    // calling the kl_divergence CUDA implementation
    computedklDivergence = raft::stats::kl_divergence(
      handle,
      raft::make_device_vector_view<const DataT>(d_modelPDF.data(), nElements),
      raft::make_device_vector_view<const DataT>(d_candidatePDF.data(), nElements));
  }

  // declaring the data values
  raft::resources handle;
  klDivergenceParam params;
  int nElements = 0;
  DataT truthklDivergence = 0;
  DataT computedklDivergence = 0;
  cudaStream_t stream = 0;
};
// setting test parameter values
// Parameter grid: {nElements, tolerance}.
const std::vector<klDivergenceParam> inputs = {
  {500, 0.000001}, {200, 0.001}, {5000, 0.000001}, {500000, 0.000001}
};

// writing the test suite
typedef klDivergenceTest<double> klDivergenceTestClass;
TEST_P(klDivergenceTestClass, Result)
{
  ASSERT_NEAR(computedklDivergence, truthklDivergence, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(klDivergence, klDivergenceTestClass, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/stddev.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/math.cuh>
#include <raft/random/rng.cuh>
#include <raft/stats/mean.cuh>
#include <raft/stats/stddev.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace stats {
// Test-parameter bundle for the per-column stddev/variance primitives.
template <typename T>
struct StdDevInputs {
  T tolerance, mean, stddev;    // allowed error; parameters of the generated data
  int rows, cols;               // matrix shape
  bool sample, rowMajor;        // sample vs. population stats; memory layout
  unsigned long long int seed;  // RNG seed
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const StdDevInputs<T>& dims)
{
return os;
}
// Fixture: draws a normal sample with known mean/stddev, then computes
// per-column mean, stddev and variance on the device. The variances are
// square-rooted in place so both result buffers can be checked against the
// true stddev.
template <typename T>
class StdDevTest : public ::testing::TestWithParam<StdDevInputs<T>> {
 public:
  StdDevTest()
    : params(::testing::TestWithParam<StdDevInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      rows(params.rows),
      cols(params.cols),
      data(rows * cols, stream),
      mean_act(cols, stream),
      stddev_act(cols, stream),
      vars_act(cols, stream)
  {
  }

 protected:
  void SetUp() override
  {
    random::RngState r(params.seed);
    int len = rows * cols;
    // The buffers were already sized in the constructor; these resizes are
    // no-ops kept so SetUp is self-sufficient.
    data.resize(len, stream);
    mean_act.resize(cols, stream);
    stddev_act.resize(cols, stream);
    vars_act.resize(cols, stream);
    normal(handle, r, data.data(), len, params.mean, params.stddev);
    stdVarSGtest(data.data(), stream);
    resource::sync_stream(handle, stream);
  }

  // Runs mean, stddev and vars on `data`, dispatching on the memory layout,
  // then replaces vars_act with sqrt(vars_act) for direct comparison against
  // stddev_act.
  void stdVarSGtest(T* data, cudaStream_t stream)
  {
    int rows = params.rows, cols = params.cols;

    if (params.rowMajor) {
      using layout_t = raft::row_major;
      mean(handle,
           raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
           raft::make_device_vector_view<T, int>(mean_act.data(), cols),
           params.sample);

      stddev(handle,
             raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
             raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
             raft::make_device_vector_view<T, int>(stddev_act.data(), cols),
             params.sample);

      vars(handle,
           raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
           raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
           raft::make_device_vector_view<T, int>(vars_act.data(), cols),
           params.sample);
    } else {
      using layout_t = raft::col_major;
      // Consistency fix: use the explicit <T, int> index type here like every
      // other view in this fixture (this call previously relied on the
      // default index type).
      mean(handle,
           raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
           raft::make_device_vector_view<T, int>(mean_act.data(), cols),
           params.sample);

      stddev(handle,
             raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
             raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
             raft::make_device_vector_view<T, int>(stddev_act.data(), cols),
             params.sample);

      vars(handle,
           raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
           raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
           raft::make_device_vector_view<T, int>(vars_act.data(), cols),
           params.sample);
    }
    raft::matrix::seqRoot(vars_act.data(), T(1), cols, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  StdDevInputs<T> params;
  int rows, cols;
  rmm::device_uvector<T> data, mean_act, stddev_act, vars_act;
};
// Aggregate field order: tolerance, mean, stddev, rows, cols, sample,
// rowMajor, seed.
const std::vector<StdDevInputs<float>> inputsf = {
  {0.1f, 1.f, 2.f, 1024, 32, true, false, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 64, true, false, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 128, true, false, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 256, true, false, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 32, false, false, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 64, false, false, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 128, false, false, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 256, false, false, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 32, true, true, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 64, true, true, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 128, true, true, 1234ULL},
  {0.1f, 1.f, 2.f, 1024, 256, true, true, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 32, false, true, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 64, false, true, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 128, false, true, 1234ULL},
  {0.1f, -1.f, 2.f, 1024, 256, false, true, 1234ULL}};

// Same grid for double precision.
const std::vector<StdDevInputs<double>> inputsd = {
  {0.1, 1.0, 2.0, 1024, 32, true, false, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 64, true, false, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 128, true, false, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 256, true, false, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 32, false, false, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 64, false, false, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 128, false, false, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 256, false, false, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 32, true, true, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 64, true, true, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 128, true, true, 1234ULL},
  {0.1, 1.0, 2.0, 1024, 256, true, true, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 32, false, true, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 64, false, true, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 128, false, true, 1234ULL},
  {0.1, -1.0, 2.0, 1024, 256, false, true, 1234ULL}};
typedef StdDevTest<float> StdDevTestF;
// First assertion: computed stddev matches the generator stddev. Second:
// stddev agrees with sqrt(variance) (vars_act was square-rooted in SetUp).
TEST_P(StdDevTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    params.stddev, stddev_act.data(), params.cols, CompareApprox<float>(params.tolerance), stream));

  ASSERT_TRUE(devArrMatch(stddev_act.data(),
                          vars_act.data(),
                          params.cols,
                          CompareApprox<float>(params.tolerance),
                          stream));
}

typedef StdDevTest<double> StdDevTestD;
TEST_P(StdDevTestD, Result)
{
  ASSERT_TRUE(devArrMatch(params.stddev,
                          stddev_act.data(),
                          params.cols,
                          CompareApprox<double>(params.tolerance),
                          stream));

  ASSERT_TRUE(devArrMatch(stddev_act.data(),
                          vars_act.data(),
                          params.cols,
                          CompareApprox<double>(params.tolerance),
                          stream));
}

INSTANTIATE_TEST_SUITE_P(StdDevTests, StdDevTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(StdDevTests, StdDevTestD, ::testing::ValuesIn(inputsd));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/accuracy.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <optional>
#include <raft/core/interruptible.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/accuracy.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
namespace raft {
namespace stats {
// Test parameters: allowed error, number of predictions, RNG seed.
template <typename T>
struct AccuracyInputs {
  T tolerance;
  int nrows;
  unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const AccuracyInputs<T>& dims)
{
return os;
}
// Fixture: generates random predictions and references, computes accuracy on
// the device, and recomputes the expected accuracy on the host for comparison.
template <typename T>
class AccuracyTest : public ::testing::TestWithParam<AccuracyInputs<T>> {
 protected:
  AccuracyTest() : stream(resource::get_cuda_stream(handle)) {}

  void SetUp() override
  {
    params = ::testing::TestWithParam<AccuracyInputs<T>>::GetParam();
    raft::random::RngState r(params.seed);
    rmm::device_uvector<int> predictions(params.nrows, stream);
    rmm::device_uvector<int> ref_predictions(params.nrows, stream);
    uniformInt(handle, r, predictions.data(), params.nrows, 0, 10);
    uniformInt(handle, r, ref_predictions.data(), params.nrows, 0, 10);

    // Device result under test.
    actualVal =
      accuracy(handle,
               raft::make_device_vector_view<const int>(predictions.data(), params.nrows),
               raft::make_device_vector_view<const int>(ref_predictions.data(), params.nrows));
    expectedVal = T(0);

    // Host-side golden computation.
    std::vector<int> h_predictions(params.nrows, 0);
    std::vector<int> h_ref_predictions(params.nrows, 0);
    raft::update_host(h_predictions.data(), predictions.data(), params.nrows, stream);
    raft::update_host(h_ref_predictions.data(), ref_predictions.data(), params.nrows, stream);
    // Synchronize *before* reading the host buffers: update_host enqueues the
    // copies on `stream`, so the CPU loop below must not run until they have
    // completed. (The original code synchronized only after the loop.)
    raft::interruptible::synchronize(stream);

    unsigned long long correctly_predicted = 0ULL;
    for (int i = 0; i < params.nrows; ++i) {
      correctly_predicted += (h_predictions[i] - h_ref_predictions[i]) == 0;
    }
    expectedVal = correctly_predicted * 1.0f / params.nrows;
  }

 protected:
  AccuracyInputs<T> params;
  raft::resources handle;
  cudaStream_t stream = 0;
  T expectedVal, actualVal;  // host-computed truth; device-computed accuracy
};
// Parameter grids: {tolerance, nrows, seed}.
const std::vector<AccuracyInputs<float>> inputsf = {
  {0.001f, 30, 1234ULL}, {0.001f, 100, 1234ULL}, {0.001f, 1000, 1234ULL}};
typedef AccuracyTest<float> AccuracyTestF;
TEST_P(AccuracyTestF, Result)
{
  auto eq = raft::CompareApprox<float>(params.tolerance);
  ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(AccuracyTests, AccuracyTestF, ::testing::ValuesIn(inputsf));

const std::vector<AccuracyInputs<double>> inputsd = {
  {0.001, 30, 1234ULL}, {0.001, 100, 1234ULL}, {0.001, 1000, 1234ULL}};
typedef AccuracyTest<double> AccuracyTestD;
TEST_P(AccuracyTestD, Result)
{
  auto eq = raft::CompareApprox<double>(params.tolerance);
  ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(AccuracyTests, AccuracyTestD, ::testing::ValuesIn(inputsd));
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/completeness_score.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/completeness_score.cuh>
#include <raft/stats/entropy.cuh>
#include <raft/stats/mutual_info_score.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
namespace raft {
namespace stats {
// parameter structure definition
// Test parameters: number of labels, inclusive label range, whether truth and
// prediction arrays are identical (completeness must then be 1), and tolerance.
struct completenessParam {
  int nElements;
  int lowerLabelRange;
  int upperLabelRange;
  bool sameArrays;
  double tolerance;
};
// test fixture class
// Fixture: builds random truth/prediction label arrays, computes the golden
// completeness score as MI(truth, pred) / H(pred) (1.0 when the entropy is
// zero or the input is empty), and compares against the device primitive.
template <typename T>
class completenessTest : public ::testing::TestWithParam<completenessParam> {
 protected:
  // the constructor
  completenessTest() : stream(resource::get_cuda_stream(handle)) {}
  void SetUp() override
  {
    // getting the parameters
    params = ::testing::TestWithParam<completenessParam>::GetParam();

    nElements       = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;

    // generating random value test input
    std::vector<int> arr1(nElements, 0);
    std::vector<int> arr2(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);

    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
    if (params.sameArrays) {
      arr2 = arr1;
    } else {
      std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
    }

    // allocating and initializing memory to the GPU
    rmm::device_uvector<T> truthClusterArray(nElements, stream);
    rmm::device_uvector<T> predClusterArray(nElements, stream);
    raft::update_device(truthClusterArray.data(), arr1.data(), (int)nElements, stream);
    raft::update_device(predClusterArray.data(), arr2.data(), (int)nElements, stream);

    // calculating the golden output
    double truthMI, truthEntropy;

    truthMI      = raft::stats::mutual_info_score(truthClusterArray.data(),
                                             predClusterArray.data(),
                                             nElements,
                                             lowerLabelRange,
                                             upperLabelRange,
                                             stream);
    truthEntropy = raft::stats::entropy(
      predClusterArray.data(), nElements, lowerLabelRange, upperLabelRange, stream);

    if (truthEntropy) {
      truthCompleteness = truthMI / truthEntropy;
    } else
      truthCompleteness = 1.0;

    if (nElements == 0) truthCompleteness = 1.0;

    // calling the completeness CUDA implementation
    computedCompleteness = raft::stats::completeness_score(
      handle,
      raft::make_device_vector_view<const T>(truthClusterArray.data(), nElements),
      raft::make_device_vector_view<const T>(predClusterArray.data(), nElements),
      lowerLabelRange,
      upperLabelRange);
  }

  // declaring the data values
  raft::resources handle;
  completenessParam params;
  // NOTE(review): declared as T although the parameter struct stores int;
  // only the T = int instantiation is used below, so this is benign here.
  T lowerLabelRange, upperLabelRange;
  int nElements = 0;
  double truthCompleteness    = 0;
  double computedCompleteness = 0;
  cudaStream_t stream         = 0;
};
// setting test parameter values
// Test cases: {nElements, lowerLabelRange, upperLabelRange, sameArrays, tolerance}.
// The second half repeats the first with identical truth/pred arrays, for which
// the completeness score should be 1.
const std::vector<completenessParam> inputs = {{199, 1, 10, false, 0.000001},
                                               {200, 15, 100, false, 0.000001},
                                               {100, 1, 20, false, 0.000001},
                                               {10, 1, 10, false, 0.000001},
                                               {198, 1, 100, false, 0.000001},
                                               {300, 3, 99, false, 0.000001},
                                               {199, 1, 10, true, 0.000001},
                                               {200, 15, 100, true, 0.000001},
                                               {100, 1, 20, true, 0.000001},
                                               {10, 1, 10, true, 0.000001},
                                               {198, 1, 100, true, 0.000001},
                                               {300, 3, 99, true, 0.000001}};
// writing the test suite
typedef completenessTest<int> completenessTestClass;
TEST_P(completenessTestClass, Result)
{
  // library score must match the host-side reference within tolerance
  ASSERT_NEAR(computedCompleteness, truthCompleteness, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(completeness, completenessTestClass, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/dispersion.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <optional>
#include <raft/core/interruptible.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/dispersion.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
namespace raft {
namespace stats {
// Parameters for one dispersion test case.
template <typename T>
struct DispersionInputs {
  T tolerance;                  // comparison tolerance vs. the host reference
  int dim, clusters;            // feature dimension and number of cluster centroids
  unsigned long long int seed;  // RNG seed for reproducible data
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const DispersionInputs<T>& dims)
{
return os;
}
// Fixture: runs cluster_dispersion() on random centroids with random
// per-cluster point counts and checks both the dispersion value and the
// optional global-mean output against a host-side reference.
template <typename T>
class DispersionTest : public ::testing::TestWithParam<DispersionInputs<T>> {
 protected:
  DispersionTest()
    : stream(resource::get_cuda_stream(handle)), exp_mean(0, stream), act_mean(0, stream)
  {
  }
  void SetUp() override
  {
    params  = ::testing::TestWithParam<DispersionInputs<T>>::GetParam();
    raft::random::RngState r(params.seed);
    int len = params.clusters * params.dim;
    rmm::device_uvector<T> data(len, stream);                  // centroids, clusters x dim
    rmm::device_uvector<int> counts(params.clusters, stream);  // points per cluster
    exp_mean.resize(params.dim, stream);
    act_mean.resize(params.dim, stream);
    uniform(handle, r, data.data(), len, (T)-1.0, (T)1.0);
    uniformInt(handle, r, counts.data(), params.clusters, 1, 100);
    // NOTE(review): h_counts (and h_data below) are consumed right after the
    // async update_host with no stream sync; this relies on the copy being
    // effectively synchronous for pageable host memory -- confirm.
    std::vector<int> h_counts(params.clusters, 0);
    raft::update_host(&(h_counts[0]), counts.data(), params.clusters, stream);
    npoints = 0;
    for (const auto& val : h_counts) {
      npoints += val;
    }
    // device result; also writes the weighted mean into act_mean
    actualVal = cluster_dispersion(
      handle,
      raft::make_device_matrix_view<const T, int>(data.data(), params.clusters, params.dim),
      raft::make_device_vector_view<const int, int>(counts.data(), params.clusters),
      std::make_optional(raft::make_device_vector_view<T, int>(act_mean.data(), params.dim)),
      npoints);
    // host reference: count-weighted mean of the centroids ...
    expectedVal = T(0);
    std::vector<T> h_data(len, T(0));
    raft::update_host(&(h_data[0]), data.data(), len, stream);
    std::vector<T> mean(params.dim, T(0));
    for (int i = 0; i < params.clusters; ++i) {
      for (int j = 0; j < params.dim; ++j) {
        mean[j] += h_data[i * params.dim + j] * T(h_counts[i]);
      }
    }
    for (int i = 0; i < params.dim; ++i) {
      mean[i] /= T(npoints);
    }
    raft::update_device(exp_mean.data(), &(mean[0]), params.dim, stream);
    // ... and sqrt of the count-weighted sum of squared deviations from it
    for (int i = 0; i < params.clusters; ++i) {
      for (int j = 0; j < params.dim; ++j) {
        auto diff = h_data[i * params.dim + j] - mean[j];
        expectedVal += diff * diff * T(h_counts[i]);
      }
    }
    expectedVal = sqrt(expectedVal);
    raft::interruptible::synchronize(stream);
  }
 protected:
  DispersionInputs<T> params;
  raft::resources handle;
  rmm::device_uvector<T> exp_mean, act_mean;  // reference vs. library mean
  cudaStream_t stream = 0;
  int npoints;               // total number of points across all clusters
  T expectedVal, actualVal;  // reference vs. library dispersion
};
const std::vector<DispersionInputs<float>> inputsf = {
{0.001f, 10, 1000, 1234ULL}, {0.001f, 100, 100, 1234ULL}, {0.001f, 1000, 1000, 1234ULL}};
typedef DispersionTest<float> DispersionTestF;
TEST_P(DispersionTestF, Result)
{
auto eq = raft::CompareApprox<float>(params.tolerance);
ASSERT_TRUE(devArrMatch(exp_mean.data(), act_mean.data(), params.dim, eq));
ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(DispersionTests, DispersionTestF, ::testing::ValuesIn(inputsf));
const std::vector<DispersionInputs<double>> inputsd = {
{0.001, 10, 1000, 1234ULL}, {0.001, 100, 100, 1234ULL}, {0.001, 1000, 1000, 1234ULL}};
typedef DispersionTest<double> DispersionTestD;
TEST_P(DispersionTestD, Result)
{
auto eq = raft::CompareApprox<double>(params.tolerance);
ASSERT_TRUE(devArrMatch(exp_mean.data(), act_mean.data(), params.dim, eq));
ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(DispersionTests, DispersionTestD, ::testing::ValuesIn(inputsd));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/regression_metrics.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <optional>
#include <raft/core/interruptible.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/regression_metrics.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
namespace raft {
namespace stats {
// Parameters for one regression-metrics test case.
template <typename T>
struct RegressionInputs {
  T tolerance;                  // comparison tolerance vs. the host reference
  int len;                      // number of prediction/reference pairs
  unsigned long long int seed;  // RNG seed for reproducible data
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const RegressionInputs<T>& dims)
{
return os;
}
/**
 * @brief Host (reference) implementation of the regression metrics.
 *
 * For equal-length vectors of predictions and ground truth computes:
 *  - mean_abs_error:     mean of |predictions[i] - ref_predictions[i]|
 *  - mean_squared_error: mean of the squared differences
 *  - median_abs_error:   median of the absolute differences (mean of the two
 *                        middle values when the length is even)
 * All three outputs are set to 0 for empty input (previously this divided by
 * zero and indexed abs_errors out of bounds).
 *
 * @param predictions      predicted values
 * @param ref_predictions  reference (ground-truth) values, same length
 * @param[out] mean_abs_error     mean absolute error
 * @param[out] mean_squared_error mean squared error
 * @param[out] median_abs_error   median absolute error
 */
template <typename T>
void naive_reg_metrics(std::vector<T>& predictions,
                       std::vector<T>& ref_predictions,
                       double& mean_abs_error,
                       double& mean_squared_error,
                       double& median_abs_error)
{
  auto len = predictions.size();
  if (len == 0) {
    // avoid 0/0 and the out-of-bounds median access on empty input
    mean_abs_error = mean_squared_error = median_abs_error = 0;
    return;
  }
  double abs_diff = 0;
  double sq_diff  = 0;
  std::vector<double> abs_errors(len);
  for (std::size_t i = 0; i < len; ++i) {
    // std::abs (not plain abs): the unqualified C abs() takes int and would
    // silently truncate floating-point differences
    double diff = static_cast<double>(predictions[i]) - ref_predictions[i];
    abs_diff += std::abs(diff);
    sq_diff += diff * diff;
    abs_errors[i] = std::abs(diff);
  }
  mean_abs_error     = abs_diff / len;
  mean_squared_error = sq_diff / len;
  std::sort(abs_errors.begin(), abs_errors.end());
  auto middle = len / 2;
  if (len % 2 == 1) {
    median_abs_error = abs_errors[middle];
  } else {
    median_abs_error = (abs_errors[middle] + abs_errors[middle - 1]) / 2;
  }
}
// Fixture: computes the three regression metrics with the library and with the
// naive_reg_metrics host reference on identical random data; the TEST_P body
// compares the two sets of results.
template <typename T>
class RegressionTest : public ::testing::TestWithParam<RegressionInputs<T>> {
 protected:
  RegressionTest() : stream(resource::get_cuda_stream(handle)) {}
  void SetUp() override
  {
    params = ::testing::TestWithParam<RegressionInputs<T>>::GetParam();
    raft::random::RngState r(params.seed);
    rmm::device_uvector<T> predictions(params.len, stream);
    rmm::device_uvector<T> ref_predictions(params.len, stream);
    uniform(handle, r, predictions.data(), params.len, T(-10.0), T(10.0));
    uniform(handle, r, ref_predictions.data(), params.len, T(-10.0), T(10.0));
    // library (device) results, written into the host scalars below
    regression_metrics(handle,
                       raft::make_device_vector_view<const T>(predictions.data(), params.len),
                       raft::make_device_vector_view<const T>(ref_predictions.data(), params.len),
                       raft::make_host_scalar_view(&mean_abs_error),
                       raft::make_host_scalar_view(&mean_squared_error),
                       raft::make_host_scalar_view(&median_abs_error));
    // host reference on a copy of the same data
    // NOTE(review): h_predictions is consumed right after the async
    // update_host with no stream sync; this relies on the copy being
    // effectively synchronous for pageable host memory -- confirm.
    std::vector<T> h_predictions(params.len, 0);
    std::vector<T> h_ref_predictions(params.len, 0);
    raft::update_host(h_predictions.data(), predictions.data(), params.len, stream);
    raft::update_host(h_ref_predictions.data(), ref_predictions.data(), params.len, stream);
    naive_reg_metrics(h_predictions,
                      h_ref_predictions,
                      ref_mean_abs_error,
                      ref_mean_squared_error,
                      ref_median_abs_error);
    raft::interruptible::synchronize(stream);
  }
 protected:
  raft::resources handle;
  RegressionInputs<T> params;
  cudaStream_t stream = 0;
  // library outputs
  double mean_abs_error     = 0;
  double mean_squared_error = 0;
  double median_abs_error   = 0;
  // host-reference outputs
  double ref_mean_abs_error     = 0;
  double ref_mean_squared_error = 0;
  double ref_median_abs_error   = 0;
};
const std::vector<RegressionInputs<float>> inputsf = {
{0.001f, 30, 1234ULL}, {0.001f, 100, 1234ULL}, {0.001f, 4000, 1234ULL}};
typedef RegressionTest<float> RegressionTestF;
TEST_P(RegressionTestF, Result)
{
auto eq = raft::CompareApprox<float>(params.tolerance);
ASSERT_TRUE(match(ref_mean_abs_error, mean_abs_error, eq));
ASSERT_TRUE(match(ref_mean_squared_error, mean_squared_error, eq));
ASSERT_TRUE(match(ref_median_abs_error, median_abs_error, eq));
}
INSTANTIATE_TEST_CASE_P(RegressionTests, RegressionTestF, ::testing::ValuesIn(inputsf));
const std::vector<RegressionInputs<double>> inputsd = {
{0.001, 30, 1234ULL}, {0.001, 100, 1234ULL}, {0.001, 4000, 1234ULL}};
typedef RegressionTest<double> RegressionTestD;
TEST_P(RegressionTestD, Result)
{
auto eq = raft::CompareApprox<double>(params.tolerance);
ASSERT_TRUE(match(ref_mean_abs_error, mean_abs_error, eq));
ASSERT_TRUE(match(ref_mean_squared_error, mean_squared_error, eq));
ASSERT_TRUE(match(ref_median_abs_error, median_abs_error, eq));
}
INSTANTIATE_TEST_CASE_P(RegressionTests, RegressionTestD, ::testing::ValuesIn(inputsd));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/homogeneity_score.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/homogeneity_score.cuh>
#include <raft/stats/mutual_info_score.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
namespace raft {
namespace stats {
// parameter structure definition
// Parameters for one homogeneity test case.
struct homogeneityParam {
  int nElements;        // number of labels per array
  int lowerLabelRange;  // smallest label value generated
  int upperLabelRange;  // largest label value generated
  bool sameArrays;      // when true, truth and prediction arrays are identical
  double tolerance;     // comparison tolerance vs. the host reference
};
// test fixture class
// Fixture: builds random labelings, computes a host-side reference homogeneity
// score (MI / entropy of the truth labels), then runs the library
// homogeneity_score; the TEST_P body compares the two results.
template <typename T>
class homogeneityTest : public ::testing::TestWithParam<homogeneityParam> {
 protected:
  // the constructor
  void SetUp() override
  {
    // getting the parameters
    params          = ::testing::TestWithParam<homogeneityParam>::GetParam();
    nElements       = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;
    stream          = resource::get_cuda_stream(handle);
    // generating random value test input: labels drawn uniformly from
    // [lowerLabelRange, upperLabelRange]; identical arrays when sameArrays
    std::vector<int> arr1(nElements, 0);
    std::vector<int> arr2(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
    if (params.sameArrays) {
      arr2 = arr1;
    } else {
      std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
    }
    // allocating and initializing memory to the GPU
    rmm::device_uvector<T> truthClusterArray(nElements, stream);
    rmm::device_uvector<T> predClusterArray(nElements, stream);
    raft::update_device(truthClusterArray.data(), &arr1[0], (int)nElements, stream);
    raft::update_device(predClusterArray.data(), &arr2[0], (int)nElements, stream);
    // calculating the golden output: homogeneity = MI(truth, pred) / H(truth),
    // defined as 1 when the truth entropy is zero or the input is empty
    double truthMI, truthEntropy;
    truthMI      = raft::stats::mutual_info_score(truthClusterArray.data(),
                                             predClusterArray.data(),
                                             nElements,
                                             lowerLabelRange,
                                             upperLabelRange,
                                             stream);
    truthEntropy = raft::stats::entropy(
      truthClusterArray.data(), nElements, lowerLabelRange, upperLabelRange, stream);
    if (truthEntropy) {
      truthHomogeneity = truthMI / truthEntropy;
    } else
      truthHomogeneity = 1.0;
    if (nElements == 0) truthHomogeneity = 1.0;
    // calling the homogeneity CUDA implementation
    computedHomogeneity = raft::stats::homogeneity_score(
      handle,
      raft::make_device_vector_view<const T>(truthClusterArray.data(), nElements),
      raft::make_device_vector_view<const T>(predClusterArray.data(), nElements),
      lowerLabelRange,
      upperLabelRange);
  }
  // declaring the data values
  raft::resources handle;
  homogeneityParam params;  // parameters of the current test case
  T lowerLabelRange, upperLabelRange;
  int nElements              = 0;  // number of labels per array
  double truthHomogeneity    = 0;  // host-computed reference score
  double computedHomogeneity = 0;  // score produced by the library
  cudaStream_t stream        = 0;
};
// setting test parameter values
const std::vector<homogeneityParam> inputs = {{199, 1, 10, false, 0.000001},
{200, 15, 100, false, 0.000001},
{100, 1, 20, false, 0.000001},
{10, 1, 10, false, 0.000001},
{198, 1, 100, false, 0.000001},
{300, 3, 99, false, 0.000001},
{199, 1, 10, true, 0.000001},
{200, 15, 100, true, 0.000001},
{100, 1, 20, true, 0.000001},
{10, 1, 10, true, 0.000001},
{198, 1, 100, true, 0.000001},
{300, 3, 99, true, 0.000001}};
// writing the test suite
typedef homogeneityTest<int> homogeneityTestClass;
TEST_P(homogeneityTestClass, Result)
{
ASSERT_NEAR(computedHomogeneity, truthHomogeneity, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(homogeneity, homogeneityTestClass, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/neighborhood_recall.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../neighbors/ann_utils.cuh"
#include "../test_utils.h"
#include <raft/core/host_mdarray.hpp>
#include <raft/core/mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft_internal/neighbors/naive_knn.cuh>
#include <raft/stats/neighborhood_recall.cuh>
#include <raft/util/itertools.hpp>
#include <gtest/gtest.h>
namespace raft::stats {
// Parameters for one neighborhood-recall test case.
struct NeighborhoodRecallInputs {
  int n_rows;  // number of dataset (and query) points
  int n_cols;  // feature dimension
  int k;       // neighbors per query
};
/**
 * @brief Fixture for neighborhood_recall: computes exact k-NN for two random
 * datasets with a brute-force search, derives recall between the two result
 * sets on the CPU, and checks the GPU implementation (indices-only and
 * distance-aware overloads) against it.
 */
template <typename DistanceT, typename IdxT>
class NeighborhoodRecallTest : public ::testing::TestWithParam<NeighborhoodRecallInputs> {
 public:
  NeighborhoodRecallTest()
    : ps{::testing::TestWithParam<NeighborhoodRecallInputs>::GetParam()},
      data_1{raft::make_device_matrix<DistanceT, IdxT>(res, ps.n_rows, ps.n_cols)},
      data_2{raft::make_device_matrix<DistanceT, IdxT>(res, ps.n_rows, ps.n_cols)}
  {
  }

 protected:
  void test_recall()
  {
    size_t queries_size = ps.n_rows * ps.k;

    // calculate nn for dataset 1
    auto distances_1 = raft::make_device_matrix<DistanceT, IdxT>(res, ps.n_rows, ps.k);
    auto indices_1   = raft::make_device_matrix<IdxT, IdxT>(res, ps.n_rows, ps.k);
    raft::neighbors::naive_knn<DistanceT, DistanceT, IdxT>(
      res,
      distances_1.data_handle(),
      indices_1.data_handle(),
      data_1.data_handle(),
      data_1.data_handle(),
      ps.n_rows,
      ps.n_rows,
      ps.n_cols,
      ps.k,
      raft::distance::DistanceType::L2Expanded);
    std::vector<DistanceT> distances_1_h(queries_size);
    std::vector<IdxT> indices_1_h(queries_size);
    raft::copy(distances_1_h.data(),
               distances_1.data_handle(),
               ps.n_rows * ps.k,
               raft::resource::get_cuda_stream(res));
    raft::copy(indices_1_h.data(),
               indices_1.data_handle(),
               ps.n_rows * ps.k,
               raft::resource::get_cuda_stream(res));

    // calculate nn for dataset 2
    auto distances_2 = raft::make_device_matrix<DistanceT, IdxT>(res, ps.n_rows, ps.k);
    auto indices_2   = raft::make_device_matrix<IdxT, IdxT>(res, ps.n_rows, ps.k);
    raft::neighbors::naive_knn<DistanceT, DistanceT, IdxT>(
      res,
      distances_2.data_handle(),
      indices_2.data_handle(),
      data_2.data_handle(),
      data_2.data_handle(),
      ps.n_rows,
      ps.n_rows,
      ps.n_cols,
      ps.k,
      raft::distance::DistanceType::L2Expanded);
    std::vector<DistanceT> distances_2_h(queries_size);
    std::vector<IdxT> indices_2_h(queries_size);
    raft::copy(distances_2_h.data(),
               distances_2.data_handle(),
               ps.n_rows * ps.k,
               raft::resource::get_cuda_stream(res));
    raft::copy(indices_2_h.data(),
               indices_2.data_handle(),
               ps.n_rows * ps.k,
               raft::resource::get_cuda_stream(res));
    raft::resource::sync_stream(res);

    // find CPU recall scores
    [[maybe_unused]] auto [indices_only_recall_h, mc1, tc1] =
      raft::neighbors::calc_recall(indices_1_h, indices_2_h, ps.n_rows, ps.k);
    [[maybe_unused]] auto [recall_h, mc2, tc2] = raft::neighbors::calc_recall(
      indices_1_h, indices_2_h, distances_1_h, distances_2_h, ps.n_rows, ps.k, 0.001);

    // find GPU recall scores: first the indices-only overload
    auto s1                         = 0;
    auto indices_only_recall_scalar = raft::make_host_scalar<double>(s1);
    neighborhood_recall(res,
                        raft::make_const_mdspan(indices_1.view()),
                        raft::make_const_mdspan(indices_2.view()),
                        indices_only_recall_scalar.view());
    // ... then the distance-aware overload (an unused eps host scalar that
    // was never passed to the call has been removed)
    auto s2            = 0;
    auto recall_scalar = raft::make_host_scalar<double>(s2);
    neighborhood_recall<IdxT, IdxT, double, DistanceT>(res,
                                                       raft::make_const_mdspan(indices_1.view()),
                                                       raft::make_const_mdspan(indices_2.view()),
                                                       recall_scalar.view(),
                                                       raft::make_const_mdspan(distances_1.view()),
                                                       raft::make_const_mdspan(distances_2.view()));

    // assert correctness: GPU scores must match the CPU reference
    ASSERT_TRUE(raft::match(indices_only_recall_h,
                            *indices_only_recall_scalar.data_handle(),
                            raft::CompareApprox<double>(0.01)));
    ASSERT_TRUE(
      raft::match(recall_h, *recall_scalar.data_handle(), raft::CompareApprox<double>(0.01)));
  }

  void SetUp() override
  {
    // form two random datasets
    raft::random::Rng r1(1234ULL);
    r1.normal(data_1.data_handle(),
              ps.n_rows * ps.n_cols,
              DistanceT(0.1),
              DistanceT(2.0),
              raft::resource::get_cuda_stream(res));
    raft::random::Rng r2(21111ULL);
    r2.normal(data_2.data_handle(),
              ps.n_rows * ps.n_cols,
              DistanceT(0.1),
              DistanceT(2.0),
              raft::resource::get_cuda_stream(res));
    resource::sync_stream(res);
  }

 private:
  raft::resources res;
  NeighborhoodRecallInputs ps;
  raft::device_matrix<DistanceT, IdxT> data_1;
  raft::device_matrix<DistanceT, IdxT> data_2;
};
const std::vector<NeighborhoodRecallInputs> inputs =
raft::util::itertools::product<NeighborhoodRecallInputs>({10, 50, 100}, // n_rows
{80, 100}, // n_cols
{32, 64}); // k
using NeighborhoodRecallTestF_U32 = NeighborhoodRecallTest<float, std::uint32_t>;
TEST_P(NeighborhoodRecallTestF_U32, AnnCagra) { this->test_recall(); }
INSTANTIATE_TEST_CASE_P(NeighborhoodRecallTest,
NeighborhoodRecallTestF_U32,
::testing::ValuesIn(inputs));
} // end namespace raft::stats
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/r2_score.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <optional>
#include <raft/core/interruptible.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/r2_score.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
namespace raft {
namespace stats {
// Parameters for one r2_score test case.
template <typename T>
struct R2_scoreInputs {
  T tolerance;                  // comparison tolerance vs. the host reference
  int nrows;                    // number of observations
  unsigned long long int seed;  // RNG seed for reproducible data
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const R2_scoreInputs<T>& dims)
{
return os;
}
/**
 * @brief Fixture for r2_score: generates random y / y_hat, computes the
 * library score and a host-side reference R^2 = 1 - SSE / SSTO; the TEST_P
 * body compares the two.
 */
template <typename T>
class R2_scoreTest : public ::testing::TestWithParam<R2_scoreInputs<T>> {
 protected:
  R2_scoreTest() : stream(resource::get_cuda_stream(handle)) {}
  void SetUp() override
  {
    params = ::testing::TestWithParam<R2_scoreInputs<T>>::GetParam();
    raft::random::RngState r(params.seed);
    rmm::device_uvector<T> y(params.nrows, stream);
    rmm::device_uvector<T> y_hat(params.nrows, stream);
    uniform(handle, r, y.data(), params.nrows, (T)-1.0, (T)1.0);
    uniform(handle, r, y_hat.data(), params.nrows, (T)-1.0, (T)1.0);
    // library (device) result
    actualVal   = r2_score(handle,
                         raft::make_device_vector_view<const T>(y.data(), params.nrows),
                         raft::make_device_vector_view<const T>(y_hat.data(), params.nrows));
    // host reference: R^2 = 1 - sum((y - y_hat)^2) / sum((y - mean(y))^2)
    // NOTE(review): h_y / h_y_hat are consumed right after the async
    // update_host with no stream sync; this relies on the copy being
    // effectively synchronous for pageable host memory -- confirm.
    expectedVal = T(0);
    std::vector<T> h_y(params.nrows, 0);
    std::vector<T> h_y_hat(params.nrows, 0);
    raft::update_host(h_y.data(), y.data(), params.nrows, stream);
    raft::update_host(h_y_hat.data(), y_hat.data(), params.nrows, stream);
    T mean = T(0);
    for (int i = 0; i < params.nrows; ++i) {
      mean += h_y[i];
    }
    mean /= params.nrows;
    // (removed two unused scratch vectors sse_arr / ssto_arr)
    T sse  = T(0);
    T ssto = T(0);
    for (int i = 0; i < params.nrows; ++i) {
      sse += (h_y[i] - h_y_hat[i]) * (h_y[i] - h_y_hat[i]);
      ssto += (h_y[i] - mean) * (h_y[i] - mean);
    }
    expectedVal = 1.0 - sse / ssto;
    raft::interruptible::synchronize(stream);
  }

 protected:
  R2_scoreInputs<T> params;
  raft::resources handle;
  cudaStream_t stream = 0;
  T expectedVal, actualVal;  // host reference vs. library score
};
const std::vector<R2_scoreInputs<float>> inputsf = {
{0.001f, 30, 1234ULL}, {0.001f, 100, 1234ULL}, {0.001f, 1000, 1234ULL}};
typedef R2_scoreTest<float> R2_scoreTestF;
TEST_P(R2_scoreTestF, Result)
{
auto eq = raft::CompareApprox<float>(params.tolerance);
ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(R2_scoreTests, R2_scoreTestF, ::testing::ValuesIn(inputsf));
const std::vector<R2_scoreInputs<double>> inputsd = {
{0.001, 30, 1234ULL}, {0.001, 100, 1234ULL}, {0.001, 1000, 1234ULL}};
typedef R2_scoreTest<double> R2_scoreTestD;
TEST_P(R2_scoreTestD, Result)
{
auto eq = raft::CompareApprox<double>(params.tolerance);
ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(R2_scoreTests, R2_scoreTestD, ::testing::ValuesIn(inputsd));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/information_criterion.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/information_criterion.cuh>
#include <raft/core/resources.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <gtest/gtest.h>
#include <cmath>
#include <random>
#include <vector>
namespace raft {
namespace stats {
// Host reference implementation: computes the information criterion for each
// batch member from its log-likelihood, with N = n_params and M = n_samples:
//   AIC  = 2*N                        - 2*loglike
//   AICc = 2*(N + N*(N+1)/(M-N-1))    - 2*loglike   (small-sample correction)
//   BIC  = ln(M)*N                    - 2*loglike
template <typename T>
void naive_ic(
  T* h_ic, const T* h_loglike, IC_Type ic_type, int n_params, int batch_size, int n_samples)
{
  T ic_base{};
  T N = static_cast<T>(n_params);
  T M = static_cast<T>(n_samples);
  // penalty term, identical for every batch member
  switch (ic_type) {
    case AIC: ic_base = (T)2 * N; break;
    case AICc: ic_base = (T)2 * (N + (N * (N + (T)1)) / (M - N - (T)1)); break;
    case BIC: ic_base = std::log(M) * N; break;
  }
// apply the per-member log-likelihood term in parallel over the batch
#pragma omp parallel for
  for (int bid = 0; bid < batch_size; bid++) {
    h_ic[bid] = ic_base - (T)2.0 * h_loglike[bid];
  }
}
// Parameters for one batched information-criterion test case.
template <typename T>
struct BatchedICInputs {
  int batch_size;   // number of independent series in the batch
  int n_params;     // N in the IC formulas
  int n_samples;    // M in the IC formulas
  IC_Type ic_type;  // AIC, AICc or BIC
  T tolerance;      // comparison tolerance vs. the host reference
};
/**
 * @brief Fixture comparing information_criterion_batched (device) against the
 * naive_ic host reference on random log-likelihoods.
 */
template <typename T>
class BatchedICTest : public ::testing::TestWithParam<BatchedICInputs<T>> {
 public:
  BatchedICTest()
    : params(::testing::TestWithParam<BatchedICInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      // device_uvector's first argument is a size in *elements*; the previous
      // `sizeof(T) * params.batch_size` over-allocated by a factor of sizeof(T)
      res_d(params.batch_size, stream)
  {
  }

 protected:
  void SetUp() override
  {
    using std::vector;
    // Create arrays
    std::vector<T> loglike_h = std::vector<T>(params.batch_size);
    res_h.resize(params.batch_size);
    // element count, not bytes (see note in the constructor)
    rmm::device_uvector<T> loglike_d(params.batch_size, stream);
    // Generate random log-likelihoods: log of a value in (0.001, 1.0)
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<T> udis(0.001, 1.0);  // 0 has no log
    for (int i = 0; i < params.batch_size; i++)
      loglike_h[i] = std::log(udis(gen));
    // Copy the data to the device
    raft::update_device(loglike_d.data(), loglike_h.data(), params.batch_size, stream);
    // Compute the tested results
    information_criterion_batched(
      handle,
      raft::make_device_vector_view<const T>(loglike_d.data(), params.batch_size),
      raft::make_device_vector_view(res_d.data(), params.batch_size),
      params.ic_type,
      params.n_params,
      params.n_samples);
    // Compute the expected results
    naive_ic(res_h.data(),
             loglike_h.data(),
             params.ic_type,
             params.n_params,
             params.batch_size,
             params.n_samples);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }

 protected:
  raft::resources handle;
  cudaStream_t stream = 0;
  BatchedICInputs<T> params;
  rmm::device_uvector<T> res_d;  // device (library) results
  std::vector<T> res_h;          // host reference results
};
// Test parameters: {batch_size, n_params, n_samples, ic_type, tolerance}
const std::vector<BatchedICInputs<double>> inputsd = {
{1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}};
// Test parameters: {batch_size, n_params, n_samples, ic_type, tolerance}
const std::vector<BatchedICInputs<float>> inputsf = {
{1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}};
using BatchedICTestD = BatchedICTest<double>;
using BatchedICTestF = BatchedICTest<float>;
TEST_P(BatchedICTestD, Result)
{
ASSERT_TRUE(devArrMatchHost(res_h.data(),
res_d.data(),
params.batch_size,
raft::CompareApprox<double>(params.tolerance),
stream));
}
TEST_P(BatchedICTestF, Result)
{
ASSERT_TRUE(devArrMatchHost(res_h.data(),
res_d.data(),
params.batch_size,
raft::CompareApprox<float>(params.tolerance),
stream));
}
INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestF, ::testing::ValuesIn(inputsf));
} // namespace stats
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/cov.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/cov.cuh>
#include <raft/stats/mean.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace stats {
// Parameters for one covariance test case.
template <typename T>
struct CovInputs {
  T tolerance, mean, var;         // comparison tolerance; mean/spread of the generated data
  int rows, cols;                 // shape of the data matrix
  bool sample, rowMajor, stable;  // sample vs. population; layout; stable-algorithm flag
  unsigned long long int seed;    // RNG seed for reproducible data
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const CovInputs<T>& dims)
{
return os;
}
// Fixture for cov(): computes the covariance of random normally-distributed
// data through the mdspan API (row- and column-major), plus a small
// hand-checked column-major example through the raw-pointer overload.
template <typename T>
class CovTest : public ::testing::TestWithParam<CovInputs<T>> {
 protected:
  CovTest()
    : data(0, stream),
      mean_act(0, stream),
      cov_act(0, stream),
      cov_cm(0, stream),
      cov_cm_ref(0, stream)
  {
  }
  void SetUp() override
  {
    // NOTE(review): these locals shadow the `handle`/`stream` members declared
    // below; the member `cublasHandle_t handle` is never initialized or used,
    // and the member stream stays 0 (the default stream, which the constructor
    // above used for the uvectors) -- confirm the shadowing is intentional.
    raft::resources handle;
    cudaStream_t stream = resource::get_cuda_stream(handle);
    params              = ::testing::TestWithParam<CovInputs<T>>::GetParam();
    params.tolerance *= 2;
    raft::random::RngState r(params.seed);
    int rows = params.rows, cols = params.cols;
    auto len = rows * cols;
    T var    = params.var;
    data.resize(len, stream);
    mean_act.resize(cols, stream);
    cov_act.resize(cols * cols, stream);
    normal(handle, r, data.data(), len, params.mean, var);
    raft::stats::mean(
      mean_act.data(), data.data(), cols, rows, params.sample, params.rowMajor, stream);
    // covariance through the mdspan API, in the layout under test
    if (params.rowMajor) {
      using layout = raft::row_major;
      cov(handle,
          raft::make_device_matrix_view<T, std::uint32_t, layout>(data.data(), rows, cols),
          raft::make_device_vector_view<const T, std::uint32_t>(mean_act.data(), cols),
          raft::make_device_matrix_view<T, std::uint32_t, layout>(cov_act.data(), cols, cols),
          params.sample,
          params.stable);
    } else {
      using layout = raft::col_major;
      cov(handle,
          raft::make_device_matrix_view<T, std::uint32_t, layout>(data.data(), rows, cols),
          raft::make_device_vector_view<const T, std::uint32_t>(mean_act.data(), cols),
          raft::make_device_matrix_view<T, std::uint32_t, layout>(cov_act.data(), cols, cols),
          params.sample,
          params.stable);
    }
    // small 3x2 column-major example with a precomputed reference covariance,
    // exercised through the raw-pointer overload of cov()
    T data_h[6]       = {1.0, 2.0, 5.0, 4.0, 2.0, 1.0};
    T cov_cm_ref_h[4] = {4.3333, -2.8333, -2.8333, 2.333};
    cov_cm.resize(4, stream);
    cov_cm_ref.resize(4, stream);
    rmm::device_uvector<T> data_cm(6, stream);
    rmm::device_uvector<T> mean_cm(2, stream);
    raft::update_device(data_cm.data(), data_h, 6, stream);
    raft::update_device(cov_cm_ref.data(), cov_cm_ref_h, 4, stream);
    raft::stats::mean(mean_cm.data(), data_cm.data(), 2, 3, true, false, stream);
    cov(handle, cov_cm.data(), data_cm.data(), mean_cm.data(), 2, 3, true, false, true, stream);
  }

 protected:
  cublasHandle_t handle;  // NOTE(review): unused and shadowed in SetUp -- confirm
  cudaStream_t stream = 0;
  CovInputs<T> params;
  rmm::device_uvector<T> data, mean_act, cov_act, cov_cm, cov_cm_ref;
};
///@todo: add stable=false after it has been implemented
const std::vector<CovInputs<float>> inputsf = {
{0.03f, 1.f, 2.f, 32 * 1024, 32, true, false, true, 1234ULL},
{0.03f, 1.f, 2.f, 32 * 1024, 64, true, false, true, 1234ULL},
{0.03f, 1.f, 2.f, 32 * 1024, 128, true, false, true, 1234ULL},
{0.03f, 1.f, 2.f, 32 * 1024, 256, true, false, true, 1234ULL},
{0.03f, -1.f, 2.f, 32 * 1024, 32, false, false, true, 1234ULL},
{0.03f, -1.f, 2.f, 32 * 1024, 64, false, false, true, 1234ULL},
{0.03f, -1.f, 2.f, 32 * 1024, 128, false, false, true, 1234ULL},
{0.03f, -1.f, 2.f, 32 * 1024, 256, false, false, true, 1234ULL},
{0.03f, 1.f, 2.f, 32 * 1024, 32, true, true, true, 1234ULL},
{0.03f, 1.f, 2.f, 32 * 1024, 64, true, true, true, 1234ULL},
{0.03f, 1.f, 2.f, 32 * 1024, 128, true, true, true, 1234ULL},
{0.03f, 1.f, 2.f, 32 * 1024, 256, true, true, true, 1234ULL},
{0.03f, -1.f, 2.f, 32 * 1024, 32, false, true, true, 1234ULL},
{0.03f, -1.f, 2.f, 32 * 1024, 64, false, true, true, 1234ULL},
{0.03f, -1.f, 2.f, 32 * 1024, 128, false, true, true, 1234ULL},
{0.03f, -1.f, 2.f, 32 * 1024, 256, false, true, true, 1234ULL}};
// Double-precision variant of the same parameter grid (see the CovInputs<T>
// definition for the exact field order — it is declared above this file view).
const std::vector<CovInputs<double>> inputsd = {
  {0.03, 1.0, 2.0, 32 * 1024, 32, true, false, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 64, true, false, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 128, true, false, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 256, true, false, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 32, false, false, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 64, false, false, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 128, false, false, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 256, false, false, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 32, true, true, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 64, true, true, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 128, true, true, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 256, true, true, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 32, false, true, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 64, false, true, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 128, false, true, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 256, false, true, true, 1234ULL}};
// Full-size random case: the diagonal of the computed covariance matrix is
// compared against params.var * params.var (diagonalMatch presumably checks
// only the diagonal — confirm against its definition).
typedef CovTest<float> CovTestF;
TEST_P(CovTestF, Result)
{
  ASSERT_TRUE(raft::diagonalMatch(params.var * params.var,
                                  cov_act.data(),
                                  params.cols,
                                  params.cols,
                                  raft::CompareApprox<float>(params.tolerance)));
}
typedef CovTest<double> CovTestD;
TEST_P(CovTestD, Result)
{
  ASSERT_TRUE(raft::diagonalMatch(params.var * params.var,
                                  cov_act.data(),
                                  params.cols,
                                  params.cols,
                                  raft::CompareApprox<double>(params.tolerance)));
}
// Small hand-computed 2x3 column-major case prepared by the fixture
// (cov_cm vs cov_cm_ref). Only params.tolerance from the grid matters for
// this check; the fixture still reuses the full inputsf/inputsd grids.
typedef CovTest<float> CovTestSmallF;
TEST_P(CovTestSmallF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(
    cov_cm_ref.data(), cov_cm.data(), 2, 2, raft::CompareApprox<float>(params.tolerance)));
}
typedef CovTest<double> CovTestSmallD;
TEST_P(CovTestSmallD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(
    cov_cm_ref.data(), cov_cm.data(), 2, 2, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(CovTests, CovTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CovTests, CovTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(CovTests, CovTestSmallF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CovTests, CovTestSmallD, ::testing::ValuesIn(inputsd));
} // namespace stats
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/silhouette_score.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/util/cudart_utils.hpp>
#include <raft/stats/silhouette_score.cuh>
#include <random>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace stats {
// parameter structure definition
// Test parameters for the silhouette-score fixture.
struct silhouetteScoreParam {
  int nRows;                            // number of samples
  int nCols;                            // feature dimension of each sample
  int nLabels;                          // number of clusters
  raft::distance::DistanceType metric;  // pairwise distance metric
  int chunk;                            // batch size for silhouette_score_batched
  double tolerance;                     // ASSERT_NEAR tolerance vs host reference
};
// test fixture class
// Test fixture: generates a random labeled dataset, computes a reference
// silhouette score on the host from the pairwise distance matrix, and
// compares it with raft::stats::silhouette_score / silhouette_score_batched.
template <typename LabelT, typename DataT>
class silhouetteScoreTest : public ::testing::TestWithParam<silhouetteScoreParam> {
 protected:
  silhouetteScoreTest()
    : d_X(0, resource::get_cuda_stream(handle)),
      sampleSilScore(0, resource::get_cuda_stream(handle)),
      d_labels(0, resource::get_cuda_stream(handle))
  {
  }

  // Computes truthSilhouetteScore on the host and uploads the generated
  // input (d_X: nRows x nCols values, d_labels: nRows labels) for the
  // device implementations exercised in SetUp().
  void host_silhouette_score()
  {
    // generating random value test input; the engine is seeded
    // deterministically so host and device operate on identical data
    std::vector<double> h_X(nElements, 0.0);
    std::vector<int> h_labels(nRows, 0);
    std::default_random_engine dre(nElements * nLabels);
    std::uniform_int_distribution<int> intGenerator(0, nLabels - 1);
    std::uniform_real_distribution<double> realGenerator(0, 100);
    std::generate(h_X.begin(), h_X.end(), [&]() { return realGenerator(dre); });
    std::generate(h_labels.begin(), h_labels.end(), [&]() { return intGenerator(dre); });

    // allocating and initializing memory on the GPU
    auto stream = resource::get_cuda_stream(handle);
    d_X.resize(nElements, stream);
    d_labels.resize(nRows, stream);
    sampleSilScore.resize(nRows, stream);
    raft::update_device(d_X.data(), h_X.data(), nElements, stream);
    // BUGFIX: copy exactly nRows labels — h_labels only holds nRows elements
    // (the previous code copied nElements ints and read past the host buffer)
    raft::update_device(d_labels.data(), h_labels.data(), nRows, stream);

    // pairwise distance matrix, copied back to the host
    rmm::device_uvector<double> d_distanceMatrix(nRows * nRows, stream);
    std::vector<double> h_distanceMatrix(nRows * nRows);
    raft::distance::pairwise_distance(
      handle, d_X.data(), d_X.data(), d_distanceMatrix.data(), nRows, nRows, nCols, params.metric);
    raft::update_host(h_distanceMatrix.data(), d_distanceMatrix.data(), nRows * nRows, stream);
    // BUGFIX: sync AFTER the asynchronous device->host copy so the host
    // loops below read completed data (the sync used to precede the copy)
    resource::sync_stream(handle, stream);

    // cluster sizes (stored as double for the divisions below)
    std::vector<double> binCountArray(nLabels, 0.0);
    for (int i = 0; i < nRows; ++i) {
      binCountArray[h_labels[i]] += 1;
    }

    // average intra-cluster distance a[i]; -1 marks a singleton cluster
    std::vector<double> a(nRows, 0.0);
    for (int i = 0; i < nRows; ++i) {
      int myLabel              = h_labels[i];
      double sumOfIntraClusterD = 0;
      for (int j = 0; j < nRows; ++j) {
        if (h_labels[j] == myLabel) { sumOfIntraClusterD += h_distanceMatrix[i * nRows + j]; }
      }
      if (binCountArray[myLabel] <= 1)
        a[i] = -1;
      else
        a[i] = sumOfIntraClusterD / (binCountArray[myLabel] - 1);
    }

    // b[i]: smallest average distance from element i to any other cluster
    std::vector<double> b(nRows, 0.0);
    for (int i = 0; i < nRows; ++i) {
      int myLabel          = h_labels[i];
      double minAvgInterCD = ULLONG_MAX;  // large sentinel, converted to double
      for (int j = 0; j < nLabels; ++j) {
        if (j == myLabel) continue;
        double avgInterCD = 0;
        for (int k = 0; k < nRows; ++k) {
          if (h_labels[k] == j) { avgInterCD += h_distanceMatrix[i * nRows + k]; }
        }
        if (binCountArray[j])
          avgInterCD /= binCountArray[j];
        else
          avgInterCD = ULLONG_MAX;  // empty cluster: never the minimum
        minAvgInterCD = std::min(minAvgInterCD, avgInterCD);
      }
      b[i] = minAvgInterCD;
    }

    // per-sample silhouette s_i = (b - a) / max(a, b); singletons score 0
    truthSilhouetteScore = 0;
    for (int i = 0; i < nRows; ++i) {
      double s;
      if (a[i] == -1)
        s = 0;
      else if (a[i] == 0 && b[i] == 0)
        s = 0;
      else
        s = (b[i] - a[i]) / std::max(a[i], b[i]);
      truthSilhouetteScore += s;
    }
    truthSilhouetteScore /= nRows;
  }

  // Reads the test parameters, builds the host reference, then runs both
  // device implementations on the uploaded data.
  void SetUp() override
  {
    // getting the parameters
    params    = ::testing::TestWithParam<silhouetteScoreParam>::GetParam();
    nRows     = params.nRows;
    nCols     = params.nCols;
    nLabels   = params.nLabels;
    chunk     = params.chunk;
    nElements = nRows * nCols;
    host_silhouette_score();
    // calling the silhouette_score CUDA implementation
    computedSilhouetteScore = raft::stats::silhouette_score(
      handle,
      raft::make_device_matrix_view<const DataT>(d_X.data(), nRows, nCols),
      raft::make_device_vector_view<const LabelT>(d_labels.data(), nRows),
      std::make_optional(raft::make_device_vector_view(sampleSilScore.data(), nRows)),
      nLabels,
      params.metric);
    batchedSilhouetteScore = raft::stats::silhouette_score_batched(
      handle,
      raft::make_device_matrix_view<const DataT>(d_X.data(), nRows, nCols),
      raft::make_device_vector_view<const LabelT>(d_labels.data(), nRows),
      std::make_optional(raft::make_device_vector_view(sampleSilScore.data(), nRows)),
      nLabels,
      chunk,
      params.metric);
  }

  // declaring the data values
  raft::resources handle;
  silhouetteScoreParam params;
  int nLabels;                              // number of clusters
  rmm::device_uvector<DataT> d_X;           // nRows x nCols input data
  rmm::device_uvector<DataT> sampleSilScore;  // per-sample scores (nRows)
  rmm::device_uvector<LabelT> d_labels;     // nRows cluster labels
  int nRows;
  int nCols;
  int nElements;                            // nRows * nCols
  double truthSilhouetteScore    = 0;       // host reference score
  double computedSilhouetteScore = 0;       // silhouette_score result
  double batchedSilhouetteScore  = 0;       // silhouette_score_batched result
  int chunk;                                // batch size for the batched variant
};
// setting test parameter values
// field order: {nRows, nCols, nLabels, metric, chunk, tolerance}
const std::vector<silhouetteScoreParam> inputs = {
  {4, 2, 3, raft::distance::DistanceType::L2Expanded, 4, 0.00001},
  {4, 2, 2, raft::distance::DistanceType::L2SqrtUnexpanded, 2, 0.00001},
  {8, 8, 3, raft::distance::DistanceType::L2Unexpanded, 4, 0.00001},
  {11, 2, 5, raft::distance::DistanceType::L2Expanded, 3, 0.00001},
  {40, 2, 8, raft::distance::DistanceType::L2Expanded, 10, 0.00001},
  {12, 7, 3, raft::distance::DistanceType::CosineExpanded, 8, 0.00001},
  {7, 5, 5, raft::distance::DistanceType::L1, 2, 0.00001}};
// writing the test suite: both the single-pass and the batched device
// implementations must match the host reference within tolerance
typedef silhouetteScoreTest<int, double> silhouetteScoreTestClass;
TEST_P(silhouetteScoreTestClass, Result)
{
  ASSERT_NEAR(computedSilhouetteScore, truthSilhouetteScore, params.tolerance);
  ASSERT_NEAR(batchedSilhouetteScore, truthSilhouetteScore, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(silhouetteScore, silhouetteScoreTestClass, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/histogram.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/interruptible.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/histogram.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace stats {
// Reference histogram kernel. Besides accumulating counts, it clamps each
// input value into [0, nbins) and writes the clamped value back, so the
// device implementation under test later sees only in-range bin ids.
// One grid row (blockIdx.y) handles one column of the input matrix.
RAFT_KERNEL naiveHistKernel(int* bins, int nbins, int* in, int nrows)
{
  const int step     = blockDim.x * gridDim.x;
  const auto rowBase = blockIdx.y * nrows;
  const auto binBase = blockIdx.y * nbins;
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < nrows; i += step) {
    const int raw = in[rowBase + i];
    const int id  = raw < 0 ? 0 : (raw >= nbins ? nbins - 1 : raw);
    in[rowBase + i] = id;
    raft::myAtomicAdd(bins + binBase + id, 1);
  }
}
void naiveHist(int* bins, int nbins, int* in, int nrows, int ncols, cudaStream_t stream)
{
const int TPB = 128;
int nblksx = raft::ceildiv(nrows, TPB);
dim3 blks(nblksx, ncols);
naiveHistKernel<<<blks, TPB, 0, stream>>>(bins, nbins, in, nrows);
RAFT_CUDA_TRY(cudaGetLastError());
}
// Test parameters for the histogram fixtures.
struct HistInputs {
  int nrows, ncols, nbins;      // input matrix shape and number of bins per column
  bool isNormal;                // draw input from normalInt instead of uniformInt
  HistType type;                // accumulation strategy under test
  int start, end;               // forwarded to uniformInt/normalInt as range/params
  unsigned long long int seed;  // RNG seed
};
// Fixture comparing raft::stats::histogram against the naive reference over
// randomly generated column-major input.
// NOTE(review): this fixture calls the mdspan overload while HistMdspanTest
// below calls the raw-pointer one — the class names look swapped; confirm
// which overload each was meant to cover.
class HistTest : public ::testing::TestWithParam<HistInputs> {
 protected:
  HistTest()
    : in(0, resource::get_cuda_stream(handle)),
      bins(0, resource::get_cuda_stream(handle)),
      ref_bins(0, resource::get_cuda_stream(handle))
  {
  }

  void SetUp() override
  {
    params      = ::testing::TestWithParam<HistInputs>::GetParam();
    auto stream = resource::get_cuda_stream(handle);
    raft::random::RngState rng(params.seed);

    const int n_elems      = params.nrows * params.ncols;
    const int n_bins_total = params.nbins * params.ncols;
    in.resize(n_elems, stream);
    bins.resize(n_bins_total, stream);
    ref_bins.resize(n_bins_total, stream);

    // draw the input values; out-of-range samples are clamped in place by
    // the reference kernel before the implementation under test runs
    if (params.isNormal) {
      normalInt(handle, rng, in.data(), n_elems, params.start, params.end);
    } else {
      uniformInt(handle, rng, in.data(), n_elems, params.start, params.end);
    }

    RAFT_CUDA_TRY(cudaMemsetAsync(ref_bins.data(), 0, sizeof(int) * n_bins_total, stream));
    naiveHist(ref_bins.data(), params.nbins, in.data(), params.nrows, params.ncols, stream);

    histogram(handle,
              params.type,
              raft::make_device_matrix_view<const int, int, raft::col_major>(
                in.data(), params.nrows, params.ncols),
              raft::make_device_matrix_view<int, int, raft::col_major>(
                bins.data(), params.nbins, params.ncols));
    resource::sync_stream(handle);
  }

 protected:
  raft::resources handle;
  HistInputs params;
  rmm::device_uvector<int> in, bins, ref_bins;
};
// Fixture comparing the raw-pointer histogram overload against the naive
// reference (the mdspan API is exercised by HistTest above — see the naming
// note there).
class HistMdspanTest : public ::testing::TestWithParam<HistInputs> {
 protected:
  HistMdspanTest()
    : in(0, resource::get_cuda_stream(handle)),
      bins(0, resource::get_cuda_stream(handle)),
      ref_bins(0, resource::get_cuda_stream(handle))
  {
  }

  void SetUp() override
  {
    params      = ::testing::TestWithParam<HistInputs>::GetParam();
    auto stream = resource::get_cuda_stream(handle);
    raft::random::RngState rng(params.seed);

    const int n_elems = params.nrows * params.ncols;
    in.resize(n_elems, stream);
    raft::device_vector_view<int, int> in_view(in.data(), in.size());
    if (params.isNormal) {
      normalInt(handle, rng, in_view, params.start, params.end);
    } else {
      uniformInt(handle, rng, in_view, params.start, params.end);
    }

    const int n_bins_total = params.nbins * params.ncols;
    bins.resize(n_bins_total, stream);
    ref_bins.resize(n_bins_total, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(ref_bins.data(), 0, sizeof(int) * n_bins_total, stream));
    // reference pass also clamps the input values in place
    naiveHist(ref_bins.data(), params.nbins, in.data(), params.nrows, params.ncols, stream);

    histogram<int>(
      params.type, bins.data(), params.nbins, in.data(), params.nrows, params.ncols, stream);
    resource::sync_stream(handle);
  }

 protected:
  raft::resources handle;
  HistInputs params;
  rmm::device_uvector<int> in, bins, ref_bins;
};
static const int oneK = 1024;
static const int oneM = oneK * oneK;
// Parameter grid: {nrows, ncols, nbins, isNormal, type, start, end, seed}.
// Each accumulation strategy is exercised with nrows in {1M, 1M+1, 1M+2}
// (to hit remainder/tail code paths), ncols in {1, 21}, and both uniformly
// and normally distributed inputs (for the normal rows, start/end are
// presumably the distribution parameters of normalInt — confirm there;
// out-of-range samples are clamped by naiveHist before comparison).
const std::vector<HistInputs> inputs = {
  // global-memory atomics, 2M bins
  {oneM, 1, 2 * oneM, false, HistTypeGmem, 0, 2 * oneM, 1234ULL},
  {oneM, 1, 2 * oneM, true, HistTypeGmem, 1000, 50, 1234ULL},
  {oneM + 1, 1, 2 * oneM, false, HistTypeGmem, 0, 2 * oneM, 1234ULL},
  {oneM + 1, 1, 2 * oneM, true, HistTypeGmem, 1000, 50, 1234ULL},
  {oneM + 2, 1, 2 * oneM, false, HistTypeGmem, 0, 2 * oneM, 1234ULL},
  {oneM + 2, 1, 2 * oneM, true, HistTypeGmem, 1000, 50, 1234ULL},
  {oneM, 21, 2 * oneM, false, HistTypeGmem, 0, 2 * oneM, 1234ULL},
  {oneM, 21, 2 * oneM, true, HistTypeGmem, 1000, 50, 1234ULL},
  {oneM + 1, 21, 2 * oneM, false, HistTypeGmem, 0, 2 * oneM, 1234ULL},
  {oneM + 1, 21, 2 * oneM, true, HistTypeGmem, 1000, 50, 1234ULL},
  {oneM + 2, 21, 2 * oneM, false, HistTypeGmem, 0, 2 * oneM, 1234ULL},
  {oneM + 2, 21, 2 * oneM, true, HistTypeGmem, 1000, 50, 1234ULL},
  // shared-memory atomics, 2K bins
  {oneM, 1, 2 * oneK, false, HistTypeSmem, 0, 2 * oneK, 1234ULL},
  {oneM, 1, 2 * oneK, true, HistTypeSmem, 1000, 50, 1234ULL},
  {oneM + 1, 1, 2 * oneK, false, HistTypeSmem, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 1, 2 * oneK, true, HistTypeSmem, 1000, 50, 1234ULL},
  {oneM + 2, 1, 2 * oneK, false, HistTypeSmem, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 1, 2 * oneK, true, HistTypeSmem, 1000, 50, 1234ULL},
  {oneM, 21, 2 * oneK, false, HistTypeSmem, 0, 2 * oneK, 1234ULL},
  {oneM, 21, 2 * oneK, true, HistTypeSmem, 1000, 50, 1234ULL},
  {oneM + 1, 21, 2 * oneK, false, HistTypeSmem, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 21, 2 * oneK, true, HistTypeSmem, 1000, 50, 1234ULL},
  {oneM + 2, 21, 2 * oneK, false, HistTypeSmem, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 21, 2 * oneK, true, HistTypeSmem, 1000, 50, 1234ULL},
  // shared-memory match-any variant
  {oneM, 1, 2 * oneK, false, HistTypeSmemMatchAny, 0, 2 * oneK, 1234ULL},
  {oneM, 1, 2 * oneK, true, HistTypeSmemMatchAny, 1000, 50, 1234ULL},
  {oneM + 1, 1, 2 * oneK, false, HistTypeSmemMatchAny, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 1, 2 * oneK, true, HistTypeSmemMatchAny, 1000, 50, 1234ULL},
  {oneM + 2, 1, 2 * oneK, false, HistTypeSmemMatchAny, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 1, 2 * oneK, true, HistTypeSmemMatchAny, 1000, 50, 1234ULL},
  {oneM, 21, 2 * oneK, false, HistTypeSmemMatchAny, 0, 2 * oneK, 1234ULL},
  {oneM, 21, 2 * oneK, true, HistTypeSmemMatchAny, 1000, 50, 1234ULL},
  {oneM + 1, 21, 2 * oneK, false, HistTypeSmemMatchAny, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 21, 2 * oneK, true, HistTypeSmemMatchAny, 1000, 50, 1234ULL},
  {oneM + 2, 21, 2 * oneK, false, HistTypeSmemMatchAny, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 21, 2 * oneK, true, HistTypeSmemMatchAny, 1000, 50, 1234ULL},
  // 16-bit shared-memory bins
  {oneM, 1, 2 * oneK, false, HistTypeSmemBits16, 0, 2 * oneK, 1234ULL},
  {oneM, 1, 2 * oneK, true, HistTypeSmemBits16, 1000, 50, 1234ULL},
  {oneM + 1, 1, 2 * oneK, false, HistTypeSmemBits16, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 1, 2 * oneK, true, HistTypeSmemBits16, 1000, 50, 1234ULL},
  {oneM + 2, 1, 2 * oneK, false, HistTypeSmemBits16, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 1, 2 * oneK, true, HistTypeSmemBits16, 1000, 50, 1234ULL},
  {oneM, 21, 2 * oneK, false, HistTypeSmemBits16, 0, 2 * oneK, 1234ULL},
  {oneM, 21, 2 * oneK, true, HistTypeSmemBits16, 1000, 50, 1234ULL},
  {oneM + 1, 21, 2 * oneK, false, HistTypeSmemBits16, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 21, 2 * oneK, true, HistTypeSmemBits16, 1000, 50, 1234ULL},
  {oneM + 2, 21, 2 * oneK, false, HistTypeSmemBits16, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 21, 2 * oneK, true, HistTypeSmemBits16, 1000, 50, 1234ULL},
  // 8-bit shared-memory bins
  {oneM, 1, 2 * oneK, false, HistTypeSmemBits8, 0, 2 * oneK, 1234ULL},
  {oneM, 1, 2 * oneK, true, HistTypeSmemBits8, 1000, 50, 1234ULL},
  {oneM + 1, 1, 2 * oneK, false, HistTypeSmemBits8, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 1, 2 * oneK, true, HistTypeSmemBits8, 1000, 50, 1234ULL},
  {oneM + 2, 1, 2 * oneK, false, HistTypeSmemBits8, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 1, 2 * oneK, true, HistTypeSmemBits8, 1000, 50, 1234ULL},
  {oneM, 21, 2 * oneK, false, HistTypeSmemBits8, 0, 2 * oneK, 1234ULL},
  {oneM, 21, 2 * oneK, true, HistTypeSmemBits8, 1000, 50, 1234ULL},
  {oneM + 1, 21, 2 * oneK, false, HistTypeSmemBits8, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 21, 2 * oneK, true, HistTypeSmemBits8, 1000, 50, 1234ULL},
  {oneM + 2, 21, 2 * oneK, false, HistTypeSmemBits8, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 21, 2 * oneK, true, HistTypeSmemBits8, 1000, 50, 1234ULL},
  // 4-bit shared-memory bins
  {oneM, 1, 2 * oneK, false, HistTypeSmemBits4, 0, 2 * oneK, 1234ULL},
  {oneM, 1, 2 * oneK, true, HistTypeSmemBits4, 1000, 50, 1234ULL},
  {oneM + 1, 1, 2 * oneK, false, HistTypeSmemBits4, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 1, 2 * oneK, true, HistTypeSmemBits4, 1000, 50, 1234ULL},
  {oneM + 2, 1, 2 * oneK, false, HistTypeSmemBits4, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 1, 2 * oneK, true, HistTypeSmemBits4, 1000, 50, 1234ULL},
  {oneM, 21, 2 * oneK, false, HistTypeSmemBits4, 0, 2 * oneK, 1234ULL},
  {oneM, 21, 2 * oneK, true, HistTypeSmemBits4, 1000, 50, 1234ULL},
  {oneM + 1, 21, 2 * oneK, false, HistTypeSmemBits4, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 21, 2 * oneK, true, HistTypeSmemBits4, 1000, 50, 1234ULL},
  {oneM + 2, 21, 2 * oneK, false, HistTypeSmemBits4, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 21, 2 * oneK, true, HistTypeSmemBits4, 1000, 50, 1234ULL},
  // 2-bit shared-memory bins
  {oneM, 1, 2 * oneK, false, HistTypeSmemBits2, 0, 2 * oneK, 1234ULL},
  {oneM, 1, 2 * oneK, true, HistTypeSmemBits2, 1000, 50, 1234ULL},
  {oneM + 1, 1, 2 * oneK, false, HistTypeSmemBits2, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 1, 2 * oneK, true, HistTypeSmemBits2, 1000, 50, 1234ULL},
  {oneM + 2, 1, 2 * oneK, false, HistTypeSmemBits2, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 1, 2 * oneK, true, HistTypeSmemBits2, 1000, 50, 1234ULL},
  {oneM, 21, 2 * oneK, false, HistTypeSmemBits2, 0, 2 * oneK, 1234ULL},
  {oneM, 21, 2 * oneK, true, HistTypeSmemBits2, 1000, 50, 1234ULL},
  {oneM + 1, 21, 2 * oneK, false, HistTypeSmemBits2, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 21, 2 * oneK, true, HistTypeSmemBits2, 1000, 50, 1234ULL},
  {oneM + 2, 21, 2 * oneK, false, HistTypeSmemBits2, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 21, 2 * oneK, true, HistTypeSmemBits2, 1000, 50, 1234ULL},
  // 1-bit shared-memory bins
  {oneM, 1, 2 * oneK, false, HistTypeSmemBits1, 0, 2 * oneK, 1234ULL},
  {oneM, 1, 2 * oneK, true, HistTypeSmemBits1, 1000, 50, 1234ULL},
  {oneM + 1, 1, 2 * oneK, false, HistTypeSmemBits1, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 1, 2 * oneK, true, HistTypeSmemBits1, 1000, 50, 1234ULL},
  {oneM + 2, 1, 2 * oneK, false, HistTypeSmemBits1, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 1, 2 * oneK, true, HistTypeSmemBits1, 1000, 50, 1234ULL},
  {oneM, 21, 2 * oneK, false, HistTypeSmemBits1, 0, 2 * oneK, 1234ULL},
  {oneM, 21, 2 * oneK, true, HistTypeSmemBits1, 1000, 50, 1234ULL},
  {oneM + 1, 21, 2 * oneK, false, HistTypeSmemBits1, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 21, 2 * oneK, true, HistTypeSmemBits1, 1000, 50, 1234ULL},
  {oneM + 2, 21, 2 * oneK, false, HistTypeSmemBits1, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 21, 2 * oneK, true, HistTypeSmemBits1, 1000, 50, 1234ULL},
  // shared-memory hash variant, both 2M and 2K bins
  {oneM, 1, 2 * oneM, false, HistTypeSmemHash, 0, 2 * oneM, 1234ULL},
  {oneM, 1, 2 * oneM, true, HistTypeSmemHash, 1000, 50, 1234ULL},
  {oneM + 1, 1, 2 * oneM, false, HistTypeSmemHash, 0, 2 * oneM, 1234ULL},
  {oneM + 1, 1, 2 * oneM, true, HistTypeSmemHash, 1000, 50, 1234ULL},
  {oneM + 2, 1, 2 * oneM, false, HistTypeSmemHash, 0, 2 * oneM, 1234ULL},
  {oneM + 2, 1, 2 * oneM, true, HistTypeSmemHash, 1000, 50, 1234ULL},
  {oneM, 1, 2 * oneK, false, HistTypeSmemHash, 0, 2 * oneK, 1234ULL},
  {oneM, 1, 2 * oneK, true, HistTypeSmemHash, 1000, 50, 1234ULL},
  {oneM + 1, 1, 2 * oneK, false, HistTypeSmemHash, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 1, 2 * oneK, true, HistTypeSmemHash, 1000, 50, 1234ULL},
  {oneM + 2, 1, 2 * oneK, false, HistTypeSmemHash, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 1, 2 * oneK, true, HistTypeSmemHash, 1000, 50, 1234ULL},
  {oneM, 21, 2 * oneM, false, HistTypeSmemHash, 0, 2 * oneM, 1234ULL},
  {oneM, 21, 2 * oneM, true, HistTypeSmemHash, 1000, 50, 1234ULL},
  {oneM + 1, 21, 2 * oneM, false, HistTypeSmemHash, 0, 2 * oneM, 1234ULL},
  {oneM + 1, 21, 2 * oneM, true, HistTypeSmemHash, 1000, 50, 1234ULL},
  {oneM + 2, 21, 2 * oneM, false, HistTypeSmemHash, 0, 2 * oneM, 1234ULL},
  {oneM + 2, 21, 2 * oneM, true, HistTypeSmemHash, 1000, 50, 1234ULL},
  {oneM, 21, 2 * oneK, false, HistTypeSmemHash, 0, 2 * oneK, 1234ULL},
  {oneM, 21, 2 * oneK, true, HistTypeSmemHash, 1000, 50, 1234ULL},
  {oneM + 1, 21, 2 * oneK, false, HistTypeSmemHash, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 21, 2 * oneK, true, HistTypeSmemHash, 1000, 50, 1234ULL},
  {oneM + 2, 21, 2 * oneK, false, HistTypeSmemHash, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 21, 2 * oneK, true, HistTypeSmemHash, 1000, 50, 1234ULL},
  // automatic strategy selection, both 2M and 2K bins
  {oneM, 1, 2 * oneM, false, HistTypeAuto, 0, 2 * oneM, 1234ULL},
  {oneM, 1, 2 * oneM, true, HistTypeAuto, 1000, 50, 1234ULL},
  {oneM + 1, 1, 2 * oneM, false, HistTypeAuto, 0, 2 * oneM, 1234ULL},
  {oneM + 1, 1, 2 * oneM, true, HistTypeAuto, 1000, 50, 1234ULL},
  {oneM + 2, 1, 2 * oneM, false, HistTypeAuto, 0, 2 * oneM, 1234ULL},
  {oneM + 2, 1, 2 * oneM, true, HistTypeAuto, 1000, 50, 1234ULL},
  {oneM, 1, 2 * oneK, false, HistTypeAuto, 0, 2 * oneK, 1234ULL},
  {oneM, 1, 2 * oneK, true, HistTypeAuto, 1000, 50, 1234ULL},
  {oneM + 1, 1, 2 * oneK, false, HistTypeAuto, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 1, 2 * oneK, true, HistTypeAuto, 1000, 50, 1234ULL},
  {oneM + 2, 1, 2 * oneK, false, HistTypeAuto, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 1, 2 * oneK, true, HistTypeAuto, 1000, 50, 1234ULL},
  {oneM, 21, 2 * oneM, false, HistTypeAuto, 0, 2 * oneM, 1234ULL},
  {oneM, 21, 2 * oneM, true, HistTypeAuto, 1000, 50, 1234ULL},
  {oneM + 1, 21, 2 * oneM, false, HistTypeAuto, 0, 2 * oneM, 1234ULL},
  {oneM + 1, 21, 2 * oneM, true, HistTypeAuto, 1000, 50, 1234ULL},
  {oneM + 2, 21, 2 * oneM, false, HistTypeAuto, 0, 2 * oneM, 1234ULL},
  {oneM + 2, 21, 2 * oneM, true, HistTypeAuto, 1000, 50, 1234ULL},
  {oneM, 21, 2 * oneK, false, HistTypeAuto, 0, 2 * oneK, 1234ULL},
  {oneM, 21, 2 * oneK, true, HistTypeAuto, 1000, 50, 1234ULL},
  {oneM + 1, 21, 2 * oneK, false, HistTypeAuto, 0, 2 * oneK, 1234ULL},
  {oneM + 1, 21, 2 * oneK, true, HistTypeAuto, 1000, 50, 1234ULL},
  {oneM + 2, 21, 2 * oneK, false, HistTypeAuto, 0, 2 * oneK, 1234ULL},
  {oneM + 2, 21, 2 * oneK, true, HistTypeAuto, 1000, 50, 1234ULL},
};
TEST_P(HistTest, Result)
{
  // device histogram must match the naive reference bin-for-bin
  ASSERT_TRUE(raft::devArrMatch(
    ref_bins.data(), bins.data(), params.nbins * params.ncols, raft::Compare<int>()));
}
INSTANTIATE_TEST_CASE_P(HistTests, HistTest, ::testing::ValuesIn(inputs));
TEST_P(HistMdspanTest, Result)
{
  // same exact-match check for the other overload's fixture
  ASSERT_TRUE(raft::devArrMatch(
    ref_bins.data(), bins.data(), params.nbins * params.ncols, raft::Compare<int>()));
}
INSTANTIATE_TEST_CASE_P(HistMdspanTests, HistMdspanTest, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/rand_index.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cudart_utils.hpp>
#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#include <raft/core/resources.hpp>
#include <raft/stats/rand_index.cuh>
#include <random>
namespace raft {
namespace stats {
// parameter structure definition
// Test parameters for the Rand-index fixture.
struct randIndexParam {
  uint64_t nElements;   // number of labeled elements in each clustering
  int lowerLabelRange;  // inclusive lower bound of generated label values
  int upperLabelRange;  // inclusive upper bound of generated label values
  double tolerance;     // ASSERT_NEAR tolerance for host vs device score
};
// test fixture class
// Test fixture: builds two random labelings of the same elements, computes
// the Rand index on the host by enumerating every element pair, and compares
// it with raft::stats::rand_index.
template <typename T>
class randIndexTest : public ::testing::TestWithParam<randIndexParam> {
 protected:
  void SetUp() override
  {
    // read the parameters for this instantiation
    params          = ::testing::TestWithParam<randIndexParam>::GetParam();
    size            = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;

    // two independent random labelings
    std::vector<int> labelsA(size, 0);
    std::vector<int> labelsB(size, 0);
    std::random_device rd;
    std::default_random_engine engine(rd());
    std::uniform_int_distribution<int> labelDist(lowerLabelRange, upperLabelRange);
    std::generate(labelsA.begin(), labelsA.end(), [&]() { return labelDist(engine); });
    std::generate(labelsB.begin(), labelsB.end(), [&]() { return labelDist(engine); });

    // host reference: count pairs on which the two labelings agree, i.e.
    // pairs that are co-clustered in both or separated in both
    int64_t agreeSame = 0;  // same cluster in both labelings
    int64_t agreeDiff = 0;  // different clusters in both labelings
    for (uint64_t i = 1; i < size; ++i) {
      for (uint64_t j = 0; j < i; ++j) {
        const bool sameA = labelsA[i] == labelsA[j];
        const bool sameB = labelsB[i] == labelsB[j];
        if (sameA && sameB) {
          ++agreeSame;
        } else if (!sameA && !sameB) {
          ++agreeDiff;
        }
      }
    }
    const uint64_t numPairs = (size * (size - 1)) / 2;
    truthRandIndex = static_cast<double>(agreeSame + agreeDiff) / static_cast<double>(numPairs);

    // upload the labelings to the GPU
    stream = resource::get_cuda_stream(handle);
    rmm::device_uvector<T> clusterA(size, stream);
    rmm::device_uvector<T> clusterB(size, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(clusterA.data(), 0, clusterA.size() * sizeof(T), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(clusterB.data(), 0, clusterB.size() * sizeof(T), stream));
    raft::update_device(clusterA.data(), labelsA.data(), (int)size, stream);
    raft::update_device(clusterB.data(), labelsB.data(), (int)size, stream);

    // device implementation under test
    computedRandIndex =
      raft::stats::rand_index(handle,
                              raft::make_device_vector_view<const T>(clusterA.data(), size),
                              raft::make_device_vector_view<const T>(clusterB.data(), size));
  }

  // declaring the data values
  raft::resources handle;
  randIndexParam params;
  int lowerLabelRange = 0, upperLabelRange = 2;
  uint64_t size            = 0;
  double truthRandIndex    = 0;  // host-computed reference
  double computedRandIndex = 0;  // device-computed score
  cudaStream_t stream      = 0;
};
// setting test parameter values
// field order: {nElements, lowerLabelRange, upperLabelRange, tolerance};
// the {2, 0, 0, ...} row covers the smallest possible input (a single pair)
const std::vector<randIndexParam> inputs = {{199, 1, 10, 0.000001},
                                            {200, 1, 100, 0.000001},
                                            {10, 1, 1200, 0.000001},
                                            {100, 1, 10000, 0.000001},
                                            {198, 1, 100, 0.000001},
                                            {300, 3, 99, 0.000001},
                                            {2, 0, 0, 0.00001}};
// writing the test suite
typedef randIndexTest<int> randIndexTestClass;
TEST_P(randIndexTestClass, Result)
{
  ASSERT_NEAR(computedRandIndex, truthRandIndex, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(randIndex, randIndexTestClass, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/stats/mutual_info_score.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/stats/mutual_info_score.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
namespace raft {
namespace stats {
// parameter structure definition
// Test parameters for the mutual-information fixture.
struct mutualInfoParam {
  int nElements;        // number of labeled samples
  int lowerLabelRange;  // inclusive lower bound of generated label values
  int upperLabelRange;  // inclusive upper bound of generated label values
  bool sameArrays;      // if true, both clusterings use identical labels
  double tolerance;     // ASSERT_NEAR tolerance for host vs device score
};
// test fixture class
// Test fixture: computes a reference mutual-information score on the host
// from the contingency matrix of two random labelings and compares it with
// raft::stats::mutual_info_score.
template <typename T>
class mutualInfoTest : public ::testing::TestWithParam<mutualInfoParam> {
 protected:
  void SetUp() override
  {
    // getting the parameters
    params          = ::testing::TestWithParam<mutualInfoParam>::GetParam();
    nElements       = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;

    // generating random value test input
    std::vector<int> arr1(nElements, 0);
    std::vector<int> arr2(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
    if (params.sameArrays) {
      arr2 = arr1;  // perfectly correlated labelings
    } else {
      std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
    }

    // golden output: contingency matrix of the two labelings
    // (std::vector instead of the previous raw malloc/memset buffers, which
    // were never freed)
    const int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
    std::vector<int> contingency(numUniqueClasses * numUniqueClasses, 0);
    for (int i = 0; i < nElements; ++i) {
      const int row    = arr1[i] - lowerLabelRange;
      const int column = arr2[i] - lowerLabelRange;
      contingency[row * numUniqueClasses + column] += 1;
    }

    // row (a) and column (b) marginals of the contingency matrix
    std::vector<int> a(numUniqueClasses, 0);
    std::vector<int> b(numUniqueClasses, 0);
    for (int i = 0; i < numUniqueClasses; ++i) {
      for (int j = 0; j < numUniqueClasses; ++j) {
        a[i] += contingency[i * numUniqueClasses + j];
        b[i] += contingency[j * numUniqueClasses + i];
      }
    }

    // truth mutual information:
    //   MI = (1/N) * sum_ij n_ij * (log(N * n_ij) - log(a_i * b_j))
    truthmutualInfo = 0;
    for (int i = 0; i < numUniqueClasses; ++i) {
      for (int j = 0; j < numUniqueClasses; ++j) {
        const int nij = contingency[i * numUniqueClasses + j];
        if (a[i] * b[j] != 0 && nij != 0) {
          truthmutualInfo +=
            (double)nij * (log((double)(double(nElements) * nij)) - log((double)(a[i] * b[j])));
        }
      }
    }
    truthmutualInfo /= nElements;

    // allocating and initializing memory on the GPU (the buffers are fully
    // overwritten by update_device, so no zeroing memset is needed)
    stream = resource::get_cuda_stream(handle);
    rmm::device_uvector<T> firstClusterArray(nElements, stream);
    rmm::device_uvector<T> secondClusterArray(nElements, stream);
    raft::update_device(firstClusterArray.data(), arr1.data(), nElements, stream);
    raft::update_device(secondClusterArray.data(), arr2.data(), nElements, stream);

    // device implementation under test
    computedmutualInfo = raft::stats::mutual_info_score(
      handle,
      raft::make_device_vector_view<const T>(firstClusterArray.data(), nElements),
      raft::make_device_vector_view<const T>(secondClusterArray.data(), nElements),
      lowerLabelRange,
      upperLabelRange);
  }

  // declaring the data values
  raft::resources handle;
  mutualInfoParam params;
  T lowerLabelRange, upperLabelRange;  // inclusive label value range
  int nElements             = 0;       // number of samples
  double truthmutualInfo    = 0;       // host-computed reference score
  double computedmutualInfo = 0;       // device-computed score
  cudaStream_t stream       = 0;
};
// setting test parameter values
// Test parameter values; presumably {nElements, lowerLabelRange, upperLabelRange,
// sameArrays, tolerance} — TODO confirm field order against the mutualInfoParam definition.
const std::vector<mutualInfoParam> inputs = {{199, 1, 10, false, 0.000001},
                                             {200, 15, 100, false, 0.000001},
                                             {100, 1, 20, false, 0.000001},
                                             {10, 1, 10, false, 0.000001},
                                             {198, 1, 100, false, 0.000001},
                                             {300, 3, 99, false, 0.000001},
                                             {199, 1, 10, true, 0.000001},
                                             {200, 15, 100, true, 0.000001},
                                             {100, 1, 20, true, 0.000001},
                                             {10, 1, 10, true, 0.000001},
                                             {198, 1, 100, true, 0.000001},
                                             {300, 3, 99, true, 0.000001}};
// writing the test suite
// Typed fixture alias for integer-labeled mutual information tests.
using mutualInfoTestClass = mutualInfoTest<int>;
TEST_P(mutualInfoTestClass, Result)
{
  // GPU result must agree with the host-side reference within the configured tolerance.
  ASSERT_NEAR(computedmutualInfo, truthmutualInfo, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(mutualInfo, mutualInfoTestClass, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/randomized_svd.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/device_resources.hpp>
#include <raft/linalg/rsvd.cuh>
#include <raft/linalg/svd.cuh>
#include <raft/matrix/diagonal.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Parameter pack for the randomized SVD tests.
template <typename T>
struct randomized_svdInputs {
  T tolerance;                  // max absolute error allowed vs. the precomputed reference
  int n_row;                    // input matrix rows
  int n_col;                    // input matrix columns
  int k;                        // number of singular values / vectors requested
  unsigned long long int seed;  // RNG seed (the fixture uses fixed 5x5 data, so unused there)
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const randomized_svdInputs<T>& dims)
{
return os;
}
// Test fixture for raft::linalg::randomized_svd. Both tests upload a fixed 5x5 matrix and
// reference data precomputed for k = 2; the TEST_P bodies compare the *_act buffers filled
// here against the *_ref buffers.
template <typename T>
class randomized_svdTest : public ::testing::TestWithParam<randomized_svdInputs<T>> {
 public:
  // Allocates all device buffers sized from the test parameters.
  // note(review): `reconst` is allocated but never used, and the *_ref buffers are
  // over-allocated (n_row*n_col and n_col*n_col) while only n_row*k / k*n_col entries
  // are filled and compared.
  randomized_svdTest()
    : params(::testing::TestWithParam<randomized_svdInputs<T>>::GetParam()),
      stream(handle.get_stream()),
      data(params.n_row * params.n_col, stream),
      reconst(params.n_row * params.n_col, stream),
      left_eig_vectors_act(params.n_row * params.k, stream),
      right_eig_vectors_act(params.k * params.n_col, stream),
      sing_vals_act(params.k, stream),
      left_eig_vectors_ref(params.n_row * params.n_col, stream),
      right_eig_vectors_ref(params.n_col * params.n_col, stream),
      sing_vals_ref(params.k, stream)
  {
  }

 protected:
  // Runs randomized_svd requesting both left and right singular vectors plus the
  // singular values; uploads the reference data the TEST_P bodies compare against.
  void basicTest()
  {
    int len = params.n_row * params.n_col;
    ASSERT(params.n_row == 5 && params.n_col == 5, "This test only supports nrows=5 && ncols=5!");
    // column-major 5x5 input
    T data_h[] = {0.76420743, 0.61411544, 0.81724151, 0.42040879, 0.03446089,
                  0.03697287, 0.85962444, 0.67584086, 0.45594666, 0.02074835,
                  0.42018265, 0.39204509, 0.12657948, 0.90250559, 0.23076218,
                  0.50339844, 0.92974961, 0.21213988, 0.63962457, 0.58124562,
                  0.58325673, 0.11589871, 0.39831112, 0.21492685, 0.00540355};
    raft::update_device(data.data(), data_h, len, stream);

    // first k = 2 left singular vectors (column-major)
    T left_eig_vectors_ref_h[] = {0.42823088,
                                  0.59131151,
                                  0.4220887,
                                  0.50441194,
                                  0.18541506,
                                  0.27047497,
                                  -0.17195579,
                                  0.69362791,
                                  -0.43253894,
                                  -0.47860724};

    // first k = 2 right singular vectors
    T right_eig_vectors_ref_h[] = {0.53005494,
                                   0.44104121,
                                   0.40720732,
                                   0.54337293,
                                   0.25189773,
                                   0.5789401,
                                   0.15264214,
                                   -0.45215699,
                                   -0.53184873,
                                   0.3927082};

    T sing_vals_ref_h[] = {2.36539241, 0.81117785, 0.68562255, 0.41390509, 0.01519322};

    raft::update_device(
      left_eig_vectors_ref.data(), left_eig_vectors_ref_h, params.n_row * params.k, stream);
    raft::update_device(
      right_eig_vectors_ref.data(), right_eig_vectors_ref_h, params.k * params.n_col, stream);
    raft::update_device(sing_vals_ref.data(), sing_vals_ref_h, params.k, stream);

    // last two args: p = 2 extra samples, niters = 2 power iterations
    randomized_svd(handle,
                   raft::make_device_matrix_view<const T, uint32_t, raft::col_major>(
                     data.data(), params.n_row, params.n_col),
                   raft::make_device_vector_view<T, uint32_t>(sing_vals_act.data(), params.k),
                   std::make_optional(raft::make_device_matrix_view<T, uint32_t, raft::col_major>(
                     left_eig_vectors_act.data(), params.n_row, params.k)),
                   std::make_optional(raft::make_device_matrix_view<T, uint32_t, raft::col_major>(
                     right_eig_vectors_act.data(), params.k, params.n_col)),
                   2,
                   2);
    resource::sync_stream(handle, stream);
  }

  // Exercises the optional-output overload combinations: right vectors only,
  // left vectors only, and singular values only.
  void apiTest()
  {
    int len = params.n_row * params.n_col;
    ASSERT(params.n_row == 5 && params.n_col == 5, "This test only supports nrows=5 && ncols=5!");
    T data_h[] = {0.76420743, 0.61411544, 0.81724151, 0.42040879, 0.03446089,
                  0.03697287, 0.85962444, 0.67584086, 0.45594666, 0.02074835,
                  0.42018265, 0.39204509, 0.12657948, 0.90250559, 0.23076218,
                  0.50339844, 0.92974961, 0.21213988, 0.63962457, 0.58124562,
                  0.58325673, 0.11589871, 0.39831112, 0.21492685, 0.00540355};
    raft::update_device(data.data(), data_h, len, stream);

    T left_eig_vectors_ref_h[] = {0.42823088,
                                  0.59131151,
                                  0.4220887,
                                  0.50441194,
                                  0.18541506,
                                  0.27047497,
                                  -0.17195579,
                                  0.69362791,
                                  -0.43253894,
                                  -0.47860724};

    T right_eig_vectors_ref_h[] = {0.53005494,
                                   0.44104121,
                                   0.40720732,
                                   0.54337293,
                                   0.25189773,
                                   0.5789401,
                                   0.15264214,
                                   -0.45215699,
                                   -0.53184873,
                                   0.3927082};

    T sing_vals_ref_h[] = {2.36539241, 0.81117785, 0.68562255, 0.41390509, 0.01519322};

    raft::update_device(
      left_eig_vectors_ref.data(), left_eig_vectors_ref_h, params.n_row * params.k, stream);
    raft::update_device(
      right_eig_vectors_ref.data(), right_eig_vectors_ref_h, params.k * params.n_col, stream);
    raft::update_device(sing_vals_ref.data(), sing_vals_ref_h, params.k, stream);

    // U omitted
    randomized_svd(handle,
                   raft::make_device_matrix_view<const T, uint32_t, raft::col_major>(
                     data.data(), params.n_row, params.n_col),
                   raft::make_device_vector_view<T, uint32_t>(sing_vals_act.data(), params.k),
                   std::nullopt,
                   std::make_optional(raft::make_device_matrix_view<T, uint32_t, raft::col_major>(
                     right_eig_vectors_act.data(), params.k, params.n_col)),
                   2,
                   2);
    // V omitted
    randomized_svd(handle,
                   raft::make_device_matrix_view<const T, uint32_t, raft::col_major>(
                     data.data(), params.n_row, params.n_col),
                   raft::make_device_vector_view<T, uint32_t>(sing_vals_act.data(), params.k),
                   std::make_optional(raft::make_device_matrix_view<T, uint32_t, raft::col_major>(
                     left_eig_vectors_act.data(), params.n_row, params.k)),
                   std::nullopt,
                   2,
                   2);
    // both omitted
    randomized_svd(handle,
                   raft::make_device_matrix_view<const T, uint32_t, raft::col_major>(
                     data.data(), params.n_row, params.n_col),
                   raft::make_device_vector_view<T, uint32_t>(sing_vals_act.data(), params.k),
                   std::nullopt,
                   std::nullopt,
                   2,
                   2);
    resource::sync_stream(handle, stream);
  }

  // Runs apiTest only on sufficiently recent cuSOLVER — presumably the optional-output
  // entry points require >= 11.5; confirm against cuSOLVER release notes.
  // note(review): the cusolverGetProperty return status is ignored.
  void SetUp() override
  {
    int major          = 0;
    int minor          = 0;
    cusolverGetProperty(MAJOR_VERSION, &major);
    cusolverGetProperty(MINOR_VERSION, &minor);
    int cusolv_version = major * 1000 + minor * 10;

    if (cusolv_version >= 11050) apiTest();
    basicTest();
  }

 protected:
  raft::device_resources handle;
  cudaStream_t stream;
  randomized_svdInputs<T> params;
  // *_act: outputs produced by randomized_svd; *_ref: precomputed expectations.
  rmm::device_uvector<T> data, left_eig_vectors_act, right_eig_vectors_act, sing_vals_act,
    left_eig_vectors_ref, right_eig_vectors_ref, sing_vals_ref, reconst;
};
// Single 5x5 input with k = 2; the fixture hard-codes matching 5x5 reference data.
const std::vector<randomized_svdInputs<float>> inputsf1 = {{0.0001f, 5, 5, 2, 1234ULL}};
const std::vector<randomized_svdInputs<double>> inputsd1 = {{0.0001, 5, 5, 2, 1234ULL}};
// float instantiation: compare singular values and both singular-vector sets
// against the precomputed reference, elementwise in absolute value.
using randomized_svdTestF = randomized_svdTest<float>;
TEST_P(randomized_svdTestF, Result)
{
  const raft::CompareApproxAbs<float> approx(params.tolerance);
  ASSERT_TRUE(raft::devArrMatch(sing_vals_ref.data(), sing_vals_act.data(), params.k, approx));
  ASSERT_TRUE(raft::devArrMatch(
    left_eig_vectors_ref.data(), left_eig_vectors_act.data(), params.n_row * params.k, approx));
  ASSERT_TRUE(raft::devArrMatch(
    right_eig_vectors_ref.data(), right_eig_vectors_act.data(), params.k * params.n_col, approx));
}
// double instantiation of the same checks.
using randomized_svdTestD = randomized_svdTest<double>;
TEST_P(randomized_svdTestD, Result)
{
  const raft::CompareApproxAbs<double> approx(params.tolerance);
  ASSERT_TRUE(raft::devArrMatch(sing_vals_ref.data(), sing_vals_act.data(), params.k, approx));
  ASSERT_TRUE(raft::devArrMatch(
    left_eig_vectors_ref.data(), left_eig_vectors_act.data(), params.n_row * params.k, approx));
  ASSERT_TRUE(raft::devArrMatch(
    right_eig_vectors_ref.data(), right_eig_vectors_act.data(), params.k * params.n_col, approx));
}
INSTANTIATE_TEST_SUITE_P(randomized_svdTests1, randomized_svdTestF, ::testing::ValuesIn(inputsf1));
INSTANTIATE_TEST_SUITE_P(randomized_svdTests1, randomized_svdTestD, ::testing::ValuesIn(inputsd1));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/subtract.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/subtract.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Reference element-wise subtraction: out[i] = in1[i] - in2[i].
template <typename Type>
RAFT_KERNEL naiveSubtractElemKernel(Type* out, const Type* in1, const Type* in2, int len)
{
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= len) { return; }
  out[tid] = in1[tid] - in2[tid];
}
// Launches the naive element-wise subtraction kernel on `stream`.
template <typename Type>
void naiveSubtractElem(Type* out, const Type* in1, const Type* in2, int len, cudaStream_t stream)
{
  constexpr int kThreadsPerBlock = 64;
  const int grid                 = raft::ceildiv(len, kThreadsPerBlock);
  naiveSubtractElemKernel<Type><<<grid, kThreadsPerBlock, 0, stream>>>(out, in1, in2, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Reference scalar subtraction: out[i] = in1[i] - in2.
template <typename Type>
RAFT_KERNEL naiveSubtractScalarKernel(Type* out, const Type* in1, const Type in2, int len)
{
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= len) { return; }
  out[tid] = in1[tid] - in2;
}
// Launches the naive scalar subtraction kernel on `stream`.
template <typename Type>
void naiveSubtractScalar(Type* out, const Type* in1, const Type in2, int len, cudaStream_t stream)
{
  constexpr int kThreadsPerBlock = 64;
  const int grid                 = raft::ceildiv(len, kThreadsPerBlock);
  naiveSubtractScalarKernel<Type><<<grid, kThreadsPerBlock, 0, stream>>>(out, in1, in2, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Parameter pack for the subtract tests.
template <typename T>
struct SubtractInputs {
  T tolerance;                  // comparison tolerance vs. the naive reference
  int len;                      // number of elements
  unsigned long long int seed;  // RNG seed for the uniform inputs
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SubtractInputs<T>& dims)
{
return os;
}
// Test fixture: computes the reference (in1 - in2) - 1 with the naive kernels, then runs the
// public subtract/subtract_scalar APIs both out-of-place (into `out`) and in-place
// (overwriting `in1`). NOTE: statement order below matters — `out` and `in1` are each reused
// as input to the subsequent scalar subtraction.
template <typename T>
class SubtractTest : public ::testing::TestWithParam<SubtractInputs<T>> {
 public:
  SubtractTest()
    : params(::testing::TestWithParam<SubtractInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in1(params.len, stream),
      in2(params.len, stream),
      out_ref(params.len, stream),
      out(params.len, stream)
  {
  }

 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    int len = params.len;
    uniform(handle, r, in1.data(), len, T(-1.0), T(1.0));
    uniform(handle, r, in2.data(), len, T(-1.0), T(1.0));

    // reference: out_ref = (in1 - in2) - 1
    naiveSubtractElem(out_ref.data(), in1.data(), in2.data(), len, stream);
    naiveSubtractScalar(out_ref.data(), out_ref.data(), T(1), len, stream);

    auto out_view = raft::make_device_vector_view(out.data(), len);
    auto in1_view = raft::make_device_vector_view(in1.data(), len);

    auto const_out_view = raft::make_device_vector_view<const T>(out.data(), len);
    auto const_in1_view = raft::make_device_vector_view<const T>(in1.data(), len);
    auto const_in2_view = raft::make_device_vector_view<const T>(in2.data(), len);

    const auto scalar = static_cast<T>(1);
    auto scalar_view  = raft::make_host_scalar_view(&scalar);

    // out-of-place: out = (in1 - in2) - 1
    subtract(handle, const_in1_view, const_in2_view, out_view);
    subtract_scalar(handle, const_out_view, out_view, scalar_view);
    // in-place: in1 = (in1 - in2) - 1
    subtract(handle, const_in1_view, const_in2_view, in1_view);
    subtract_scalar(handle, const_in1_view, in1_view, scalar_view);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  SubtractInputs<T> params;
  rmm::device_uvector<T> in1, in2, out_ref, out;
};
// {tolerance, len, seed}
const std::vector<SubtractInputs<float>> inputsf2 = {{0.000001f, 1024 * 1024, 1234ULL}};
const std::vector<SubtractInputs<double>> inputsd2 = {{0.00000001, 1024 * 1024, 1234ULL}};
// float instantiation: both the out-of-place and the in-place results must match the reference.
using SubtractTestF = SubtractTest<float>;
TEST_P(SubtractTestF, Result)
{
  const raft::CompareApprox<float> approx(params.tolerance);
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(), out.data(), params.len, approx));
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(), in1.data(), params.len, approx));
}
// double instantiation of the same checks.
using SubtractTestD = SubtractTest<double>;
TEST_P(SubtractTestD, Result)
{
  const raft::CompareApprox<double> approx(params.tolerance);
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(), out.data(), params.len, approx));
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(), in1.data(), params.len, approx));
}
INSTANTIATE_TEST_SUITE_P(SubtractTests, SubtractTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(SubtractTests, SubtractTestD, ::testing::ValuesIn(inputsd2));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/unary_op.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../test_utils.cuh"
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
namespace linalg {
// Reference scaling kernel: out[i] = scalar * in[i].
// A null `in` pointer switches to out[i] = scalar * i, used to test write_only_unary_op.
template <typename InType, typename OutType, typename IdxType>
RAFT_KERNEL naiveScaleKernel(OutType* out, const InType* in, InType scalar, IdxType len)
{
  const IdxType tid = threadIdx.x + static_cast<IdxType>(blockIdx.x) * blockDim.x;
  if (tid >= len) { return; }
  out[tid] =
    (in == nullptr) ? static_cast<OutType>(scalar * tid) : static_cast<OutType>(scalar * in[tid]);
}
// Launches the naive scaling kernel on `stream`.
template <typename InType, typename IdxType = int, typename OutType = InType>
void naiveScale(OutType* out, const InType* in, InType scalar, int len, cudaStream_t stream)
{
  constexpr int kThreadsPerBlock = 64;
  const int grid                 = raft::ceildiv(len, kThreadsPerBlock);
  naiveScaleKernel<InType, OutType, IdxType>
    <<<grid, kThreadsPerBlock, 0, stream>>>(out, in, scalar, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Parameter pack for the unary op tests (header shared across test translation units).
template <typename InType, typename IdxType = int, typename OutType = InType>
struct UnaryOpInputs {
  OutType tolerance;            // comparison tolerance vs. the naive reference
  IdxType len;                  // number of elements
  InType scalar;                // per-element scale factor
  unsigned long long int seed;  // RNG seed
};
template <typename InType, typename IdxType = int, typename OutType = InType>
::std::ostream& operator<<(::std::ostream& os, const UnaryOpInputs<InType, IdxType, OutType>& d)
{
return os;
}
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/power.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/power.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Reference element-wise power: out[i] = in1[i] ^ in2[i].
template <typename Type>
RAFT_KERNEL naivePowerElemKernel(Type* out, const Type* in1, const Type* in2, int len)
{
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= len) { return; }
  out[tid] = raft::pow(in1[tid], in2[tid]);
}
// Launches the naive element-wise power kernel on `stream`.
template <typename Type>
void naivePowerElem(Type* out, const Type* in1, const Type* in2, int len, cudaStream_t stream)
{
  constexpr int kThreadsPerBlock = 64;
  const int grid                 = raft::ceildiv(len, kThreadsPerBlock);
  naivePowerElemKernel<Type><<<grid, kThreadsPerBlock, 0, stream>>>(out, in1, in2, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Reference scalar power: out[i] = in1[i] ^ in2.
template <typename Type>
RAFT_KERNEL naivePowerScalarKernel(Type* out, const Type* in1, const Type in2, int len)
{
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= len) { return; }
  out[tid] = raft::pow(in1[tid], in2);
}
// Launches the naive scalar power kernel on `stream`.
template <typename Type>
void naivePowerScalar(Type* out, const Type* in1, const Type in2, int len, cudaStream_t stream)
{
  constexpr int kThreadsPerBlock = 64;
  const int grid                 = raft::ceildiv(len, kThreadsPerBlock);
  naivePowerScalarKernel<Type><<<grid, kThreadsPerBlock, 0, stream>>>(out, in1, in2, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Parameter pack for the power tests.
template <typename T>
struct PowerInputs {
  T tolerance;                  // comparison tolerance vs. the naive reference
  int len;                      // number of elements
  unsigned long long int seed;  // RNG seed for the uniform inputs
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const PowerInputs<T>& dims)
{
return os;
}
// Test fixture: computes the reference (in1 ^ in2) ^ 2 with the naive kernels, then runs the
// public power/power_scalar APIs both out-of-place (into `out`) and in-place (overwriting
// `in1`). Fix: removed the unused `device_count` member.
template <typename T>
class PowerTest : public ::testing::TestWithParam<PowerInputs<T>> {
 protected:
  // Buffers start empty and are resized in SetUp once the parameters are known.
  PowerTest()
    : in1(0, resource::get_cuda_stream(handle)),
      in2(0, resource::get_cuda_stream(handle)),
      out_ref(0, resource::get_cuda_stream(handle)),
      out(0, resource::get_cuda_stream(handle))
  {
  }

  void SetUp() override
  {
    params = ::testing::TestWithParam<PowerInputs<T>>::GetParam();
    raft::random::RngState r(params.seed);
    int len             = params.len;
    cudaStream_t stream = resource::get_cuda_stream(handle);
    in1.resize(len, stream);
    in2.resize(len, stream);
    out_ref.resize(len, stream);
    out.resize(len, stream);
    // inputs drawn from [1, 2] to keep pow well-defined and bounded
    uniform(handle, r, in1.data(), len, T(1.0), T(2.0));
    uniform(handle, r, in2.data(), len, T(1.0), T(2.0));

    // reference: out_ref = (in1 ^ in2) ^ 2
    naivePowerElem(out_ref.data(), in1.data(), in2.data(), len, stream);
    naivePowerScalar(out_ref.data(), out_ref.data(), T(2), len, stream);

    auto out_view = raft::make_device_vector_view(out.data(), len);
    auto in1_view = raft::make_device_vector_view(in1.data(), len);

    auto const_out_view = raft::make_device_vector_view<const T>(out.data(), len);
    auto const_in1_view = raft::make_device_vector_view<const T>(in1.data(), len);
    auto const_in2_view = raft::make_device_vector_view<const T>(in2.data(), len);
    const auto scalar   = static_cast<T>(2);
    auto scalar_view    = raft::make_host_scalar_view(&scalar);

    // out-of-place: out = (in1 ^ in2) ^ 2
    power(handle, const_in1_view, const_in2_view, out_view);
    power_scalar(handle, const_out_view, out_view, scalar_view);
    // in-place: in1 = (in1 ^ in2) ^ 2
    power(handle, const_in1_view, const_in2_view, in1_view);
    power_scalar(handle, const_in1_view, in1_view, scalar_view);
    resource::sync_stream(handle);
  }

 protected:
  raft::resources handle;
  PowerInputs<T> params;
  rmm::device_uvector<T> in1, in2, out_ref, out;
};
// {tolerance, len, seed}
const std::vector<PowerInputs<float>> inputsf2 = {{0.000001f, 1024 * 1024, 1234ULL}};
const std::vector<PowerInputs<double>> inputsd2 = {{0.00000001, 1024 * 1024, 1234ULL}};
// float instantiation: both the out-of-place and the in-place results must match the reference.
using PowerTestF = PowerTest<float>;
TEST_P(PowerTestF, Result)
{
  const raft::CompareApprox<float> approx(params.tolerance);
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(), out.data(), params.len, approx));
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(), in1.data(), params.len, approx));
}
// double instantiation of the same checks.
using PowerTestD = PowerTest<double>;
TEST_P(PowerTestD, Result)
{
  const raft::CompareApprox<double> approx(params.tolerance);
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(), out.data(), params.len, approx));
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(), in1.data(), params.len, approx));
}
INSTANTIATE_TEST_CASE_P(PowerTests, PowerTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PowerTests, PowerTestD, ::testing::ValuesIn(inputsd2));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/reduce.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cublas_v2.h>
#include <raft/core/operators.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/memory.h>
namespace raft {
namespace linalg {
// Reference row-wise reduction over row-major data: one thread reduces one row of length D.
// When `inplace`, the previous dots[row] is folded in before the final transform.
template <typename InType,
          typename OutType,
          typename IdxType,
          typename MainLambda,
          typename ReduceLambda,
          typename FinalLambda>
RAFT_KERNEL naiveCoalescedReductionKernel(OutType* dots,
                                          const InType* data,
                                          IdxType D,
                                          IdxType N,
                                          OutType init,
                                          bool inplace,
                                          MainLambda main_op,
                                          ReduceLambda reduce_op,
                                          FinalLambda fin_op)
{
  const IdxType row = threadIdx.x + static_cast<IdxType>(blockIdx.x) * blockDim.x;
  if (row >= N) { return; }
  OutType acc = init;
  for (IdxType j = 0; j < D; ++j) {
    acc = reduce_op(acc, main_op(data[row * D + j], j));
  }
  dots[row] = inplace ? fin_op(reduce_op(dots[row], acc)) : fin_op(acc);
}
// Launches the naive coalesced (row-wise) reduction kernel: one thread per output row.
template <typename InType,
          typename OutType,
          typename IdxType,
          typename MainLambda   = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda  = raft::identity_op>
void naiveCoalescedReduction(OutType* dots,
                             const InType* data,
                             IdxType D,
                             IdxType N,
                             cudaStream_t stream,
                             OutType init,
                             bool inplace          = false,
                             MainLambda main_op    = raft::identity_op(),
                             ReduceLambda reduce_op = raft::add_op(),
                             FinalLambda fin_op    = raft::identity_op())
{
  constexpr IdxType kThreadsPerBlock = 64;
  const IdxType grid                 = raft::ceildiv(N, kThreadsPerBlock);
  naiveCoalescedReductionKernel<<<grid, kThreadsPerBlock, 0, stream>>>(
    dots, data, D, N, init, inplace, main_op, reduce_op, fin_op);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Reference column-wise reduction over row-major data: one thread reduces one column,
// striding by D between rows. When `inplace`, the previous dots[col] is folded in first.
template <typename InType,
          typename OutType,
          typename IdxType,
          typename MainLambda,
          typename ReduceLambda,
          typename FinalLambda>
RAFT_KERNEL naiveStridedReductionKernel(OutType* dots,
                                        const InType* data,
                                        IdxType D,
                                        IdxType N,
                                        OutType init,
                                        bool inplace,
                                        MainLambda main_op,
                                        ReduceLambda reduce_op,
                                        FinalLambda fin_op)
{
  const IdxType col = threadIdx.x + static_cast<IdxType>(blockIdx.x) * blockDim.x;
  if (col >= D) { return; }
  OutType acc = init;
  for (IdxType row = 0; row < N; ++row) {
    acc = reduce_op(acc, main_op(data[row * D + col], row));
  }
  dots[col] = inplace ? fin_op(reduce_op(dots[col], acc)) : fin_op(acc);
}
// Launches the naive strided (column-wise) reduction kernel: one thread per output column.
template <typename InType,
          typename OutType,
          typename IdxType,
          typename MainLambda   = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda  = raft::identity_op>
void naiveStridedReduction(OutType* dots,
                           const InType* data,
                           IdxType D,
                           IdxType N,
                           cudaStream_t stream,
                           OutType init,
                           bool inplace          = false,
                           MainLambda main_op    = raft::identity_op(),
                           ReduceLambda reduce_op = raft::add_op(),
                           FinalLambda fin_op    = raft::identity_op())
{
  constexpr IdxType kThreadsPerBlock = 64;
  const IdxType grid                 = raft::ceildiv(D, kThreadsPerBlock);
  naiveStridedReductionKernel<<<grid, kThreadsPerBlock, 0, stream>>>(
    dots, data, D, N, init, inplace, main_op, reduce_op, fin_op);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Dispatches to the coalesced or strided reference reduction based on layout/direction.
// Reducing along the contiguous dimension (rowMajor == alongRows) is coalesced; otherwise
// strided. Dimensions are passed as (leading, other) which swaps D/N for col-major input.
template <typename InType,
          typename OutType,
          typename IdxType,
          typename MainLambda   = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda  = raft::identity_op>
void naiveReduction(OutType* dots,
                    const InType* data,
                    IdxType D,
                    IdxType N,
                    bool rowMajor,
                    bool alongRows,
                    cudaStream_t stream,
                    OutType init,
                    bool inplace          = false,
                    MainLambda main_op    = raft::identity_op(),
                    ReduceLambda reduce_op = raft::add_op(),
                    FinalLambda fin_op    = raft::identity_op())
{
  const IdxType lead  = rowMajor ? D : N;
  const IdxType other = rowMajor ? N : D;
  if (rowMajor == alongRows) {
    naiveCoalescedReduction(dots, data, lead, other, stream, init, inplace, main_op, reduce_op, fin_op);
  } else {
    naiveStridedReduction(dots, data, lead, other, stream, init, inplace, main_op, reduce_op, fin_op);
  }
  RAFT_CUDA_TRY(cudaDeviceSynchronize());
}
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/matrix_vector.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "matrix_vector_op.cuh"
#include <gtest/gtest.h>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/matrix_vector.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Parameter pack for the matrix-vector broadcast op tests.
template <typename T, typename IdxType = int>
struct MatrixVectorInputs {
  T tolerance;                       // comparison tolerance vs. the naive reference
  IdxType rows, cols;                // matrix shape
  int operation_type;                // 0=mult_skip_zero, 1=div, 2=div_skip_zero, 3=add, 4=sub
  bool row_major, bcast_along_rows;  // matrix layout and broadcast direction
  unsigned long long int seed;       // RNG seed
};
template <typename T, typename IdxType>
::std::ostream& operator<<(::std::ostream& os, const MatrixVectorInputs<T, IdxType>& dims)
{
return os;
}
// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
// Runs the public matrix_vector API selected by `operation_type` in place on `in`.
template <typename T, typename IdxType>
void matrix_vector_op_launch(const raft::resources& handle,
                             T* in,
                             const T* vec1,
                             IdxType D,
                             IdxType N,
                             bool row_major,
                             bool bcast_along_rows,
                             int operation_type)
{
  // Both views alias the same buffer; only the one matching `row_major` is used.
  auto mat_rm = raft::make_device_matrix_view<T, IdxType, raft::row_major>(in, N, D);
  auto mat_cm = raft::make_device_matrix_view<T, IdxType, raft::col_major>(in, N, D);
  auto apply  = bcast_along_rows ? Apply::ALONG_ROWS : Apply::ALONG_COLUMNS;
  auto vec_len  = bcast_along_rows ? D : N;
  auto vec_view = raft::make_device_vector_view<const T, IdxType>(vec1, vec_len);

  switch (operation_type) {
    case 0:
      if (row_major) {
        binary_mult_skip_zero(handle, mat_rm, vec_view, apply);
      } else {
        binary_mult_skip_zero(handle, mat_cm, vec_view, apply);
      }
      break;
    case 1:
      if (row_major) {
        binary_div(handle, mat_rm, vec_view, apply);
      } else {
        binary_div(handle, mat_cm, vec_view, apply);
      }
      break;
    case 2:
      if (row_major) {
        binary_div_skip_zero(handle, mat_rm, vec_view, apply);
      } else {
        binary_div_skip_zero(handle, mat_cm, vec_view, apply);
      }
      break;
    case 3:
      if (row_major) {
        binary_add(handle, mat_rm, vec_view, apply);
      } else {
        binary_add(handle, mat_cm, vec_view, apply);
      }
      break;
    case 4:
      if (row_major) {
        binary_sub(handle, mat_rm, vec_view, apply);
      } else {
        binary_sub(handle, mat_cm, vec_view, apply);
      }
      break;
    default: THROW("Unknown operation type '%d'!", (int)operation_type);
  }
}
// Runs the naive reference implementation of the op selected by `operation_type`,
// in place on `in`, for comparison against matrix_vector_op_launch.
template <typename T, typename IdxType>
void naive_matrix_vector_op_launch(const raft::resources& handle,
                                   T* in,
                                   const T* vec1,
                                   IdxType D,
                                   IdxType N,
                                   bool row_major,
                                   bool bcast_along_rows,
                                   int operation_type)
{
  auto stream = resource::get_cuda_stream(handle);
  // multiply, leaving the matrix element untouched where the vector element is zero
  auto mult_skip_zero = [] __device__(T mat_element, T vec_element) {
    return vec_element != T(0) ? mat_element * vec_element : mat_element;
  };
  // divide, mapping division by (near-)zero vector elements to zero instead of inf/nan
  auto div_skip_zero = [] __device__(T mat_element, T vec_element) {
    return raft::abs(vec_element) < T(1e-10) ? T(0) : mat_element / vec_element;
  };

  switch (operation_type) {
    case 0:
      naiveMatVec(in, in, vec1, D, N, row_major, bcast_along_rows, mult_skip_zero, stream);
      break;
    case 1:
      naiveMatVec(in, in, vec1, D, N, row_major, bcast_along_rows, raft::div_op{}, stream);
      break;
    case 2:
      naiveMatVec(in, in, vec1, D, N, row_major, bcast_along_rows, div_skip_zero, stream);
      break;
    case 3:
      naiveMatVec(in, in, vec1, D, N, row_major, bcast_along_rows, raft::add_op{}, stream);
      break;
    case 4:
      naiveMatVec(in, in, vec1, D, N, row_major, bcast_along_rows, raft::sub_op{}, stream);
      break;
    default: THROW("Unknown operation type '%d'!", (int)operation_type);
  }
}
// Test fixture: runs the naive and public matrix-vector implementations on identical
// copies of the same random input and compares the two in-place results.
template <typename T, typename IdxType>
class MatrixVectorTest : public ::testing::TestWithParam<MatrixVectorInputs<T, IdxType>> {
 public:
  // Allocates the matrix, the broadcast vector, and both result buffers.
  MatrixVectorTest()
    : params(::testing::TestWithParam<MatrixVectorInputs<T, IdxType>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in(params.rows * params.cols, stream),
      out_ref(params.rows * params.cols, stream),
      out(params.rows * params.cols, stream),
      vec1(params.bcast_along_rows ? params.cols : params.rows, stream)
  {
  }

 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    IdxType N = params.rows, D = params.cols;
    IdxType len    = N * D;
    IdxType vecLen = params.bcast_along_rows ? D : N;
    uniform(handle, r, in.data(), len, (T)-1.0, (T)1.0);
    uniform(handle, r, vec1.data(), vecLen, (T)-1.0, (T)1.0);

    // both launchers mutate their buffer in place, so each gets its own copy of `in`
    raft::copy(out_ref.data(), in.data(), len, resource::get_cuda_stream(handle));
    raft::copy(out.data(), in.data(), len, resource::get_cuda_stream(handle));
    naive_matrix_vector_op_launch(handle,
                                  out_ref.data(),
                                  vec1.data(),
                                  D,
                                  N,
                                  params.row_major,
                                  params.bcast_along_rows,
                                  params.operation_type);
    matrix_vector_op_launch(handle,
                            out.data(),
                            vec1.data(),
                            D,
                            N,
                            params.row_major,
                            params.bcast_along_rows,
                            params.operation_type);
    resource::sync_stream(handle);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  MatrixVectorInputs<T, IdxType> params;
  rmm::device_uvector<T> in, out, out_ref, vec1;
};
// Fields: {tolerance, rows, cols, operation_type, row_major, bcast_along_rows,
// seed}; the cases sweep all five operation types across both layouts and both
// broadcast directions.
const std::vector<MatrixVectorInputs<float, int>> inputsf_i32 = {
  {0.00001f, 1024, 32, 0, true, true, 1234ULL},
  {0.00001f, 1024, 64, 1, true, true, 1234ULL},
  {0.00001f, 1024, 32, 2, true, false, 1234ULL},
  {0.00001f, 1024, 64, 3, true, false, 1234ULL},
  {0.00001f, 1024, 32, 4, false, true, 1234ULL},
  {0.00001f, 1024, 64, 0, false, true, 1234ULL},
  {0.00001f, 1024, 32, 1, false, false, 1234ULL},
  {0.00001f, 1024, 64, 2, false, false, 1234ULL},
  {0.00001f, 1024, 32, 3, true, true, 1234ULL},
  {0.00001f, 1024, 64, 4, true, true, 1234ULL},
  {0.00001f, 1024, 32, 0, true, false, 1234ULL},
  {0.00001f, 1024, 64, 1, true, false, 1234ULL},
  {0.00001f, 1024, 32, 2, false, true, 1234ULL},
  {0.00001f, 1024, 64, 3, false, true, 1234ULL},
  {0.00001f, 1024, 32, 4, false, false, 1234ULL},
  {0.00001f, 1024, 64, 0, false, false, 1234ULL}};
typedef MatrixVectorTest<float, int> MatrixVectorTestF_i32;
TEST_P(MatrixVectorTestF_i32, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.rows * params.cols, CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MatrixVectorTests,
                         MatrixVectorTestF_i32,
                         ::testing::ValuesIn(inputsf_i32));
// 64-bit index variant: larger matrices, operation types 0 and 1 only.
const std::vector<MatrixVectorInputs<float, size_t>> inputsf_i64 = {
  {0.00001f, 2500, 250, 0, false, false, 1234ULL}, {0.00001f, 2500, 250, 1, false, false, 1234ULL}};
typedef MatrixVectorTest<float, size_t> MatrixVectorTestF_i64;
TEST_P(MatrixVectorTestF_i64, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.rows * params.cols, CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MatrixVectorTests,
                         MatrixVectorTestF_i64,
                         ::testing::ValuesIn(inputsf_i64));
// Same sweep in double precision with a tighter tolerance.
const std::vector<MatrixVectorInputs<double, int>> inputsd_i32 = {
  {0.0000001, 1024, 32, 0, true, true, 1234ULL},
  {0.0000001, 1024, 64, 1, true, true, 1234ULL},
  {0.0000001, 1024, 32, 2, true, false, 1234ULL},
  {0.0000001, 1024, 64, 3, true, false, 1234ULL},
  {0.0000001, 1024, 32, 4, false, true, 1234ULL},
  {0.0000001, 1024, 64, 0, false, true, 1234ULL},
  {0.0000001, 1024, 32, 1, false, false, 1234ULL},
  {0.0000001, 1024, 64, 2, false, false, 1234ULL},
  {0.0000001, 1024, 32, 3, true, true, 1234ULL},
  {0.0000001, 1024, 64, 4, true, true, 1234ULL},
  {0.0000001, 1024, 32, 0, true, false, 1234ULL},
  {0.0000001, 1024, 64, 1, true, false, 1234ULL},
  {0.0000001, 1024, 32, 2, false, true, 1234ULL},
  {0.0000001, 1024, 64, 3, false, true, 1234ULL},
  {0.0000001, 1024, 32, 4, false, false, 1234ULL},
  {0.0000001, 1024, 64, 0, false, false, 1234ULL}};
typedef MatrixVectorTest<double, int> MatrixVectorTestD_i32;
TEST_P(MatrixVectorTestD_i32, Result)
{
  ASSERT_TRUE(devArrMatch(out_ref.data(),
                          out.data(),
                          params.rows * params.cols,
                          CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MatrixVectorTests,
                         MatrixVectorTestD_i32,
                         ::testing::ValuesIn(inputsd_i32));
const std::vector<MatrixVectorInputs<double, size_t>> inputsd_i64 = {
  {0.0000001, 2500, 250, 0, false, false, 1234ULL},
  {0.0000001, 2500, 250, 1, false, false, 1234ULL}};
typedef MatrixVectorTest<double, size_t> MatrixVectorTestD_i64;
TEST_P(MatrixVectorTestD_i64, Result)
{
  ASSERT_TRUE(devArrMatch(out_ref.data(),
                          out.data(),
                          params.rows * params.cols,
                          CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MatrixVectorTests,
                         MatrixVectorTestD_i64,
                         ::testing::ValuesIn(inputsd_i64));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/unary_op.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "unary_op.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Fixture for raft::linalg::unary_op: fills `in` with uniform random values;
// DoTest() computes the naive reference (scalar * x, cast to OutType) into
// out_ref and the tested unary_op result into out.
template <typename InType, typename IdxType, typename OutType = InType>
class UnaryOpTest : public ::testing::TestWithParam<UnaryOpInputs<InType, IdxType, OutType>> {
 public:
  UnaryOpTest()
    : params(::testing::TestWithParam<UnaryOpInputs<InType, IdxType, OutType>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in(params.len, stream),
      out_ref(params.len, stream),
      out(params.len, stream)
  {
  }
 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    auto len = params.len;
    // Random input in [-1, 1]; results are produced later in DoTest().
    uniform(handle, r, in.data(), len, InType(-1.0), InType(1.0));
    resource::sync_stream(handle, stream);
  }
  // virtual so WriteOnlyUnaryOpTest below can substitute its own launch.
  virtual void DoTest()
  {
    auto len = params.len;
    auto scalar = params.scalar;
    naiveScale(out_ref.data(), in.data(), scalar, len, stream);
    auto in_view = raft::make_device_vector_view<const InType>(in.data(), len);
    auto out_view = raft::make_device_vector_view(out.data(), len);
    // Multiply by the scalar, then cast InType -> OutType in a single pass.
    unary_op(handle,
             in_view,
             out_view,
             raft::compose_op(raft::cast_op<OutType>(), raft::mul_const_op<InType>(scalar)));
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  UnaryOpInputs<InType, IdxType, OutType> params;
  rmm::device_uvector<InType> in;
  rmm::device_uvector<OutType> out_ref, out;
};
// This helper must live at namespace scope: an extended __device__ lambda may
// not be defined inside a member function ("DoTest") that has private or
// protected access within its class, or nvcc rejects the translation unit.
template <typename InType, typename IdxType, typename OutType>
void launchWriteOnlyUnaryOp(const raft::resources& handle, OutType* out, InType scalar, IdxType len)
{
  // The output is produced purely from the element index: out[i] = scalar * i.
  auto fill_scaled_index = [scalar] __device__(OutType * ptr, IdxType idx) {
    *ptr = static_cast<OutType>(scalar * idx);
  };
  write_only_unary_op(handle, raft::make_device_vector_view(out, len), fill_scaled_index);
}
// Variant that exercises write_only_unary_op: the output is produced purely
// from the element index, no input buffer is read.
template <typename OutType, typename IdxType>
class WriteOnlyUnaryOpTest : public UnaryOpTest<OutType, IdxType, OutType> {
 protected:
  void DoTest() override
  {
    auto len = this->params.len;
    auto scalar = this->params.scalar;
    // nullptr input: naiveScale (unary_op.cuh) presumably scales the element
    // index instead of reading an input buffer — verify against its definition.
    naiveScale(this->out_ref.data(), (OutType*)nullptr, scalar, len, this->stream);
    launchWriteOnlyUnaryOp(this->handle, this->out.data(), scalar, len);
    resource::sync_stream(this->handle, this->stream);
  }
};
// Stamps out a typedef, a TEST_P comparing out vs out_ref with the configured
// tolerance, and the suite instantiation for the given fixture type.
#define UNARY_OP_TEST(test_type, test_name, inputs)                   \
  typedef RAFT_DEPAREN(test_type) test_name;                          \
  TEST_P(test_name, Result)                                           \
  {                                                                   \
    DoTest();                                                         \
    ASSERT_TRUE(devArrMatch(this->out_ref.data(),                     \
                            this->out.data(),                         \
                            this->params.len,                         \
                            CompareApprox(this->params.tolerance)));  \
  }                                                                   \
  INSTANTIATE_TEST_SUITE_P(UnaryOpTests, test_name, ::testing::ValuesIn(inputs))
// Inputs: {tolerance, len, scalar, seed} — one case per type/index combination.
const std::vector<UnaryOpInputs<float, int>> inputsf_i32 = {{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
UNARY_OP_TEST((UnaryOpTest<float, int>), UnaryOpTestF_i32, inputsf_i32);
UNARY_OP_TEST((WriteOnlyUnaryOpTest<float, int>), WriteOnlyUnaryOpTestF_i32, inputsf_i32);
const std::vector<UnaryOpInputs<float, size_t>> inputsf_i64 = {
  {0.000001f, 1024 * 1024, 2.f, 1234ULL}};
UNARY_OP_TEST((UnaryOpTest<float, size_t>), UnaryOpTestF_i64, inputsf_i64);
UNARY_OP_TEST((WriteOnlyUnaryOpTest<float, size_t>), WriteOnlyUnaryOpTestF_i64, inputsf_i64);
const std::vector<UnaryOpInputs<float, int, double>> inputsf_i32_d = {
  {0.000001f, 1024 * 1024, 2.f, 1234ULL}};
UNARY_OP_TEST((UnaryOpTest<float, int, double>), UnaryOpTestF_i32_D, inputsf_i32_d);
const std::vector<UnaryOpInputs<double, int>> inputsd_i32 = {
  {0.00000001, 1024 * 1024, 2.0, 1234ULL}};
UNARY_OP_TEST((UnaryOpTest<double, int>), UnaryOpTestD_i32, inputsd_i32);
UNARY_OP_TEST((WriteOnlyUnaryOpTest<double, int>), WriteOnlyUnaryOpTestD_i32, inputsd_i32);
const std::vector<UnaryOpInputs<double, size_t>> inputsd_i64 = {
  {0.00000001, 1024 * 1024, 2.0, 1234ULL}};
UNARY_OP_TEST((UnaryOpTest<double, size_t>), UnaryOpTestD_i64, inputsd_i64);
UNARY_OP_TEST((WriteOnlyUnaryOpTest<double, size_t>), WriteOnlyUnaryOpTestD_i64, inputsd_i64);
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/map.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "unary_op.cuh"
#include <gtest/gtest.h>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/map.cuh>
#include <raft/matrix/init.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
/*
 * A 12-byte wrapper around a single float, padded with two ints. Its purpose
 * is to exercise code paths whose element type is not power-of-two-sized.
 */
struct padded_float {
  float value_;   // the wrapped value; all arithmetic acts on this field only
  int padding1;   // padding, always zero-initialized by the float constructor
  int padding2;
  padded_float() = default;
  constexpr padded_float(const float& x) : value_(x), padding1(0), padding2(0) {}
  constexpr padded_float(const padded_float&) = default;
  constexpr padded_float& operator=(const padded_float&) = default;
  // Magnitude of the wrapped value (member fn: std::abs has no overload here).
  constexpr float abs() const { return std::abs(value_); }
};
// Arithmetic operators forward to the wrapped float; padding stays zero.
constexpr padded_float operator+(const padded_float& lhs, const padded_float& rhs)
{
  return {lhs.value_ + rhs.value_};
}
constexpr padded_float operator-(const padded_float& lhs, const padded_float& rhs)
{
  return {lhs.value_ - rhs.value_};
}
constexpr padded_float operator*(const padded_float& lhs, const padded_float& rhs)
{
  return {lhs.value_ * rhs.value_};
}
constexpr padded_float operator*(const padded_float& lhs, const int& scalar)
{
  return {scalar * lhs.value_};
}
// Comparisons look only at the wrapped value, never at the padding.
constexpr bool operator==(const padded_float& lhs, const padded_float& rhs)
{
  return lhs.value_ == rhs.value_;
}
constexpr bool operator<(const padded_float& lhs, const padded_float& rhs)
{
  return lhs.value_ < rhs.value_;
}
constexpr bool operator>(const padded_float& lhs, const padded_float& rhs)
{
  return lhs.value_ > rhs.value_;
}
// Streams as a plain float, e.g. for gtest failure messages.
inline std::ostream& operator<<(std::ostream& os, const padded_float& p)
{
  return os << p.value_;
}
// Runs the tested API: out[i] = in1[i] + in2[i] + in3[i] + scalar via
// raft::linalg::map with a ternary device lambda.
template <typename InType, typename IdxType, typename OutType>
void mapLaunch(OutType* out,
               const InType* in1,
               const InType* in2,
               const InType* in3,
               InType scalar,
               IdxType len,
               cudaStream_t stream)
{
  // A local resources object bound to the caller's stream keeps this launch
  // ordered with the rest of the test's work on that stream.
  raft::resources handle;
  resource::set_cuda_stream(handle, stream);
  auto out_view = raft::make_device_vector_view(out, len);
  auto in1_view = raft::make_device_vector_view(in1, len);
  auto in2_view = raft::make_device_vector_view(in2, len);
  auto in3_view = raft::make_device_vector_view(in3, len);
  map(
    handle,
    out_view,
    [=] __device__(InType a, InType b, InType c) { return a + b + c + scalar; },
    in1_view,
    in2_view,
    in3_view);
}
// Parameter pack for the map tests.
template <typename InType, typename IdxType = int, typename OutType = InType>
struct MapInputs {
  InType tolerance;               // comparison tolerance for devArrMatch
  IdxType len;                    // number of elements
  unsigned long long int seed;    // RNG seed for the random inputs
  InType scalar;                  // constant added inside the mapped lambda
};
template <typename InType, typename IdxType, typename OutType = InType>
void create_ref(OutType* out_ref,
const InType* in1,
const InType* in2,
const InType* in3,
InType scalar,
IdxType len,
cudaStream_t stream)
{
rmm::device_uvector<InType> tmp(len, stream);
eltwiseAdd(tmp.data(), in1, in2, len, stream);
eltwiseAdd(out_ref, tmp.data(), in3, len, stream);
scalarAdd(out_ref, out_ref, (OutType)scalar, len, stream);
RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
}
// Fixture: generates three random vectors, then computes both the eltwise
// reference (create_ref) and the map() result (mapLaunch) for comparison.
template <typename InType, typename IdxType, typename OutType = InType>
class MapTest : public ::testing::TestWithParam<MapInputs<InType, IdxType, OutType>> {
 public:
  MapTest()
    : params(::testing::TestWithParam<MapInputs<InType, IdxType, OutType>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in1(params.len, stream),
      in2(params.len, stream),
      in3(params.len, stream),
      out_ref(params.len, stream),
      out(params.len, stream)
  {
  }
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    IdxType len = params.len;
    if constexpr (std::is_floating_point<InType>::value) {
      // Plain float/double inputs can be generated directly.
      uniform(handle, r, in1.data(), len, InType(-1.0), InType(1.0));
      uniform(handle, r, in2.data(), len, InType(-1.0), InType(1.0));
      uniform(handle, r, in3.data(), len, InType(-1.0), InType(1.0));
    } else {
      // Non-floating-point InType (padded_float): the RNG cannot produce it
      // directly, so generate floats first and wrap each one.
      // First create random float arrays
      rmm::device_uvector<float> fin1(params.len, stream);
      rmm::device_uvector<float> fin2(params.len, stream);
      rmm::device_uvector<float> fin3(params.len, stream);
      uniform(handle, r, fin1.data(), len, float(-1.0), float(1.0));
      uniform(handle, r, fin2.data(), len, float(-1.0), float(1.0));
      uniform(handle, r, fin3.data(), len, float(-1.0), float(1.0));
      // Then pad them
      // NOTE(review): this local `handle` shadows the member `handle`; both
      // use the same stream, so ordering is preserved — confirm intentional.
      raft::device_resources handle{stream};
      auto fin1_view = raft::make_device_vector_view(fin1.data(), fin1.size());
      auto fin2_view = raft::make_device_vector_view(fin2.data(), fin2.size());
      auto fin3_view = raft::make_device_vector_view(fin3.data(), fin3.size());
      auto in1_view = raft::make_device_vector_view(in1.data(), in1.size());
      auto in2_view = raft::make_device_vector_view(in2.data(), in2.size());
      auto in3_view = raft::make_device_vector_view(in3.data(), in3.size());
      auto add_padding = [] __device__(float a) { return padded_float(a); };
      raft::linalg::map(handle, in1_view, add_padding, raft::make_const_mdspan(fin1_view));
      raft::linalg::map(handle, in2_view, add_padding, raft::make_const_mdspan(fin2_view));
      raft::linalg::map(handle, in3_view, add_padding, raft::make_const_mdspan(fin3_view));
    }
    create_ref(out_ref.data(), in1.data(), in2.data(), in3.data(), params.scalar, len, stream);
    mapLaunch(out.data(), in1.data(), in2.data(), in3.data(), params.scalar, len, stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  MapInputs<InType, IdxType, OutType> params;
  rmm::device_uvector<InType> in1, in2, in3;
  rmm::device_uvector<OutType> out_ref, out;
};
// Fixture for map_offset: the op receives the element index, so the result is
// compared against naiveScale driven by the index (nullptr input).
template <typename OutType, typename IdxType>
class MapOffsetTest : public ::testing::TestWithParam<MapInputs<OutType, IdxType, OutType>> {
 public:
  MapOffsetTest()
    : params(::testing::TestWithParam<MapInputs<OutType, IdxType, OutType>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      out_ref(params.len, stream),
      out(params.len, stream)
  {
  }
 protected:
  void SetUp() override
  {
    IdxType len = params.len;
    OutType scalar = params.scalar;
    // Reference: out_ref[i] = scalar * i (nullptr input selects index mode —
    // see naiveScale in unary_op.cuh).
    naiveScale(out_ref.data(), (OutType*)nullptr, scalar, len, stream);
    auto out_view = raft::make_device_vector_view(out.data(), len);
    // Tested path: multiply each offset by the scalar and cast to OutType.
    map_offset(handle,
               out_view,
               raft::compose_op(raft::cast_op<OutType>(), raft::mul_const_op<OutType>(scalar)));
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  MapInputs<OutType, IdxType, OutType> params;
  rmm::device_uvector<OutType> out_ref, out;
};
// Stamps out a typedef, a TEST_P comparing out vs out_ref within tolerance,
// and the suite instantiation for a given fixture type and input list.
#define MAP_TEST(test_type, test_name, inputs)                        \
  typedef RAFT_DEPAREN(test_type) test_name;                          \
  TEST_P(test_name, Result)                                           \
  {                                                                   \
    ASSERT_TRUE(devArrMatch(this->out_ref.data(),                     \
                            this->out.data(),                         \
                            this->params.len,                         \
                            CompareApprox(this->params.tolerance)));  \
  }                                                                   \
  INSTANTIATE_TEST_SUITE_P(MapTests, test_name, ::testing::ValuesIn(inputs))
// Inputs: {tolerance, len, seed, scalar}.
const std::vector<MapInputs<float, int>> inputsf_i32 = {{0.000001f, 1024 * 1024, 1234ULL, 3.2}};
MAP_TEST((MapTest<float, int>), MapTestF_i32, inputsf_i32);
MAP_TEST((MapOffsetTest<float, int>), MapOffsetTestF_i32, inputsf_i32);
const std::vector<MapInputs<float, size_t>> inputsf_i64 = {{0.000001f, 1024 * 1024, 1234ULL, 9.4}};
MAP_TEST((MapTest<float, size_t>), MapTestF_i64, inputsf_i64);
MAP_TEST((MapOffsetTest<float, size_t>), MapOffsetTestF_i64, inputsf_i64);
const std::vector<MapInputs<float, int, double>> inputsf_i32_d = {
  {0.000001f, 1024 * 1024, 1234ULL, 5.9}};
MAP_TEST((MapTest<float, int, double>), MapTestF_i32_D, inputsf_i32_d);
const std::vector<MapInputs<double, int>> inputsd_i32 = {{0.00000001, 1024 * 1024, 1234ULL, 7.5}};
MAP_TEST((MapTest<double, int>), MapTestD_i32, inputsd_i32);
MAP_TEST((MapOffsetTest<double, int>), MapOffsetTestD_i32, inputsd_i32);
const std::vector<MapInputs<double, size_t>> inputsd_i64 = {
  {0.00000001, 1024 * 1024, 1234ULL, 5.2}};
MAP_TEST((MapTest<double, size_t>), MapTestD_i64, inputsd_i64);
MAP_TEST((MapOffsetTest<double, size_t>), MapOffsetTestD_i64, inputsd_i64);
// Approximate-equality functor for padded_float arrays. It exists because the
// generic CompareApprox relies on std::abs, which cannot easily be overloaded
// for padded_float; this functor uses the type's own abs() member instead.
struct ComparePadded {
  float eps;
  ComparePadded(float eps_) : eps(eps_) {}
  ComparePadded(padded_float eps_) : eps(eps_.value_) {}
  ComparePadded(double eps_) : eps(eps_) {}
  bool operator()(const padded_float& a, const padded_float& b) const
  {
    // Absolute tolerance near zero, relative tolerance for large magnitudes.
    const float diff = (a - b).abs();
    if (diff <= eps) { return true; }
    const float magnitude = std::max(a.abs(), b.abs());
    return diff / magnitude <= eps;
  }
};
// Use ComparePadded (defined above) instead of CompareApprox for padded_float
// Same as MAP_TEST but compares with ComparePadded, since CompareApprox cannot
// take padded_float (no std::abs overload for it).
#define MAP_TEST_PADDED(test_type, test_name, inputs)                \
  typedef RAFT_DEPAREN(test_type) test_name;                         \
  TEST_P(test_name, Result)                                          \
  {                                                                  \
    ASSERT_TRUE(devArrMatch(this->out_ref.data(),                    \
                            this->out.data(),                        \
                            this->params.len,                        \
                            ComparePadded(this->params.tolerance))); \
  }                                                                  \
  INSTANTIATE_TEST_SUITE_P(MapTests, test_name, ::testing::ValuesIn(inputs))
const std::vector<MapInputs<padded_float, size_t>> inputsd_padded_float = {
  {0.00000001, 1024 * 1024, 1234ULL, 5.2}};
MAP_TEST_PADDED((MapTest<padded_float, size_t>), MapTestD_padded_float, inputsd_padded_float);
MAP_TEST_PADDED((MapOffsetTest<padded_float, size_t>),
                MapOffsetTestD_padded_float,
                inputsd_padded_float);
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/eig_sel.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if CUDART_VERSION >= 10010
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/eig.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Parameter pack for the selective eigendecomposition tests.
template <typename T>
struct EigSelInputs {
  T tolerance;       // comparison tolerance for values/vectors
  int len;           // total matrix size (n * n)
  int n;             // matrix dimension
  int n_eigen_vals;  // number of eigenpairs requested from eig_dc_selective
};
// gtest requires a stream operator for parameterized inputs; intentionally
// prints nothing.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const EigSelInputs<T>& dims)
{
  return os;
}
// Fixture: runs eig_dc_selective on a fixed 4x4 symmetric matrix and compares
// the selected eigenvalues/eigenvectors against hard-coded references.
template <typename T>
class EigSelTest : public ::testing::TestWithParam<EigSelInputs<T>> {
 public:
  EigSelTest()
    : params(::testing::TestWithParam<EigSelInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      cov_matrix(params.len, stream),
      eig_vectors(params.n_eigen_vals * params.n, stream),
      eig_vectors_ref(params.n_eigen_vals * params.n, stream),
      // eig_vals is sized n, but only the first n_eigen_vals entries are
      // written/compared.
      eig_vals(params.n, stream),
      eig_vals_ref(params.n, stream)
  {
  }
 protected:
  void SetUp() override
  {
    int len = params.len;
    ///@todo: Generate a random symmetric matrix
    T cov_matrix_h[] = {
      1.0, 0.9, 0.81, 0.729, 0.9, 1.0, 0.9, 0.81, 0.81, 0.9, 1.0, 0.9, 0.729, 0.81, 0.9, 1.0};
    ASSERT(len == 16, "This test only works with 4x4 matrices!");
    raft::update_device(cov_matrix.data(), cov_matrix_h, len, stream);
    // Expected eigenvectors for the n_eigen_vals selected eigenpairs
    // (n_eigen_vals * n = 12 values).
    T eig_vectors_ref_h[] = {-0.5123,
                             0.4874,
                             0.4874,
                             -0.5123,
                             0.6498,
                             0.2789,
                             -0.2789,
                             -0.6498,
                             0.4874,
                             0.5123,
                             0.5123,
                             0.4874};
    // NOTE(review): four entries, but only the first n_eigen_vals (3) are
    // copied below; the trailing 0.0 is unused padding.
    T eig_vals_ref_h[] = {0.1024, 0.3096, 3.5266, 0.0};
    raft::update_device(
      eig_vectors_ref.data(), eig_vectors_ref_h, params.n_eigen_vals * params.n, stream);
    raft::update_device(eig_vals_ref.data(), eig_vals_ref_h, params.n_eigen_vals, stream);
    auto cov_matrix_view = raft::make_device_matrix_view<const T, std::uint32_t, raft::col_major>(
      cov_matrix.data(), params.n, params.n);
    auto eig_vectors_view = raft::make_device_matrix_view<T, std::uint32_t, raft::col_major>(
      eig_vectors.data(), params.n_eigen_vals, params.n);
    auto eig_vals_view =
      raft::make_device_vector_view<T, std::uint32_t>(eig_vals.data(), params.n_eigen_vals);
    // OVERWRITE_INPUT lets the solver reuse the input matrix buffer as scratch.
    raft::linalg::eig_dc_selective(handle,
                                   cov_matrix_view,
                                   eig_vectors_view,
                                   eig_vals_view,
                                   static_cast<std::size_t>(params.n_eigen_vals),
                                   EigVecMemUsage::OVERWRITE_INPUT);
    resource::sync_stream(handle);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  EigSelInputs<T> params;
  rmm::device_uvector<T> cov_matrix;
  rmm::device_uvector<T> eig_vectors;
  rmm::device_uvector<T> eig_vectors_ref;
  rmm::device_uvector<T> eig_vals;
  rmm::device_uvector<T> eig_vals_ref;
};
// Inputs: {tolerance, len, n, n_eigen_vals} — one 4x4 case per precision.
const std::vector<EigSelInputs<float>> inputsf2 = {{0.001f, 4 * 4, 4, 3}};
const std::vector<EigSelInputs<double>> inputsd2 = {{0.001, 4 * 4, 4, 3}};
// Eigenvalue checks (absolute tolerance: signs of vectors are arbitrary, but
// values are compared directly).
typedef EigSelTest<float> EigSelTestValF;
TEST_P(EigSelTestValF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vals_ref.data(),
                                eig_vals.data(),
                                params.n_eigen_vals,
                                raft::CompareApproxAbs<float>(params.tolerance),
                                stream));
}
typedef EigSelTest<double> EigSelTestValD;
TEST_P(EigSelTestValD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vals_ref.data(),
                                eig_vals.data(),
                                params.n_eigen_vals,
                                raft::CompareApproxAbs<double>(params.tolerance),
                                stream));
}
// Eigenvector checks use CompareApproxAbs since eigenvector sign is not unique.
typedef EigSelTest<float> EigSelTestVecF;
TEST_P(EigSelTestVecF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vectors_ref.data(),
                                eig_vectors.data(),
                                params.n_eigen_vals * params.n,
                                raft::CompareApproxAbs<float>(params.tolerance),
                                stream));
}
typedef EigSelTest<double> EigSelTestVecD;
TEST_P(EigSelTestVecD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vectors_ref.data(),
                                eig_vectors.data(),
                                params.n_eigen_vals * params.n,
                                raft::CompareApproxAbs<double>(params.tolerance),
                                stream));
}
INSTANTIATE_TEST_SUITE_P(EigSelTest, EigSelTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(EigSelTest, EigSelTestValD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_SUITE_P(EigSelTest, EigSelTestVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(EigSelTest, EigSelTestVecD, ::testing::ValuesIn(inputsd2));
} // end namespace linalg
} // end namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/eltwise.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/eltwise.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
//// Testing unary ops
// Reference implementation: out[i] = scalar * in[i], one thread per element.
template <typename Type>
RAFT_KERNEL naiveScaleKernel(Type* out, const Type* in, Type scalar, int len)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= len) { return; }
  out[tid] = scalar * in[tid];
}
// Host-side launcher for the reference scaling kernel.
template <typename Type>
void naiveScale(Type* out, const Type* in, Type scalar, int len, cudaStream_t stream)
{
  static const int TPB = 64;
  int grid_size        = raft::ceildiv(len, TPB);
  naiveScaleKernel<Type><<<grid_size, TPB, 0, stream>>>(out, in, scalar, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Parameter pack for the scalarMultiply tests.
template <typename T>
struct ScalarMultiplyInputs {
  T tolerance;                  // comparison tolerance for devArrMatch
  int len;                      // number of elements
  T scalar;                     // multiplier applied to every element
  unsigned long long int seed;  // RNG seed
};
// gtest requires a stream operator for parameterized inputs; intentionally
// prints nothing.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const ScalarMultiplyInputs<T>& dims)
{
  return os;
}
// Fixture: scales a random vector with both the naive reference kernel and
// raft::linalg::scalarMultiply, keeping both results for comparison in TEST_P.
template <typename T>
class ScalarMultiplyTest : public ::testing::TestWithParam<ScalarMultiplyInputs<T>> {
 public:
  // Buffers are sized from params.len (the bare `len` identifier used before
  // was not declared at this point).
  ScalarMultiplyTest()
    : params(::testing::TestWithParam<ScalarMultiplyInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in(params.len, stream),
      out_ref(params.len, stream),
      out(params.len, stream)
  {
  }
 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    int len  = params.len;
    T scalar = params.scalar;
    // Random input in [-1, 1]; these APIs take raw device pointers, so the
    // rmm::device_uvector buffers are passed via .data().
    uniform(handle, r, in.data(), len, T(-1.0), T(1.0));
    naiveScale(out_ref.data(), in.data(), scalar, len, stream);
    scalarMultiply(out.data(), in.data(), scalar, len, stream);
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  ScalarMultiplyInputs<T> params;
  rmm::device_uvector<T> in, out_ref, out;
};
// Inputs: {tolerance, len, scalar, seed}.
const std::vector<ScalarMultiplyInputs<float>> inputsf1 = {{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
const std::vector<ScalarMultiplyInputs<double>> inputsd1 = {
  {0.00000001, 1024 * 1024, 2.0, 1234ULL}};
typedef ScalarMultiplyTest<float> ScalarMultiplyTestF;
TEST_P(ScalarMultiplyTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, CompareApprox<float>(params.tolerance), stream));
}
typedef ScalarMultiplyTest<double> ScalarMultiplyTestD;
TEST_P(ScalarMultiplyTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_SUITE_P(ScalarMultiplyTests, ScalarMultiplyTestF, ::testing::ValuesIn(inputsf1));
INSTANTIATE_TEST_SUITE_P(ScalarMultiplyTests, ScalarMultiplyTestD, ::testing::ValuesIn(inputsd1));
//// Testing binary ops
// Reference implementation: out[i] = in1[i] + in2[i], one thread per element.
template <typename Type>
RAFT_KERNEL naiveAddKernel(Type* out, const Type* in1, const Type* in2, int len)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= len) { return; }
  out[tid] = in1[tid] + in2[tid];
}
// Host-side launcher for the reference element-wise addition kernel.
template <typename Type>
void naiveAdd(Type* out, const Type* in1, const Type* in2, int len, cudaStream_t stream)
{
  static const int TPB = 64;
  int grid_size        = raft::ceildiv(len, TPB);
  naiveAddKernel<Type><<<grid_size, TPB, 0, stream>>>(out, in1, in2, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Parameter pack for the eltwiseAdd tests.
template <typename T>
struct EltwiseAddInputs {
  T tolerance;                  // comparison tolerance for devArrMatch
  int len;                      // number of elements
  unsigned long long int seed;  // RNG seed
};
// gtest requires a stream operator for parameterized inputs; intentionally
// prints nothing.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const EltwiseAddInputs<T>& dims)
{
  return os;
}
// Fixture: adds two random vectors with both the naive reference kernel and
// raft::linalg::eltwiseAdd, keeping both results for comparison in TEST_P.
template <typename T>
class EltwiseAddTest : public ::testing::TestWithParam<EltwiseAddInputs<T>> {
 public:
  EltwiseAddTest()
    : params(::testing::TestWithParam<EltwiseAddInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in1(params.len, stream),
      in2(params.len, stream),
      out_ref(params.len, stream),
      out(params.len, stream)
  {
  }
 protected:
  void SetUp() override
  {
    // params is already initialized in the constructor; no need to re-read it.
    raft::random::RngState r(params.seed);
    int len = params.len;
    // Random operands in [-1, 1]; these APIs take raw device pointers, so the
    // rmm::device_uvector buffers are passed via .data().
    uniform(handle, r, in1.data(), len, T(-1.0), T(1.0));
    uniform(handle, r, in2.data(), len, T(-1.0), T(1.0));
    naiveAdd(out_ref.data(), in1.data(), in2.data(), len, stream);
    eltwiseAdd(out.data(), in1.data(), in2.data(), len, stream);
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  EltwiseAddInputs<T> params;
  rmm::device_uvector<T> in1, in2, out_ref, out;
};
// Inputs: {tolerance, len, seed}.
const std::vector<EltwiseAddInputs<float>> inputsf2 = {{0.000001f, 1024 * 1024, 1234ULL}};
const std::vector<EltwiseAddInputs<double>> inputsd2 = {{0.00000001, 1024 * 1024, 1234ULL}};
typedef EltwiseAddTest<float> EltwiseAddTestF;
TEST_P(EltwiseAddTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, CompareApprox<float>(params.tolerance), stream));
}
typedef EltwiseAddTest<double> EltwiseAddTestD;
TEST_P(EltwiseAddTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_SUITE_P(EltwiseAddTests, EltwiseAddTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(EltwiseAddTests, EltwiseAddTestD, ::testing::ValuesIn(inputsd2));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/svd.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/init.cuh>
#include <raft/linalg/svd.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Parameter pack for the SVD tests.
template <typename T>
struct SvdInputs {
  T tolerance;                  // comparison tolerance for values/vectors
  int len;                      // total matrix size (n_row * n_col)
  int n_row;                    // number of rows
  int n_col;                    // number of columns
  unsigned long long int seed;  // RNG seed (unused: the matrix is hard-coded)
};
// gtest requires a stream operator for parameterized inputs; intentionally
// prints nothing.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SvdInputs<T>& dims)
{
  return os;
}
template <typename T>
class SvdTest : public ::testing::TestWithParam<SvdInputs<T>> {
public:
SvdTest()
: params(::testing::TestWithParam<SvdInputs<T>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
data(params.len, stream),
left_eig_vectors_qr(params.n_row * params.n_col, stream),
right_eig_vectors_trans_qr(params.n_col * params.n_col, stream),
sing_vals_qr(params.n_col, stream),
left_eig_vectors_ref(params.n_row * params.n_col, stream),
right_eig_vectors_ref(params.n_col * params.n_col, stream),
sing_vals_ref(params.len, stream)
{
}
protected:
void test_API()
{
auto data_view = raft::make_device_matrix_view<const T, int, raft::col_major>(
data.data(), params.n_row, params.n_col);
auto sing_vals_view = raft::make_device_vector_view<T, int>(sing_vals_qr.data(), params.n_col);
auto left_eig_vectors_view = raft::make_device_matrix_view<T, int, raft::col_major>(
left_eig_vectors_qr.data(), params.n_row, params.n_col);
auto right_eig_vectors_view = raft::make_device_matrix_view<T, int, raft::col_major>(
right_eig_vectors_trans_qr.data(), params.n_col, params.n_col);
raft::linalg::svd_eig(handle, data_view, sing_vals_view, right_eig_vectors_view, std::nullopt);
raft::linalg::svd_qr(handle, data_view, sing_vals_view);
raft::linalg::svd_qr(
handle, data_view, sing_vals_view, std::make_optional(left_eig_vectors_view));
raft::linalg::svd_qr(
handle, data_view, sing_vals_view, std::nullopt, std::make_optional(right_eig_vectors_view));
raft::linalg::svd_qr_transpose_right_vec(handle, data_view, sing_vals_view);
raft::linalg::svd_qr_transpose_right_vec(
handle, data_view, sing_vals_view, std::make_optional(left_eig_vectors_view));
raft::linalg::svd_qr_transpose_right_vec(
handle, data_view, sing_vals_view, std::nullopt, std::make_optional(right_eig_vectors_view));
}
void test_qr()
{
auto data_view = raft::make_device_matrix_view<const T, int, raft::col_major>(
data.data(), params.n_row, params.n_col);
auto sing_vals_qr_view =
raft::make_device_vector_view<T, int>(sing_vals_qr.data(), params.n_col);
auto left_eig_vectors_qr_view =
std::optional(raft::make_device_matrix_view<T, int, raft::col_major>(
left_eig_vectors_qr.data(), params.n_row, params.n_col));
auto right_eig_vectors_trans_qr_view =
std::make_optional(raft::make_device_matrix_view<T, int, raft::col_major>(
right_eig_vectors_trans_qr.data(), params.n_col, params.n_col));
svd_qr_transpose_right_vec(handle,
data_view,
sing_vals_qr_view,
left_eig_vectors_qr_view,
right_eig_vectors_trans_qr_view);
resource::sync_stream(handle, stream);
}
  // Uploads a fixed 3x2 input matrix plus precomputed reference outputs, runs
  // the API smoke test, then recomputes the SVD via test_qr() for verification.
  void SetUp() override
  {
    int len = params.len;
    // The hard-coded reference arrays below are only valid for this exact shape.
    ASSERT(params.n_row == 3, "This test only supports nrows=3!");
    ASSERT(params.len == 6, "This test only supports len=6!");
    // Col-major 3x2 matrix: col0 = [1,4,2], col1 = [2,5,1].
    T data_h[] = {1.0, 4.0, 2.0, 2.0, 5.0, 1.0};
    raft::update_device(data.data(), data_h, len, stream);
    int left_evl  = params.n_row * params.n_col;  // element count of left singular vectors
    int right_evl = params.n_col * params.n_col;  // element count of right singular vectors
    // Expected outputs (precomputed offline for the matrix above).
    T left_eig_vectors_ref_h[] = {-0.308219, -0.906133, -0.289695, 0.488195, 0.110706, -0.865685};
    T right_eig_vectors_ref_h[] = {-0.638636, -0.769509, -0.769509, 0.638636};
    T sing_vals_ref_h[] = {7.065283, 1.040081};
    raft::update_device(left_eig_vectors_ref.data(), left_eig_vectors_ref_h, left_evl, stream);
    raft::update_device(right_eig_vectors_ref.data(), right_eig_vectors_ref_h, right_evl, stream);
    raft::update_device(sing_vals_ref.data(), sing_vals_ref_h, params.n_col, stream);
    // test_API() overwrites the result buffers, so re-upload the input before
    // test_qr(), whose outputs are what the assertions actually check.
    test_API();
    raft::update_device(data.data(), data_h, len, stream);
    test_qr();
  }
protected:
raft::resources handle;
cudaStream_t stream;
SvdInputs<T> params;
rmm::device_uvector<T> data, left_eig_vectors_qr, right_eig_vectors_trans_qr, sing_vals_qr,
left_eig_vectors_ref, right_eig_vectors_ref, sing_vals_ref;
};
// Single fixed-size case (3x2); SetUp() asserts this shape.
const std::vector<SvdInputs<float>> inputsf2  = {{0.00001f, 3 * 2, 3, 2, 1234ULL}};
const std::vector<SvdInputs<double>> inputsd2 = {{0.00001, 3 * 2, 3, 2, 1234ULL}};

// Singular values vs. reference.
typedef SvdTest<float> SvdTestValF;
TEST_P(SvdTestValF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(sing_vals_ref.data(),
                                sing_vals_qr.data(),
                                params.n_col,
                                raft::CompareApproxAbs<float>(params.tolerance)));
}

typedef SvdTest<double> SvdTestValD;
TEST_P(SvdTestValD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(sing_vals_ref.data(),
                                sing_vals_qr.data(),
                                params.n_col,
                                raft::CompareApproxAbs<double>(params.tolerance)));
}

// Left singular vectors vs. reference (sign-insensitive via CompareApproxAbs).
typedef SvdTest<float> SvdTestLeftVecF;
TEST_P(SvdTestLeftVecF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(left_eig_vectors_ref.data(),
                                left_eig_vectors_qr.data(),
                                params.n_row * params.n_col,
                                raft::CompareApproxAbs<float>(params.tolerance)));
}

typedef SvdTest<double> SvdTestLeftVecD;
TEST_P(SvdTestLeftVecD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(left_eig_vectors_ref.data(),
                                left_eig_vectors_qr.data(),
                                params.n_row * params.n_col,
                                raft::CompareApproxAbs<double>(params.tolerance)));
}

// Transposed right singular vectors vs. reference.
typedef SvdTest<float> SvdTestRightVecF;
TEST_P(SvdTestRightVecF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(right_eig_vectors_ref.data(),
                                right_eig_vectors_trans_qr.data(),
                                params.n_col * params.n_col,
                                raft::CompareApproxAbs<float>(params.tolerance)));
}

typedef SvdTest<double> SvdTestRightVecD;
TEST_P(SvdTestRightVecD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(right_eig_vectors_ref.data(),
                                right_eig_vectors_trans_qr.data(),
                                params.n_col * params.n_col,
                                raft::CompareApproxAbs<double>(params.tolerance)));
}

INSTANTIATE_TEST_SUITE_P(SvdTests, SvdTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(SvdTests, SvdTestValD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_SUITE_P(SvdTests, SvdTestLeftVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(SvdTests, SvdTestLeftVecD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_SUITE_P(SvdTests, SvdTestRightVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(SvdTests, SvdTestRightVecD, ::testing::ValuesIn(inputsd2));
// INSTANTIATE_TEST_SUITE_P(SvdTests, SvdTestRightVecF,
//                         ::testing::ValuesIn(inputsf2));
// INSTANTIATE_TEST_SUITE_P(SvdTests, SvdTestRightVecD,
//::testing::ValuesIn(inputsd2));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/matrix_vector_op.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
namespace linalg {
// Reference kernel: one thread per matrix element, writing
// out[i] = op(mat[i], vec[col]) where `col` picks the broadcast vector entry.
template <typename OutT, typename MatT, typename VecT, typename Lambda, typename IdxType = int>
RAFT_KERNEL naiveMatVecKernel(OutT* out,
                              const MatT* mat,
                              const VecT* vec,
                              IdxType D,
                              IdxType N,
                              bool rowMajor,
                              bool bcastAlongRows,
                              Lambda op)
{
  const IdxType tid     = threadIdx.x + blockIdx.x * blockDim.x;
  const IdxType n_elems = N * D;
  if (tid >= n_elems) { return; }
  // The fast-varying extent is D for row-major and N for col-major storage.
  // When the broadcast direction follows that extent the vector index is the
  // remainder; otherwise it is the quotient. This collapses the original
  // four-way branch into one expression with identical results.
  const IdxType extent  = rowMajor ? D : N;
  const IdxType vec_idx = (rowMajor == bcastAlongRows) ? (tid % extent) : (tid / extent);
  out[tid] = op(mat[tid], vec[vec_idx]);
}
// Host-side launcher for naiveMatVecKernel with an arbitrary binary op.
template <typename OutT, typename MatT, typename VecT, typename Lambda, typename IdxType = int>
void naiveMatVec(OutT* out,
                 const MatT* mat,
                 const VecT* vec,
                 IdxType D,
                 IdxType N,
                 bool rowMajor,
                 bool bcastAlongRows,
                 Lambda op,
                 cudaStream_t stream)
{
  constexpr IdxType TPB = 64;  // threads per block
  const IdxType n_elems = N * D;
  const IdxType n_blocks = raft::ceildiv(n_elems, TPB);
  naiveMatVecKernel<<<n_blocks, TPB, 0, stream>>>(out, mat, vec, D, N, rowMajor, bcastAlongRows, op);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
template <typename OutT, typename MatT, typename VecT, typename IdxType = int>
void naiveMatVec(OutT* out,
const MatT* mat,
const VecT* vec,
IdxType D,
IdxType N,
bool rowMajor,
bool bcastAlongRows,
OutT scalar,
cudaStream_t stream)
{
naiveMatVec(
out,
mat,
vec,
D,
N,
rowMajor,
bcastAlongRows,
[scalar] __device__(MatT a, VecT b) { return (OutT)(a + scalar * b); },
stream);
}
// Reference kernel, two-vector variant: out[i] = op(mat[i], vec1[col], vec2[col]).
template <typename OutT,
          typename MatT,
          typename Vec1T,
          typename Vec2T,
          typename Lambda,
          typename IdxType = int>
RAFT_KERNEL naiveMatVecKernel(OutT* out,
                              const MatT* mat,
                              const Vec1T* vec1,
                              const Vec2T* vec2,
                              IdxType D,
                              IdxType N,
                              bool rowMajor,
                              bool bcastAlongRows,
                              Lambda op)
{
  const IdxType tid     = threadIdx.x + blockIdx.x * blockDim.x;
  const IdxType n_elems = N * D;
  if (tid >= n_elems) { return; }
  // Same index arithmetic as the single-vector kernel: modulo when the
  // broadcast follows the fast-varying extent, division otherwise.
  const IdxType extent  = rowMajor ? D : N;
  const IdxType vec_idx = (rowMajor == bcastAlongRows) ? (tid % extent) : (tid / extent);
  out[tid] = op(mat[tid], vec1[vec_idx], vec2[vec_idx]);
}
// Host-side launcher for the two-vector naiveMatVecKernel.
template <typename OutT,
          typename MatT,
          typename Vec1T,
          typename Vec2T,
          typename Lambda,
          typename IdxType = int>
void naiveMatVec(OutT* out,
                 const MatT* mat,
                 const Vec1T* vec1,
                 const Vec2T* vec2,
                 IdxType D,
                 IdxType N,
                 bool rowMajor,
                 bool bcastAlongRows,
                 Lambda op,
                 cudaStream_t stream)
{
  constexpr IdxType TPB = 64;  // threads per block
  const IdxType n_elems = N * D;
  naiveMatVecKernel<<<raft::ceildiv(n_elems, TPB), TPB, 0, stream>>>(
    out, mat, vec1, vec2, D, N, rowMajor, bcastAlongRows, op);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
template <typename OutT, typename MatT, typename Vec1T, typename Vec2T, typename IdxType = int>
void naiveMatVec(OutT* out,
const MatT* mat,
const Vec1T* vec1,
const Vec2T* vec2,
IdxType D,
IdxType N,
bool rowMajor,
bool bcastAlongRows,
OutT scalar,
cudaStream_t stream)
{
naiveMatVec(
out,
mat,
vec1,
vec2,
D,
N,
rowMajor,
bcastAlongRows,
[scalar] __device__(MatT a, Vec1T b, Vec2T c) { return (OutT)(a + scalar * b + c); },
stream);
}
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/add.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "add.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/add.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Fixture: computes out = in1 + in2 with raft::linalg::add and compares the
// result against the hand-written element-wise reference (naiveAddElem).
// OutT may differ from InT to exercise mixed-precision addition.
template <typename InT, typename OutT = InT>
class AddTest : public ::testing::TestWithParam<AddInputs<InT, OutT>> {
 public:
  AddTest()
    : params(::testing::TestWithParam<AddInputs<InT, OutT>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in1(params.len, stream),
      in2(params.len, stream),
      out_ref(params.len, stream),
      out(params.len, stream)
  {
  }

 protected:
  void SetUp() override
  {
    params = ::testing::TestWithParam<AddInputs<InT, OutT>>::GetParam();
    raft::random::RngState r{params.seed};
    int len = params.len;
    // Random inputs in [-1, 1].
    uniform(handle, r, in1.data(), len, InT(-1.0), InT(1.0));
    uniform(handle, r, in2.data(), len, InT(-1.0), InT(1.0));
    // Reference result from the naive kernel.
    naiveAddElem<InT, OutT>(out_ref.data(), in1.data(), in2.data(), len, stream);
    auto out_view = raft::make_device_vector_view(out.data(), out.size());
    auto in1_view = raft::make_device_vector_view<const InT>(in1.data(), in1.size());
    auto in2_view = raft::make_device_vector_view<const InT>(in2.data(), in2.size());
    add(handle, in1_view, in2_view, out_view);
    resource::sync_stream(handle, stream);
  }

  // Asserts the raft result matches the reference within the given tolerance.
  void compare()
  {
    ASSERT_TRUE(raft::devArrMatch(
      out_ref.data(), out.data(), params.len, raft::CompareApprox<OutT>(params.tolerance), stream));
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  AddInputs<InT, OutT> params;
  rmm::device_uvector<InT> in1;        // first addend
  rmm::device_uvector<InT> in2;        // second addend
  rmm::device_uvector<OutT> out_ref;   // reference sum
  rmm::device_uvector<OutT> out;       // sum under test
};
// Sizes chosen around the 1M mark, including off-by-one/two lengths to hit
// partial-block tail handling.
const std::vector<AddInputs<float>> inputsf = {
  {0.000001f, 1024 * 1024, 1234ULL},
  {0.000001f, 1024 * 1024 + 2, 1234ULL},
  {0.000001f, 1024 * 1024 + 1, 1234ULL},
};

typedef AddTest<float> AddTestF;
TEST_P(AddTestF, Result) { compare(); }
INSTANTIATE_TEST_SUITE_P(AddTests, AddTestF, ::testing::ValuesIn(inputsf));

const std::vector<AddInputs<double>> inputsd = {
  {0.00000001, 1024 * 1024, 1234ULL},
  {0.00000001, 1024 * 1024 + 2, 1234ULL},
  {0.00000001, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<double> AddTestD;
TEST_P(AddTestD, Result) { compare(); }
INSTANTIATE_TEST_SUITE_P(AddTests, AddTestD, ::testing::ValuesIn(inputsd));

// Mixed precision: float inputs accumulated into double outputs.
const std::vector<AddInputs<float, double>> inputsfd = {
  {0.00000001, 1024 * 1024, 1234ULL},
  {0.00000001, 1024 * 1024 + 2, 1234ULL},
  {0.00000001, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<float, double> AddTestFD;
TEST_P(AddTestFD, Result) { compare(); }
INSTANTIATE_TEST_SUITE_P(AddTests, AddTestFD, ::testing::ValuesIn(inputsfd));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/reduce_cols_by_key.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/interruptible.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/reduce_cols_by_key.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/itertools.hpp>
namespace raft {
namespace linalg {
// CPU reference for reduce_cols_by_key: for every row, sums each column into
// the output bucket selected by that column's key.
template <typename T, typename KeyT, typename IdxT>
void naiveReduceColsByKey(const T* in,
                          const KeyT* keys,
                          T* out_ref,
                          IdxT nrows,
                          IdxT ncols,
                          IdxT nkeys,
                          cudaStream_t stream)
{
  // Stage inputs on the host.
  std::vector<KeyT> keys_h(ncols, 0u);
  std::vector<T> in_h(nrows * ncols);
  raft::copy(keys_h.data(), keys, ncols, stream);
  raft::copy(in_h.data(), in, nrows * ncols, stream);
  raft::interruptible::synchronize(stream);
  // Accumulate per (row, key). Loop order matches the original to keep the
  // floating-point summation order (and thus rounding) identical.
  std::vector<T> acc(nrows * nkeys, T(0));
  for (IdxT row = 0; row < nrows; ++row) {
    for (IdxT col = 0; col < ncols; ++col) {
      acc[row * nkeys + keys_h[col]] += in_h[row * ncols + col];
    }
  }
  // Publish the reference result back to the device.
  raft::copy(out_ref, acc.data(), nrows * nkeys, stream);
  raft::interruptible::synchronize(stream);
}
// Parameters for one reduce_cols_by_key test case.
template <typename T, typename IdxT>
struct ReduceColsInputs {
  T tolerance;                      // comparison tolerance for devArrMatch
  IdxT rows;                        // number of matrix rows
  IdxT cols;                        // number of matrix columns
  IdxT nkeys;                       // number of distinct keys (output columns)
  unsigned long long int seed;      // RNG seed for inputs and keys
};
// Pretty-printer used by gtest for parameterized test names:
// "{tolerance,rows,cols,nkeys,seed}".
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const ReduceColsInputs<T, IdxT>& p)
{
  os << '{' << p.tolerance << ',' << p.rows << ',' << p.cols << ',' << p.nkeys << ',' << p.seed
     << '}';
  return os;
}
// Fixture: compares raft::linalg::reduce_cols_by_key against the CPU reference.
template <typename T, typename KeyT, typename IdxT>
class ReduceColsTest : public ::testing::TestWithParam<ReduceColsInputs<T, IdxT>> {
 protected:
  // NOTE(review): the uvectors are constructed on the member `stream` (0, i.e.
  // the default stream) while SetUp() resizes and runs everything on a
  // handle-local stream that shadows it — this works but mixes streams;
  // confirm this is intentional.
  ReduceColsTest() : in(0, stream), out_ref(0, stream), out(0, stream), keys(0, stream) {}

  void SetUp() override
  {
    params = ::testing::TestWithParam<ReduceColsInputs<T, IdxT>>::GetParam();
    raft::random::RngState r(params.seed);
    raft::resources handle;
    auto stream = resource::get_cuda_stream(handle);  // shadows the member `stream`
    auto nrows = params.rows;
    auto ncols = params.cols;
    auto nkeys = params.nkeys;
    in.resize(nrows * ncols, stream);
    keys.resize(ncols, stream);
    out_ref.resize(nrows * nkeys, stream);
    out.resize(nrows * nkeys, stream);
    // Random values in [-1, 1] and random keys in [0, nkeys).
    uniform(handle, r, in.data(), nrows * ncols, T(-1.0), T(1.0));
    uniformInt(handle, r, keys.data(), ncols, KeyT{0}, static_cast<KeyT>(params.nkeys));
    // CPU reference result.
    naiveReduceColsByKey(in.data(), keys.data(), out_ref.data(), nrows, ncols, nkeys, stream);
    auto input_view = raft::make_device_matrix_view<const T>(in.data(), nrows, ncols);
    auto output_view = raft::make_device_matrix_view(out.data(), nrows, nkeys);
    auto keys_view = raft::make_device_vector_view<const KeyT>(keys.data(), ncols);
    reduce_cols_by_key(handle, input_view, keys_view, output_view, nkeys);
    raft::interruptible::synchronize(stream);
  }

 protected:
  cudaStream_t stream = 0;
  ReduceColsInputs<T, IdxT> params;
  rmm::device_uvector<T> in, out_ref, out;  // input matrix, reference, result
  rmm::device_uvector<KeyT> keys;           // per-column key assignment
};
// Declares a typed test + result assertion + instantiation in one shot.
// (Comments must stay outside the macro body: a `//` comment would swallow the
// line-continuation backslashes.)
#define RCBK_TEST(test_type, test_name, test_inputs) \
typedef RAFT_DEPAREN(test_type) test_name; \
TEST_P(test_name, Result) \
{ \
ASSERT_TRUE(raft::devArrMatch(out_ref.data(), \
out.data(), \
params.rows* params.nkeys, \
raft::CompareApprox(params.tolerance))); \
} \
INSTANTIATE_TEST_CASE_P(ReduceColsTests, test_name, ::testing::ValuesIn(test_inputs))

// Cartesian products of shapes/key counts for each (T, IdxT) combination.
const std::vector<ReduceColsInputs<float, int>> inputsf_i32 =
  raft::util::itertools::product<ReduceColsInputs<float, int>>(
    {0.001f}, {1, 9, 63, 1024}, {1234, 9999, 101010}, {7, 42, 127, 515, 2022}, {1234ULL});
const std::vector<ReduceColsInputs<double, int>> inputsd_i32 =
  raft::util::itertools::product<ReduceColsInputs<double, int>>(
    {0.000001}, {1, 9, 63, 1024}, {1234, 9999, 101010}, {7, 42, 127, 515, 2022}, {1234ULL});
const std::vector<ReduceColsInputs<float, uint32_t>> inputsf_u32 =
  raft::util::itertools::product<ReduceColsInputs<float, uint32_t>>({0.001f},
                                                                    {1u, 9u, 63u, 1024u},
                                                                    {1234u, 9999u, 101010u},
                                                                    {7u, 42u, 127u, 515u, 2022u},
                                                                    {1234ULL});
const std::vector<ReduceColsInputs<float, int64_t>> inputsf_i64 =
  raft::util::itertools::product<ReduceColsInputs<float, int64_t>>(
    {0.001f}, {1, 9, 63, 1024}, {1234, 9999, 101010}, {7, 42, 127, 515, 2022}, {1234ULL});

// Template args are <T, KeyT, IdxT>.
// NOTE(review): the last test name reads FI32I64 but the key type is uint32_t
// (FU32I64 would match the convention) — name only, behavior unaffected.
RCBK_TEST((ReduceColsTest<float, uint32_t, int>), ReduceColsTestFU32I32, inputsf_i32);
RCBK_TEST((ReduceColsTest<double, uint32_t, int>), ReduceColsTestDU32I32, inputsd_i32);
RCBK_TEST((ReduceColsTest<float, int, uint32_t>), ReduceColsTestFI32U32, inputsf_u32);
RCBK_TEST((ReduceColsTest<float, uint32_t, int64_t>), ReduceColsTestFI32I64, inputsf_i64);
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/eigen_solvers.cu | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/nvtx.hpp>
#include <raft/core/resource/device_id.hpp>
#include <raft/core/resources.hpp>
#include <raft/spectral/eigen_solvers.cuh>
#include <raft/spectral/partition.cuh>
#include <gtest/gtest.h>
#include <cstddef>
#include <iostream>
#include <memory>
#include <type_traits>
namespace raft {
namespace spectral {
// Smoke test: with an all-null sparse matrix and null output buffers, the
// Lanczos solver entry points are expected to throw rather than crash.
TEST(Raft, EigenSolvers)
{
  common::nvtx::range fun_scope("test::EigenSolvers");
  using namespace matrix;
  using index_type = int;
  using value_type = double;
  raft::resources h;
  ASSERT_EQ(0, resource::get_device_id(h));
  // Empty/null CSR components.
  index_type* ro{nullptr};
  index_type* ci{nullptr};
  value_type* vs{nullptr};
  index_type nnz = 0;
  index_type nrows = 0;
  sparse_matrix_t<index_type, value_type> sm1{h, ro, ci, vs, nrows, nnz};
  ASSERT_EQ(nullptr, sm1.row_offsets_);
  // Solver configuration (values are irrelevant for this negative test).
  index_type neigvs{10};
  index_type maxiter{100};
  index_type restart_iter{10};
  value_type tol{1.0e-10};
  bool reorthog{true};
  // nullptr expected to trigger exceptions:
  //
  value_type* eigvals{nullptr};
  value_type* eigvecs{nullptr};
  std::uint64_t seed{100110021003};
  eigen_solver_config_t<index_type, value_type> cfg{
    neigvs, maxiter, restart_iter, tol, reorthog, seed};
  lanczos_solver_t<index_type, value_type> eig_solver{cfg};
  EXPECT_ANY_THROW(eig_solver.solve_smallest_eigenvectors(h, sm1, eigvals, eigvecs));
  EXPECT_ANY_THROW(eig_solver.solve_largest_eigenvectors(h, sm1, eigvals, eigvecs));
}
// Smoke test: spectral partition() and analyzePartition() must throw when
// handed an empty sparse matrix and null output buffers.
TEST(Raft, SpectralSolvers)
{
  common::nvtx::range fun_scope("test::SpectralSolvers");
  using namespace matrix;
  using index_type = int;
  using value_type = double;

  raft::resources h;
  ASSERT_EQ(0, resource::get_device_id(h));

  // Eigen-solver configuration; actual values are irrelevant here.
  index_type neigvs{10};
  index_type maxiter{100};
  index_type restart_iter{10};
  value_type tol{1.0e-10};
  bool reorthog{true};
  unsigned long long seed{100110021003};
  eigen_solver_config_t<index_type, value_type> eig_cfg{
    neigvs, maxiter, restart_iter, tol, reorthog, seed};
  lanczos_solver_t<index_type, value_type> eig_solver{eig_cfg};

  // Cluster-solver configuration.
  index_type k{5};
  cluster_solver_config_t<index_type, value_type> clust_cfg{k, maxiter, tol, seed};
  kmeans_solver_t<index_type, value_type> cluster_solver{clust_cfg};

  // Null outputs expected to trigger exceptions.
  index_type* clusters{nullptr};
  value_type* eigvals{nullptr};
  value_type* eigvecs{nullptr};

  sparse_matrix_t<index_type, value_type> sm{h, nullptr, nullptr, nullptr, 0, 0};
  EXPECT_ANY_THROW(
    spectral::partition(h, sm, eig_solver, cluster_solver, clusters, eigvals, eigvecs));

  value_type edgeCut{0};
  value_type cost{0};
  EXPECT_ANY_THROW(spectral::analyzePartition(h, sm, k, clusters, edgeCut, cost));
}
} // namespace spectral
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/axpy.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/axpy.cuh>
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_scalar.hpp>
namespace raft {
namespace linalg {
// Reference axpy: y[i * incy] += alpha * x[i * incx], one thread per element.
template <typename T>
RAFT_KERNEL naiveAxpy(const int n, const T alpha, const T* x, T* y, int incx, int incy)
{
  const int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= n) { return; }
  y[i * incy] += alpha * x[i * incx];
}
// Parameters for one axpy test case.
template <typename InType, typename IndexType = int, typename OutType = InType>
struct AxpyInputs {
  OutType tolerance;               // comparison tolerance for devArrMatch
  IndexType len;                   // logical vector length
  InType alpha;                    // scale factor applied to x
  IndexType incx;                  // stride of x (1 = contiguous)
  IndexType incy;                  // stride of y (1 = contiguous)
  unsigned long long int seed;     // RNG seed
};
// Fixture: runs raft::linalg::axpy (y += alpha * x) for every combination of
// strided/contiguous x and y, passing alpha both as a host scalar view and a
// device scalar view, and compares against the naiveAxpy reference.
template <typename T, typename IndexType = int>
class AxpyTest : public ::testing::TestWithParam<AxpyInputs<T>> {
 protected:
  raft::resources handle;
  AxpyInputs<T, IndexType> params;
  rmm::device_uvector<T> refy;            // reference result (naiveAxpy)
  rmm::device_uvector<T> y_device_alpha;  // result with device-scalar alpha
  rmm::device_uvector<T> y_host_alpha;    // result with host-scalar alpha

 public:
  AxpyTest()
    : testing::TestWithParam<AxpyInputs<T>>(),
      refy(0, resource::get_cuda_stream(handle)),
      y_host_alpha(0, resource::get_cuda_stream(handle)),
      y_device_alpha(0, resource::get_cuda_stream(handle))
  {
    resource::sync_stream(handle);
  }

 protected:
  void SetUp() override
  {
    params = ::testing::TestWithParam<AxpyInputs<T>>::GetParam();
    cudaStream_t stream = resource::get_cuda_stream(handle);
    raft::random::RngState r(params.seed);
    // Allocated sizes account for the element strides.
    IndexType x_len = params.len * params.incx;
    IndexType y_len = params.len * params.incy;
    rmm::device_uvector<T> x(x_len, stream);
    y_host_alpha.resize(y_len, stream);
    y_device_alpha.resize(y_len, stream);
    refy.resize(y_len, stream);
    uniform(handle, r, x.data(), x_len, T(-1.0), T(1.0));
    uniform(handle, r, refy.data(), y_len, T(-1.0), T(1.0));
    // Take a copy of the random generated values in refy
    // this is necessary since axpy uses y for both input and output
    raft::copy(y_host_alpha.data(), refy.data(), y_len, stream);
    raft::copy(y_device_alpha.data(), refy.data(), y_len, stream);
    int threads = 64;
    int blocks  = raft::ceildiv<int>(params.len, threads);
    // Reference result, computed in place on refy.
    naiveAxpy<T><<<blocks, threads, 0, stream>>>(
      params.len, params.alpha, x.data(), refy.data(), params.incx, params.incy);
    auto host_alpha_view = make_host_scalar_view<const T>(&params.alpha);
    // test out both axpy overloads - taking either a host scalar or device scalar view
    rmm::device_scalar<T> device_alpha(params.alpha, stream);
    auto device_alpha_view = make_device_scalar_view<const T>(device_alpha.data());
    // Strided views are needed whenever inc > 1; pick the overload matching
    // the (incx, incy) combination so all four view shapes get exercised.
    if ((params.incx > 1) && (params.incy > 1)) {
      auto x_view = make_device_vector_view<const T, IndexType, layout_stride>(
        x.data(), make_vector_strided_layout<IndexType>(params.len, params.incx));
      axpy(handle,
           host_alpha_view,
           x_view,
           make_device_vector_view<T, IndexType, layout_stride>(
             y_host_alpha.data(), make_vector_strided_layout(params.len, params.incy)));
      axpy(handle,
           device_alpha_view,
           x_view,
           make_device_vector_view<T, IndexType, layout_stride>(
             y_device_alpha.data(), make_vector_strided_layout(params.len, params.incy)));
    } else if (params.incx > 1) {
      auto x_view = make_device_vector_view<const T, IndexType, layout_stride>(
        x.data(), make_vector_strided_layout<IndexType>(params.len, params.incx));
      axpy(handle,
           host_alpha_view,
           x_view,
           make_device_vector_view<T>(y_host_alpha.data(), params.len));
      axpy(handle,
           device_alpha_view,
           x_view,
           make_device_vector_view<T>(y_device_alpha.data(), params.len));
    } else if (params.incy > 1) {
      auto x_view = make_device_vector_view<const T>(x.data(), params.len);
      axpy(handle,
           host_alpha_view,
           x_view,
           make_device_vector_view<T, IndexType, layout_stride>(
             y_host_alpha.data(), make_vector_strided_layout(params.len, params.incy)));
      axpy(handle,
           device_alpha_view,
           x_view,
           make_device_vector_view<T, IndexType, layout_stride>(
             y_device_alpha.data(), make_vector_strided_layout(params.len, params.incy)));
    } else {
      auto x_view = make_device_vector_view<const T>(x.data(), params.len);
      axpy(handle,
           host_alpha_view,
           x_view,
           make_device_vector_view<T>(y_host_alpha.data(), params.len));
      axpy(handle,
           device_alpha_view,
           x_view,
           make_device_vector_view<T>(y_device_alpha.data(), params.len));
    }
    resource::sync_stream(handle);
  }
  void TearDown() override {}
};
// Cases cover contiguous (incx=incy=1) plus strided-x, strided-y, and both.
const std::vector<AxpyInputs<float>> inputsf = {
  {0.000001f, 1024 * 1024, 2.f, 1, 1, 1234ULL},
  {0.000001f, 16 * 1024 * 1024, 128.f, 1, 1, 1234ULL},
  {0.000001f, 98689, 4.f, 1, 1, 1234ULL},
  {0.000001f, 4 * 1024 * 1024, -1, 1, 1, 1234ULL},
  {0.000001f, 1024 * 1024, 6, 4, 1, 1234ULL},
  {0.000001f, 1024 * 1024, 7, 1, 3, 1234ULL},
  {0.000001f, 1024 * 1024, 8, 4, 3, 1234ULL},
};
const std::vector<AxpyInputs<double>> inputsd = {
  {0.000001f, 1024 * 1024, 2.f, 1, 1, 1234ULL},
  {0.000001f, 16 * 1024 * 1024, 128.f, 1, 1, 1234ULL},
  {0.000001f, 98689, 4.f, 1, 1, 1234ULL},
  {0.000001f, 4 * 1024 * 1024, -1, 1, 1, 1234ULL},
  {0.000001f, 1024 * 1024, 6, 4, 1, 1234ULL},
  {0.000001f, 1024 * 1024, 7, 1, 3, 1234ULL},
  {0.000001f, 1024 * 1024, 8, 4, 3, 1234ULL},
};

// Both the host-alpha and device-alpha results must match the reference.
typedef AxpyTest<float> AxpyTestF;
TEST_P(AxpyTestF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(refy.data(),
                                y_host_alpha.data(),
                                params.len * params.incy,
                                raft::CompareApprox<float>(params.tolerance)));
  ASSERT_TRUE(raft::devArrMatch(refy.data(),
                                y_device_alpha.data(),
                                params.len * params.incy,
                                raft::CompareApprox<float>(params.tolerance)));
}

typedef AxpyTest<double> AxpyTestD;
TEST_P(AxpyTestD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(refy.data(),
                                y_host_alpha.data(),
                                params.len * params.incy,
                                raft::CompareApprox<double>(params.tolerance)));
  ASSERT_TRUE(raft::devArrMatch(refy.data(),
                                y_device_alpha.data(),
                                params.len * params.incy,
                                raft::CompareApprox<double>(params.tolerance)));
}

INSTANTIATE_TEST_SUITE_P(AxpyTests, AxpyTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(AxpyTests, AxpyTestD, ::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/gemm_layout.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/gemm.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
namespace linalg {
// Parameters for one gemm layout test case. Layout flags: true = col-major,
// false = row-major (as dispatched in GemmLayoutTest::SetUp).
template <typename T>
struct GemmLayoutInputs {
  int M;                            // rows of X and Z
  int N;                            // cols of Y and Z
  int K;                            // cols of X / rows of Y
  bool zLayout;                     // layout of output Z
  bool xLayout;                     // layout of operand X
  bool yLayout;                     // layout of operand Y
  unsigned long long int seed;      // RNG seed
};
// Reference GEMM implementation.
// Each thread iterates over output elements (m, n) with block/grid strides and
// accumulates the dot product of row m of X and column n of Y; the per-operand
// index formulas honor the requested row/col-major layout of each matrix.
template <typename T>
RAFT_KERNEL naiveGemm(
  T* Z, T* X, T* Y, int M, int N, int K, bool isZColMajor, bool isXColMajor, bool isYColMajor)
{
  int tidx = blockIdx.x * blockDim.x + threadIdx.x;
  int tidy = blockIdx.y * blockDim.y + threadIdx.y;
  for (int m = tidy; m < M; m += (blockDim.y * gridDim.y)) {
    for (int n = tidx; n < N; n += (blockDim.x * gridDim.x)) {
      T temp = T(0.0);
      for (int k = 0; k < K; k++) {
        // Linear index of element (m, k) in X and (k, n) in Y for each layout.
        int xIndex = isXColMajor ? m + k * M : m * K + k;
        int yIndex = isYColMajor ? k + n * K : k * N + n;
        temp += X[xIndex] * Y[yIndex];
      }
      int zIndex = isZColMajor ? m + n * M : m * N + n;
      Z[zIndex] = temp;
    }
  }
}
// Fixture: computes Z = X * Y via raft::linalg::gemm for the layout
// combination given by the test parameters and keeps the naiveGemm result in
// refZ for comparison by the TEST_P bodies.
template <typename T>
class GemmLayoutTest : public ::testing::TestWithParam<GemmLayoutInputs<T>> {
 protected:
  void SetUp() override
  {
    params = ::testing::TestWithParam<GemmLayoutInputs<T>>::GetParam();

    raft::resources handle;
    cudaStream_t stream = resource::get_cuda_stream(handle);

    raft::random::RngState r(params.seed);

    // We compute Z = X * Y and compare against reference result
    // Dimensions of X : M x K
    // Dimensions of Y : K x N
    // Dimensions of Z : M x N

    T* X = nullptr;  // Argument X
    T* Y = nullptr;  // Argument Y

    size_t xElems = params.M * params.K;
    size_t yElems = params.K * params.N;
    size_t zElems = params.M * params.N;

    RAFT_CUDA_TRY(cudaMalloc(&X, xElems * sizeof(T)));
    RAFT_CUDA_TRY(cudaMalloc(&Y, yElems * sizeof(T)));
    RAFT_CUDA_TRY(cudaMalloc(&refZ, zElems * sizeof(T)));
    RAFT_CUDA_TRY(cudaMalloc(&Z, zElems * sizeof(T)));

    uniform(handle, r, X, xElems, T(-10.0), T(10.0));
    uniform(handle, r, Y, yElems, T(-10.0), T(10.0));

    dim3 blocks(raft::ceildiv<int>(params.M, 128), raft::ceildiv<int>(params.N, 4), 1);
    dim3 threads(128, 4, 1);

    // Launch the reference kernel on the same stream as the uniform() fills
    // above, so it is ordered after them. (Previously it was launched on the
    // default stream, which is not guaranteed to synchronize with the
    // handle's stream, and left `stream` unused.)
    naiveGemm<<<blocks, threads, 0, stream>>>(
      refZ, X, Y, params.M, params.N, params.K, params.zLayout, params.xLayout, params.yLayout);

    auto x_view_row_major = raft::make_device_matrix_view(X, params.M, params.K);
    auto y_view_row_major = raft::make_device_matrix_view(Y, params.K, params.N);
    auto z_view_row_major = raft::make_device_matrix_view(Z, params.M, params.N);

    auto x_view_col_major =
      raft::make_device_matrix_view<T, int, raft::col_major>(X, params.M, params.K);
    auto y_view_col_major =
      raft::make_device_matrix_view<T, int, raft::col_major>(Y, params.K, params.N);
    auto z_view_col_major =
      raft::make_device_matrix_view<T, int, raft::col_major>(Z, params.M, params.N);

    // Dispatch to the gemm overload matching the requested layouts
    // (true = col-major, false = row-major).
    if (params.xLayout && params.yLayout && params.zLayout) {
      gemm(handle, x_view_col_major, y_view_col_major, z_view_col_major);
    } else if (params.xLayout && params.yLayout && !params.zLayout) {
      gemm(handle, x_view_col_major, y_view_col_major, z_view_row_major);
    } else if (params.xLayout && !params.yLayout && params.zLayout) {
      gemm(handle, x_view_col_major, y_view_row_major, z_view_col_major);
    } else if (!params.xLayout && params.yLayout && params.zLayout) {
      gemm(handle, x_view_row_major, y_view_col_major, z_view_col_major);
    } else if (params.xLayout && !params.yLayout && !params.zLayout) {
      gemm(handle, x_view_col_major, y_view_row_major, z_view_row_major);
    } else if (!params.xLayout && params.yLayout && !params.zLayout) {
      gemm(handle, x_view_row_major, y_view_col_major, z_view_row_major);
    } else if (!params.xLayout && !params.yLayout && params.zLayout) {
      gemm(handle, x_view_row_major, y_view_row_major, z_view_col_major);
    } else if (!params.xLayout && !params.yLayout && !params.zLayout) {
      gemm(handle, x_view_row_major, y_view_row_major, z_view_row_major);
    }
    resource::sync_stream(handle);

    // X and Y are only needed as gemm inputs; release them here. They were
    // previously leaked, since TearDown() frees only refZ and Z.
    RAFT_CUDA_TRY(cudaFree(X));
    RAFT_CUDA_TRY(cudaFree(Y));
  }

  void TearDown() override
  {
    RAFT_CUDA_TRY(cudaFree(refZ));
    RAFT_CUDA_TRY(cudaFree(Z));
  }

 protected:
  GemmLayoutInputs<T> params;
  T* refZ = nullptr;  // Reference result for comparison
  T* Z    = nullptr;  // Computed result
};
// All eight layout combinations (z, x, y) are covered for each precision.
const std::vector<GemmLayoutInputs<float>> inputsf = {
  {80, 70, 80, true, true, true, 76430ULL},
  {80, 100, 40, true, true, false, 426646ULL},
  {20, 100, 20, true, false, true, 237703ULL},
  {100, 60, 30, true, false, false, 538004ULL},
  {50, 10, 60, false, true, true, 73012ULL},
  {90, 90, 30, false, true, false, 538147ULL},
  {30, 100, 10, false, false, true, 412352ULL},
  {40, 80, 100, false, false, false, 2979410ULL}};
const std::vector<GemmLayoutInputs<double>> inputsd = {
  {10, 70, 40, true, true, true, 535648ULL},
  {30, 30, 30, true, true, false, 956681ULL},
  {70, 80, 50, true, false, true, 875083ULL},
  {80, 90, 70, true, false, false, 50744ULL},
  {90, 90, 30, false, true, true, 506321ULL},
  {40, 100, 70, false, true, false, 638418ULL},
  {80, 50, 30, false, false, true, 701529ULL},
  {50, 80, 60, false, false, false, 893038ULL}};
typedef GemmLayoutTest<float> GemmLayoutTestF;
TEST_P(GemmLayoutTestF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(refZ, Z, params.M * params.N, raft::CompareApprox<float>(1e-4)));
}

typedef GemmLayoutTest<double> GemmLayoutTestD;
TEST_P(GemmLayoutTestD, Result)
{
  // Compare in double precision: the previous CompareApprox<float>(1e-6)
  // evaluated the relative error in float, weakening the double-precision
  // check to float accuracy.
  ASSERT_TRUE(raft::devArrMatch(refZ, Z, params.M * params.N, raft::CompareApprox<double>(1e-6)));
}

INSTANTIATE_TEST_SUITE_P(GemmLayoutTests, GemmLayoutTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(GemmLayoutTests, GemmLayoutTestD, ::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/dot.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/dot.cuh>
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_scalar.hpp>
namespace raft {
namespace linalg {
// Reference dot-product kernel: every thread accumulates a strided partial
// sum of x[i*incx] * y[i*incy] and atomically folds it into *out.
template <typename T>
RAFT_KERNEL naiveDot(const int n, const T* x, int incx, const T* y, int incy, T* out)
{
  const int first  = blockIdx.x * blockDim.x + threadIdx.x;
  const int stride = blockDim.x * gridDim.x;
  T partial        = T(0);
  for (int i = first; i < n; i += stride) {
    partial += x[i * incx] * y[i * incy];
  }
  atomicAdd(out, partial);
}
// Parameters for a single dot-product test case.
template <typename InType, typename IndexType = int, typename OutType = InType>
struct DotInputs {
  OutType tolerance;            // allowed approximate-compare tolerance vs. the reference
  IndexType len;                // number of logical elements in each vector
  IndexType incx;               // stride between consecutive elements of x
  IndexType incy;               // stride between consecutive elements of y
  unsigned long long int seed;  // RNG seed for the random input data
};
// Fixture: computes the dot product once with the naive reference kernel and
// twice with the raft::linalg::dot public API (device-scalar and host-scalar
// outputs), so the TEST_P bodies can compare all three results.
template <typename T, typename IndexType = int>
class DotTest : public ::testing::TestWithParam<DotInputs<T>> {
 protected:
  DotInputs<T, IndexType> params;
  // host_output / device_output hold the host- and device-scalar API results;
  // ref_output holds the naive kernel's result.
  T host_output, device_output, ref_output;
 public:
  DotTest() : testing::TestWithParam<DotInputs<T>>() {}
 protected:
  void SetUp() override
  {
    params              = ::testing::TestWithParam<DotInputs<T>>::GetParam();
    raft::resources handle;
    cudaStream_t stream = resource::get_cuda_stream(handle);
    raft::random::RngState r(params.seed);
    // Allocate len * inc elements so strided accesses stay in bounds.
    IndexType x_len = params.len * params.incx;
    IndexType y_len = params.len * params.incy;
    rmm::device_uvector<T> x(x_len, stream);
    rmm::device_uvector<T> y(y_len, stream);
    uniform(handle, r, x.data(), x_len, T(-1.0), T(1.0));
    uniform(handle, r, y.data(), y_len, T(-1.0), T(1.0));
    // Reference result from the naive kernel on the same stream.
    rmm::device_scalar<T> ref(0, resource::get_cuda_stream(handle));
    naiveDot<<<256, 256, 0, stream>>>(
      params.len, x.data(), params.incx, y.data(), params.incy, ref.data());
    raft::update_host(&ref_output, ref.data(), 1, stream);
    // Test out both the device and host api's
    rmm::device_scalar<T> out(0, resource::get_cuda_stream(handle));
    auto device_out_view = make_device_scalar_view<T, IndexType>(out.data());
    auto host_out_view   = make_host_scalar_view<T, IndexType>(&host_output);
    // Exercise every combination of strided vs. contiguous vector views, since
    // the API dispatches on the layout of each argument.
    if ((params.incx > 1) && (params.incy > 1)) {
      auto x_view = make_device_vector_view<const T, IndexType, layout_stride>(
        x.data(), make_vector_strided_layout(params.len, params.incx));
      auto y_view = make_device_vector_view<const T, IndexType, layout_stride>(
        y.data(), make_vector_strided_layout(params.len, params.incy));
      dot(handle, x_view, y_view, device_out_view);
      dot(handle, x_view, y_view, host_out_view);
    } else if (params.incx > 1) {
      auto x_view = make_device_vector_view<const T, IndexType, layout_stride>(
        x.data(), make_vector_strided_layout(params.len, params.incx));
      auto y_view = make_device_vector_view<const T>(y.data(), params.len);
      dot(handle, x_view, y_view, device_out_view);
      dot(handle, x_view, y_view, host_out_view);
    } else if (params.incy > 1) {
      auto x_view = make_device_vector_view<const T>(x.data(), params.len);
      auto y_view = make_device_vector_view<const T, IndexType, layout_stride>(
        y.data(), make_vector_strided_layout(params.len, params.incy));
      dot(handle, x_view, y_view, device_out_view);
      dot(handle, x_view, y_view, host_out_view);
    } else {
      auto x_view = make_device_vector_view<const T>(x.data(), params.len);
      auto y_view = make_device_vector_view<const T>(y.data(), params.len);
      dot(handle, x_view, y_view, device_out_view);
      dot(handle, x_view, y_view, host_out_view);
    }
    // Copy the device-scalar result back; the sync below also makes ref_output
    // and host_output valid for the test body.
    raft::update_host(&device_output, out.data(), 1, stream);
    resource::sync_stream(handle);
  }
  void TearDown() override {}
};
// Input tables: {tolerance, len, incx, incy, seed}. Strides > 1 exercise the
// strided-view code paths above.
const std::vector<DotInputs<float>> inputsf = {
  {0.0001f, 1024 * 1024, 1, 1, 1234ULL},
  {0.0001f, 16 * 1024 * 1024, 1, 1, 1234ULL},
  {0.0001f, 98689, 1, 1, 1234ULL},
  {0.0001f, 4 * 1024 * 1024, 1, 1, 1234ULL},
  {0.0001f, 1024 * 1024, 4, 1, 1234ULL},
  {0.0001f, 1024 * 1024, 1, 3, 1234ULL},
  {0.0001f, 1024 * 1024, 4, 3, 1234ULL},
};
const std::vector<DotInputs<double>> inputsd = {
  {0.000001f, 1024 * 1024, 1, 1, 1234ULL},
  {0.000001f, 16 * 1024 * 1024, 1, 1, 1234ULL},
  {0.000001f, 98689, 1, 1, 1234ULL},
  {0.000001f, 4 * 1024 * 1024, 1, 1, 1234ULL},
  {0.000001f, 1024 * 1024, 4, 1, 1234ULL},
  {0.000001f, 1024 * 1024, 1, 3, 1234ULL},
  {0.000001f, 1024 * 1024, 4, 3, 1234ULL},
};
// Float case: both API results must match the naive reference.
typedef DotTest<float> DotTestF;
TEST_P(DotTestF, Result)
{
  auto compare = raft::CompareApprox<float>(params.tolerance);
  ASSERT_TRUE(compare(ref_output, host_output));
  ASSERT_TRUE(compare(ref_output, device_output));
}
// Double case: both API results must match the naive reference.
typedef DotTest<double> DotTestD;
TEST_P(DotTestD, Result)
{
  // Compare at double precision: the original used CompareApprox<float>, which
  // casts the double results down to float and makes the 1e-6 tolerance of the
  // double inputs effectively unreachable below float epsilon (~1.2e-7).
  auto compare = raft::CompareApprox<double>(params.tolerance);
  ASSERT_TRUE(compare(ref_output, host_output));
  ASSERT_TRUE(compare(ref_output, device_output));
}
// Instantiate the suites over the float and double input tables.
INSTANTIATE_TEST_SUITE_P(DotTests, DotTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(DotTests, DotTestD, ::testing::ValuesIn(inputsd));
}  // end namespace linalg
}  // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/sqrt.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/sqrt.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Element-wise reference kernel: out[i] = sqrt(in1[i]), one thread per element.
template <typename Type>
RAFT_KERNEL naiveSqrtElemKernel(Type* out, const Type* in1, int len)
{
  const int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= len) { return; }
  out[tid] = raft::sqrt(in1[tid]);
}
// Launches the reference sqrt kernel over `len` elements.
// NOTE(review): this launch uses the default stream while the input buffer is
// filled on the handle's stream in SetUp — presumably ordered only under
// per-thread default streams or equivalent; verify the build configuration.
template <typename Type>
void naiveSqrtElem(Type* out, const Type* in1, int len)
{
  static const int TPB = 64;
  int nblks = raft::ceildiv(len, TPB);
  naiveSqrtElemKernel<Type><<<nblks, TPB>>>(out, in1, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Parameters for a single element-wise sqrt test case.
template <typename T>
struct SqrtInputs {
  T tolerance;                  // allowed approximate-compare tolerance
  int len;                      // number of elements
  unsigned long long int seed;  // RNG seed for the input data
};
// gtest requires a stream insertion operator for parameter types; this one is
// intentionally a no-op.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SqrtInputs<T>& dims)
{
  return os;
}
// Fixture: computes sqrt with the reference kernel into out_ref, then with the
// public raft::linalg::sqrt API both out-of-place (into `out`) and in-place
// (overwriting in1), so the test body can check both call forms.
template <typename T>
class SqrtTest : public ::testing::TestWithParam<SqrtInputs<T>> {
 protected:
  SqrtTest()
    : in1(0, resource::get_cuda_stream(handle)),
      out_ref(0, resource::get_cuda_stream(handle)),
      out(0, resource::get_cuda_stream(handle))
  {
  }
  void SetUp() override
  {
    auto stream = resource::get_cuda_stream(handle);
    params      = ::testing::TestWithParam<SqrtInputs<T>>::GetParam();
    raft::random::RngState r(params.seed);
    int len = params.len;
    in1.resize(len, stream);
    out_ref.resize(len, stream);
    out.resize(len, stream);
    // Inputs in [1, 2] keep sqrt well-defined and bounded away from zero.
    uniform(handle, r, in1.data(), len, T(1.0), T(2.0));
    naiveSqrtElem(out_ref.data(), in1.data(), len);
    auto out_view = raft::make_device_vector_view(out.data(), len);
    auto in_view  = raft::make_device_vector_view<const T>(in1.data(), len);
    auto in2_view = raft::make_device_vector_view(in1.data(), len);
    // Out-of-place first, then in-place over the same input buffer.
    sqrt(handle, in_view, out_view);
    sqrt(handle, in_view, in2_view);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  }
 protected:
  raft::resources handle;
  SqrtInputs<T> params;
  rmm::device_uvector<T> in1, out_ref, out;
  int device_count = 0;  // NOTE(review): unused by this fixture
};
// One large case per precision; fields are {tolerance, len, seed}.
const std::vector<SqrtInputs<float>> inputsf2 = {{0.000001f, 1024 * 1024, 1234ULL}};
const std::vector<SqrtInputs<double>> inputsd2 = {{0.00000001, 1024 * 1024, 1234ULL}};
// Both the out-of-place result and the in-place result must match the reference.
typedef SqrtTest<float> SqrtTestF;
TEST_P(SqrtTestF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<float>(params.tolerance)));
  ASSERT_TRUE(raft::devArrMatch(
    out_ref.data(), in1.data(), params.len, raft::CompareApprox<float>(params.tolerance)));
}
typedef SqrtTest<double> SqrtTestD;
TEST_P(SqrtTestD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<double>(params.tolerance)));
  ASSERT_TRUE(raft::devArrMatch(
    out_ref.data(), in1.data(), params.len, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(SqrtTests, SqrtTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(SqrtTests, SqrtTestD, ::testing::ValuesIn(inputsd2));
}  // namespace linalg
}  // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/normalize.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/normalize.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/itertools.hpp>
namespace raft {
namespace linalg {
// Parameters for a single row-normalize test case.
template <typename T, typename IdxT>
struct RowNormalizeInputs {
  T tolerance;                       // allowed approximate-compare tolerance
  IdxT rows, cols;                   // matrix shape
  raft::linalg::NormType norm_type;  // L1 / L2 / Linf (see input tables below)
  unsigned long long int seed;       // RNG seed for the input data
};
// Prints all test parameters in brace-delimited form for gtest failure output.
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const RowNormalizeInputs<T, IdxT>& I)
{
  return os << "{ " << I.tolerance << ", " << I.rows << ", " << I.cols << ", " << I.norm_type
            << ", " << I.seed << '}' << std::endl;
}
// Reference implementation: compute the per-row norm, then divide each row by
// it via matrixVectorOp.
template <typename T, typename IdxT>
void rowNormalizeRef(
  T* out, const T* in, IdxT cols, IdxT rows, raft::linalg::NormType norm_type, cudaStream_t stream)
{
  rmm::device_uvector<T> norm(rows, stream);
  if (norm_type == raft::linalg::L2Norm) {
    // L2 accumulates squares, so finalize with sqrt to get the actual norm.
    raft::linalg::rowNorm(norm.data(), in, cols, rows, norm_type, true, stream, raft::sqrt_op());
  } else {
    raft::linalg::rowNorm(
      norm.data(), in, cols, rows, norm_type, true, stream, raft::identity_op());
  }
  raft::linalg::matrixVectorOp(
    out, in, norm.data(), cols, rows, true, false, raft::div_op{}, stream);
}
// Fixture: normalizes the same random matrix with the reference path (out_exp)
// and the public raft::linalg::row_normalize API (out_act) for comparison.
template <typename T, typename IdxT>
class RowNormalizeTest : public ::testing::TestWithParam<RowNormalizeInputs<T, IdxT>> {
 public:
  RowNormalizeTest()
    : params(::testing::TestWithParam<RowNormalizeInputs<T, IdxT>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream),
      out_exp(params.rows * params.cols, stream),
      out_act(params.rows * params.cols, stream)
  {
  }
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    int len = params.rows * params.cols;
    uniform(handle, r, data.data(), len, T(-10.0), T(10.0));
    rowNormalizeRef(
      out_exp.data(), data.data(), params.cols, params.rows, params.norm_type, stream);
    auto input_view = raft::make_device_matrix_view<const T, IdxT, raft::row_major>(
      data.data(), params.rows, params.cols);
    auto output_view = raft::make_device_matrix_view<T, IdxT, raft::row_major>(
      out_act.data(), params.rows, params.cols);
    raft::linalg::row_normalize(handle, input_view, output_view, params.norm_type);
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  RowNormalizeInputs<T, IdxT> params;
  rmm::device_uvector<T> data, out_exp, out_act;
};
// Input tables: {tolerance, rows, cols, norm_type, seed}; itertools::product
// expands the cartesian product, so every norm type runs over every shape.
const std::vector<RowNormalizeInputs<float, int>> inputsf_i32 =
  raft::util::itertools::product<RowNormalizeInputs<float, int>>(
    {0.00001f},
    {11, 101, 12345},
    {2, 3, 7, 12, 33, 125, 254},
    {raft::linalg::L1Norm, raft::linalg::L2Norm, raft::linalg::LinfNorm},
    {1234ULL});
const std::vector<RowNormalizeInputs<double, int>> inputsd_i32 =
  raft::util::itertools::product<RowNormalizeInputs<double, int>>(
    {0.00000001},
    {11, 101, 12345},
    {2, 3, 7, 12, 33, 125, 254},
    {raft::linalg::L1Norm, raft::linalg::L2Norm, raft::linalg::LinfNorm},
    {1234ULL});
const std::vector<RowNormalizeInputs<float, uint32_t>> inputsf_u32 =
  raft::util::itertools::product<RowNormalizeInputs<float, uint32_t>>(
    {0.00001f},
    {11u, 101u, 12345u},
    {2u, 3u, 7u, 12u, 33u, 125u, 254u},
    {raft::linalg::L1Norm, raft::linalg::L2Norm, raft::linalg::LinfNorm},
    {1234ULL});
const std::vector<RowNormalizeInputs<double, uint32_t>> inputsd_u32 =
  raft::util::itertools::product<RowNormalizeInputs<double, uint32_t>>(
    {0.00000001},
    {11u, 101u, 12345u},
    {2u, 3u, 7u, 12u, 33u, 125u, 254u},
    {raft::linalg::L1Norm, raft::linalg::L2Norm, raft::linalg::LinfNorm},
    {1234ULL});
// Generates a typedef, a result check, and an instantiation per combination;
// RAFT_DEPAREN lets template arguments containing commas be passed as a single
// parenthesized macro argument.
#define ROWNORMALIZE_TEST(test_type, test_name, test_inputs)            \
  typedef RAFT_DEPAREN(test_type) test_name;                            \
  TEST_P(test_name, Result)                                             \
  {                                                                     \
    ASSERT_TRUE(raft::devArrMatch(out_exp.data(),                       \
                                  out_act.data(),                       \
                                  params.rows* params.cols,             \
                                  raft::CompareApprox(params.tolerance))); \
  }                                                                     \
  INSTANTIATE_TEST_CASE_P(RowNormalizeTests, test_name, ::testing::ValuesIn(test_inputs))
ROWNORMALIZE_TEST((RowNormalizeTest<float, int>), RowNormalizeTestFI32, inputsf_i32);
ROWNORMALIZE_TEST((RowNormalizeTest<double, int>), RowNormalizeTestDI32, inputsd_i32);
ROWNORMALIZE_TEST((RowNormalizeTest<float, uint32_t>), RowNormalizeTestFU32, inputsf_u32);
ROWNORMALIZE_TEST((RowNormalizeTest<double, uint32_t>), RowNormalizeTestDU32, inputsd_u32);
}  // end namespace linalg
}  // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/norm.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/norm.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/itertools.hpp>
namespace raft {
namespace linalg {
// Parameters for a single row-/column-wise norm test case.
template <typename T, typename IdxT>
struct NormInputs {
  T tolerance;                  // allowed approximate-compare tolerance
  IdxT rows, cols;              // matrix shape
  NormType type;                // L1 / L2 (the tables below use only these)
  bool do_sqrt;                 // apply sqrt finalization to the accumulated norm
  bool rowMajor;                // layout of the input matrix view passed to norm()
  unsigned long long int seed;  // RNG seed for the input data
};
// Prints all test parameters for gtest failure output.
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const NormInputs<T, IdxT>& I)
{
  // Include rowMajor (the original omitted it), matching the sibling
  // operator<< in normalize's tests which prints every struct field; without
  // it, row- and column-major failures print identically.
  os << "{ " << I.tolerance << ", " << I.rows << ", " << I.cols << ", " << I.type << ", "
     << I.do_sqrt << ", " << I.rowMajor << ", " << I.seed << '}' << std::endl;
  return os;
}
///// Row-wise norm test definitions
// Reference row-norm kernel: one thread per row, accumulating squares for
// L2Norm and absolute values otherwise (only L1/L2 appear in the input tables
// below), with optional sqrt finalization.
template <typename Type, typename IdxT>
RAFT_KERNEL naiveRowNormKernel(
  Type* dots, const Type* data, IdxT D, IdxT N, NormType type, bool do_sqrt)
{
  Type acc      = (Type)0;
  IdxT rowStart = threadIdx.x + static_cast<IdxT>(blockIdx.x) * blockDim.x;
  if (rowStart < N) {
    for (IdxT i = 0; i < D; ++i) {
      if (type == L2Norm) {
        acc += data[rowStart * D + i] * data[rowStart * D + i];
      } else {
        acc += raft::abs(data[rowStart * D + i]);
      }
    }
    dots[rowStart] = do_sqrt ? raft::sqrt(acc) : acc;
  }
}
// Launches the reference row-norm kernel: D columns, N rows, one thread/row.
template <typename Type, typename IdxT>
void naiveRowNorm(
  Type* dots, const Type* data, IdxT D, IdxT N, NormType type, bool do_sqrt, cudaStream_t stream)
{
  static const IdxT TPB = 64;
  IdxT nblks = raft::ceildiv(N, TPB);
  naiveRowNormKernel<Type><<<nblks, TPB, 0, stream>>>(dots, data, D, N, type, do_sqrt);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Fixture for ALONG_ROWS norms: compares the public norm() API against the
// naive per-row kernel. params.rowMajor picks which mdspan layout is passed to
// the API (the input tables below use rowMajor=true only).
template <typename T, typename IdxT>
class RowNormTest : public ::testing::TestWithParam<NormInputs<T, IdxT>> {
 public:
  RowNormTest()
    : params(::testing::TestWithParam<NormInputs<T, IdxT>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream),
      dots_exp(params.rows, stream),
      dots_act(params.rows, stream)
  {
  }
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    IdxT rows = params.rows, cols = params.cols, len = rows * cols;
    uniform(handle, r, data.data(), len, T(-1.0), T(1.0));
    // Reference always reads the buffer with row-major indexing.
    naiveRowNorm(dots_exp.data(), data.data(), cols, rows, params.type, params.do_sqrt, stream);
    auto output_view = raft::make_device_vector_view<T, IdxT>(dots_act.data(), params.rows);
    auto input_row_major = raft::make_device_matrix_view<const T, IdxT, raft::row_major>(
      data.data(), params.rows, params.cols);
    auto input_col_major = raft::make_device_matrix_view<const T, IdxT, raft::col_major>(
      data.data(), params.rows, params.cols);
    // Dispatch over (do_sqrt, layout): sqrt finalization is passed as an
    // explicit final op where requested.
    if (params.do_sqrt) {
      if (params.rowMajor) {
        norm(handle, input_row_major, output_view, params.type, Apply::ALONG_ROWS, raft::sqrt_op{});
      } else {
        norm(handle, input_col_major, output_view, params.type, Apply::ALONG_ROWS, raft::sqrt_op{});
      }
    } else {
      if (params.rowMajor) {
        norm(handle, input_row_major, output_view, params.type, Apply::ALONG_ROWS);
      } else {
        norm(handle, input_col_major, output_view, params.type, Apply::ALONG_ROWS);
      }
    }
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  NormInputs<T, IdxT> params;
  rmm::device_uvector<T> data, dots_exp, dots_act;
};
///// Column-wise norm test definitions
// Reference column-norm kernel: one thread per column, accumulating squares
// for L2Norm and absolute values otherwise, with optional sqrt finalization.
template <typename Type, typename IdxT>
RAFT_KERNEL naiveColNormKernel(
  Type* dots, const Type* data, IdxT D, IdxT N, NormType type, bool do_sqrt)
{
  IdxT col = threadIdx.x + static_cast<IdxT>(blockIdx.x) * blockDim.x;
  if (col < D) {
    Type acc = Type(0);
    for (IdxT row = 0; row < N; ++row) {
      const Type v = data[col + row * D];
      if (type == L2Norm) {
        acc += v * v;
      } else {
        acc += raft::abs(v);
      }
    }
    dots[col] = do_sqrt ? raft::sqrt(acc) : acc;
  }
}
// Launches the reference column-norm kernel: D columns, N rows, one
// thread/column.
template <typename Type, typename IdxT>
void naiveColNorm(
  Type* dots, const Type* data, IdxT D, IdxT N, NormType type, bool do_sqrt, cudaStream_t stream)
{
  static const IdxT TPB = 64;
  IdxT nblks = raft::ceildiv(D, TPB);
  naiveColNormKernel<Type><<<nblks, TPB, 0, stream>>>(dots, data, D, N, type, do_sqrt);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Fixture for ALONG_COLUMNS norms: compares the public norm() API against the
// naive per-column kernel. params.rowMajor picks which mdspan layout is passed
// to the API (the input tables below use rowMajor=true only).
template <typename T, typename IdxT>
class ColNormTest : public ::testing::TestWithParam<NormInputs<T, IdxT>> {
 public:
  ColNormTest()
    : params(::testing::TestWithParam<NormInputs<T, IdxT>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream),
      dots_exp(params.cols, stream),
      dots_act(params.cols, stream)
  {
  }
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    IdxT rows = params.rows, cols = params.cols, len = rows * cols;
    uniform(handle, r, data.data(), len, T(-1.0), T(1.0));
    // Reference always reads the buffer with row-major indexing.
    naiveColNorm(dots_exp.data(), data.data(), cols, rows, params.type, params.do_sqrt, stream);
    auto output_view = raft::make_device_vector_view<T, IdxT>(dots_act.data(), params.cols);
    auto input_row_major = raft::make_device_matrix_view<const T, IdxT, raft::row_major>(
      data.data(), params.rows, params.cols);
    auto input_col_major = raft::make_device_matrix_view<const T, IdxT, raft::col_major>(
      data.data(), params.rows, params.cols);
    // Dispatch over (do_sqrt, layout).
    if (params.do_sqrt) {
      if (params.rowMajor) {
        norm(
          handle, input_row_major, output_view, params.type, Apply::ALONG_COLUMNS, raft::sqrt_op{});
      } else {
        norm(
          handle, input_col_major, output_view, params.type, Apply::ALONG_COLUMNS, raft::sqrt_op{});
      }
    } else {
      if (params.rowMajor) {
        norm(handle, input_row_major, output_view, params.type, Apply::ALONG_COLUMNS);
      } else {
        norm(handle, input_col_major, output_view, params.type, Apply::ALONG_COLUMNS);
      }
    }
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  NormInputs<T, IdxT> params;
  rmm::device_uvector<T> data, dots_exp, dots_act;
};
///// Row- and column-wise tests
// Input tables: {tolerance, rows, cols, type, do_sqrt, rowMajor, seed};
// itertools::product expands the cartesian product. The "c"-prefixed tables
// swap the rows/cols sets for the column-wise suites.
const std::vector<NormInputs<float, int>> inputsf_i32 =
  raft::util::itertools::product<NormInputs<float, int>>(
    {0.00001f}, {11, 1234}, {7, 33, 128, 500}, {L1Norm, L2Norm}, {false, true}, {true}, {1234ULL});
const std::vector<NormInputs<double, int>> inputsd_i32 =
  raft::util::itertools::product<NormInputs<double, int>>({0.00000001},
                                                          {11, 1234},
                                                          {7, 33, 128, 500},
                                                          {L1Norm, L2Norm},
                                                          {false, true},
                                                          {true},
                                                          {1234ULL});
const std::vector<NormInputs<float, int64_t>> inputsf_i64 =
  raft::util::itertools::product<NormInputs<float, int64_t>>(
    {0.00001f}, {11, 1234}, {7, 33, 128, 500}, {L1Norm, L2Norm}, {false, true}, {true}, {1234ULL});
const std::vector<NormInputs<double, int64_t>> inputsd_i64 =
  raft::util::itertools::product<NormInputs<double, int64_t>>({0.00000001},
                                                              {11, 1234},
                                                              {7, 33, 128, 500},
                                                              {L1Norm, L2Norm},
                                                              {false, true},
                                                              {true},
                                                              {1234ULL});
const std::vector<NormInputs<float, int>> inputscf_i32 =
  raft::util::itertools::product<NormInputs<float, int>>(
    {0.00001f}, {7, 33, 128, 500}, {11, 1234}, {L1Norm, L2Norm}, {false, true}, {true}, {1234ULL});
const std::vector<NormInputs<double, int>> inputscd_i32 =
  raft::util::itertools::product<NormInputs<double, int>>({0.00000001},
                                                          {7, 33, 128, 500},
                                                          {11, 1234},
                                                          {L1Norm, L2Norm},
                                                          {false, true},
                                                          {true},
                                                          {1234ULL});
const std::vector<NormInputs<float, int64_t>> inputscf_i64 =
  raft::util::itertools::product<NormInputs<float, int64_t>>(
    {0.00001f}, {7, 33, 128, 500}, {11, 1234}, {L1Norm, L2Norm}, {false, true}, {true}, {1234ULL});
const std::vector<NormInputs<double, int64_t>> inputscd_i64 =
  raft::util::itertools::product<NormInputs<double, int64_t>>({0.00000001},
                                                              {7, 33, 128, 500},
                                                              {11, 1234},
                                                              {L1Norm, L2Norm},
                                                              {false, true},
                                                              {true},
                                                              {1234ULL});
typedef RowNormTest<float, int> RowNormTestF_i32;
typedef RowNormTest<double, int> RowNormTestD_i32;
typedef RowNormTest<float, int64_t> RowNormTestF_i64;
typedef RowNormTest<double, int64_t> RowNormTestD_i64;
typedef ColNormTest<float, int> ColNormTestF_i32;
typedef ColNormTest<double, int> ColNormTestD_i32;
typedef ColNormTest<float, int64_t> ColNormTestF_i64;
typedef ColNormTest<double, int64_t> ColNormTestD_i64;
// Result check + instantiation for one fixture. Note the macro is named
// ROWNORM_TEST but is deliberately reused for the ColNormTest fixtures too —
// the body is identical for both.
#define ROWNORM_TEST(test_type, test_inputs)                                                     \
  TEST_P(test_type, Result)                                                                      \
  {                                                                                              \
    ASSERT_TRUE(raft::devArrMatch(                                                               \
      dots_exp.data(), dots_act.data(), dots_exp.size(), raft::CompareApprox(params.tolerance))); \
  }                                                                                              \
  INSTANTIATE_TEST_CASE_P(RowNormTests, test_type, ::testing::ValuesIn(test_inputs))
ROWNORM_TEST(RowNormTestF_i32, inputsf_i32);
ROWNORM_TEST(RowNormTestD_i32, inputsd_i32);
ROWNORM_TEST(RowNormTestF_i64, inputsf_i64);
ROWNORM_TEST(RowNormTestD_i64, inputsd_i64);
ROWNORM_TEST(ColNormTestF_i32, inputscf_i32);
ROWNORM_TEST(ColNormTestD_i32, inputscd_i32);
ROWNORM_TEST(ColNormTestF_i64, inputscf_i64);
ROWNORM_TEST(ColNormTestD_i64, inputscd_i64);
}  // end namespace linalg
}  // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/reduce.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "reduce.cuh"
#include <gtest/gtest.h>
#include <raft/core/detail/macros.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/reduce.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/itertools.hpp>
namespace raft {
namespace linalg {
// Parameters for a single reduction test case.
template <typename InType, typename OutType, typename IdxType>
struct ReduceInputs {
  OutType tolerance;            // allowed approximate-compare tolerance
  IdxType rows, cols;           // matrix shape
  bool rowMajor, alongRows;     // input layout and reduction direction
  OutType init;                 // initial accumulator value passed to reduce()
  unsigned long long int seed;  // RNG seed for the input data
};
// Prints all test parameters for gtest failure output.
template <typename InType, typename OutType, typename IdxType>
::std::ostream& operator<<(::std::ostream& os, const ReduceInputs<InType, OutType, IdxType>& dims)
{
  os << "{ " << dims.tolerance << ", " << dims.rows << ", " << dims.cols << ", " << dims.rowMajor
     << ", " << dims.alongRows << ", " << dims.init << " " << dims.seed << '}';
  return os;
}
// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
// Dispatches raft::linalg::reduce with either a row-major or column-major
// input view depending on `rowMajor`; kept as a free function per the note
// above.
template <typename InType,
          typename OutType,
          typename IdxType,
          typename MainLambda,
          typename ReduceLambda,
          typename FinalLambda>
void reduceLaunch(OutType* dots,
                  const InType* data,
                  IdxType cols,
                  IdxType rows,
                  bool rowMajor,
                  bool alongRows,
                  OutType init,
                  bool inplace,
                  cudaStream_t stream,
                  MainLambda main_op,
                  ReduceLambda reduce_op,
                  FinalLambda final_op)
{
  Apply apply = alongRows ? Apply::ALONG_ROWS : Apply::ALONG_COLUMNS;
  // Reducing along rows collapses the row dimension, leaving one value per
  // column, and vice versa.
  IdxType output_size = alongRows ? cols : rows;
  auto output_view = raft::make_device_vector_view(dots, output_size);
  auto input_view_row_major = raft::make_device_matrix_view(data, rows, cols);
  auto input_view_col_major =
    raft::make_device_matrix_view<const InType, IdxType, raft::col_major>(data, rows, cols);
  raft::resources handle;
  resource::set_cuda_stream(handle, stream);
  if (rowMajor) {
    reduce(handle,
           input_view_row_major,
           output_view,
           init,
           apply,
           inplace,
           main_op,
           reduce_op,
           final_op);
  } else {
    reduce(handle,
           input_view_col_major,
           output_view,
           init,
           apply,
           inplace,
           main_op,
           reduce_op,
           final_op);
  }
}
// Fixture: runs the naive reference reduction (from reduce.cuh) and the public
// reduce() API over the same random matrix, each first out-of-place and then
// in-place, so the test body can compare dots_exp against dots_act.
template <typename InType,
          typename OutType,
          typename IdxType,
          typename MainLambda   = raft::sq_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda  = raft::sqrt_op>
class ReduceTest : public ::testing::TestWithParam<ReduceInputs<InType, OutType, IdxType>> {
 public:
  ReduceTest()
    : params(::testing::TestWithParam<ReduceInputs<InType, OutType, IdxType>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream),
      dots_exp(params.alongRows ? params.rows : params.cols, stream),
      dots_act(params.alongRows ? params.rows : params.cols, stream)
  {
  }
 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    IdxType rows = params.rows, cols = params.cols;
    IdxType len = rows * cols;
    gen_uniform(handle, data.data(), r, len);
    MainLambda main_op;
    ReduceLambda reduce_op;
    FinalLambda fin_op;
    // For both the naive and the actual implementation, execute first with inplace=false then true
    naiveReduction(dots_exp.data(),
                   data.data(),
                   cols,
                   rows,
                   params.rowMajor,
                   params.alongRows,
                   stream,
                   params.init,
                   false,
                   main_op,
                   reduce_op,
                   fin_op);
    naiveReduction(dots_exp.data(),
                   data.data(),
                   cols,
                   rows,
                   params.rowMajor,
                   params.alongRows,
                   stream,
                   params.init,
                   true,
                   main_op,
                   reduce_op,
                   fin_op);
    reduceLaunch(dots_act.data(),
                 data.data(),
                 cols,
                 rows,
                 params.rowMajor,
                 params.alongRows,
                 params.init,
                 false,
                 stream,
                 main_op,
                 reduce_op,
                 fin_op);
    reduceLaunch(dots_act.data(),
                 data.data(),
                 cols,
                 rows,
                 params.rowMajor,
                 params.alongRows,
                 params.init,
                 true,
                 stream,
                 main_op,
                 reduce_op,
                 fin_op);
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  ReduceInputs<InType, OutType, IdxType> params;
  rmm::device_uvector<InType> data;
  rmm::device_uvector<OutType> dots_exp, dots_act;
};
// Generates a typedef, a result check, and an instantiation per reduction
// configuration; RAFT_DEPAREN lets template arguments containing commas be
// passed as a single parenthesized macro argument.
#define REDUCE_TEST(test_type, test_name, test_inputs)                                            \
  typedef RAFT_DEPAREN(test_type) test_name;                                                      \
  TEST_P(test_name, Result)                                                                       \
  {                                                                                               \
    ASSERT_TRUE(raft::devArrMatch(                                                                \
      dots_exp.data(), dots_act.data(), dots_exp.size(), raft::CompareApprox(params.tolerance))); \
  }                                                                                               \
  INSTANTIATE_TEST_CASE_P(ReduceTests, test_name, ::testing::ValuesIn(test_inputs))
// Input tables: {tolerance, rows, cols, rowMajor, alongRows, init, seed};
// itertools::product expands the cartesian product over layouts/directions.
const std::vector<ReduceInputs<float, float, int>> inputsff_i32 =
  raft::util::itertools::product<ReduceInputs<float, float, int>>(
    {0.000002f}, {11, 1234}, {7, 33, 128, 500}, {true, false}, {true, false}, {0.0f}, {1234ULL});
const std::vector<ReduceInputs<double, double, int>> inputsdd_i32 =
  raft::util::itertools::product<ReduceInputs<double, double, int>>(
    {0.000000001}, {11, 1234}, {7, 33, 128, 500}, {true, false}, {true, false}, {0.0}, {1234ULL});
const std::vector<ReduceInputs<float, double, int>> inputsfd_i32 =
  raft::util::itertools::product<ReduceInputs<float, double, int>>(
    {0.000000001}, {11, 1234}, {7, 33, 128, 500}, {true, false}, {true, false}, {0.0f}, {1234ULL});
const std::vector<ReduceInputs<float, float, uint32_t>> inputsff_u32 =
  raft::util::itertools::product<ReduceInputs<float, float, uint32_t>>({0.000002f},
                                                                       {11u, 1234u},
                                                                       {7u, 33u, 128u, 500u},
                                                                       {true, false},
                                                                       {true, false},
                                                                       {0.0f},
                                                                       {1234ULL});
const std::vector<ReduceInputs<float, float, int64_t>> inputsff_i64 =
  raft::util::itertools::product<ReduceInputs<float, float, int64_t>>(
    {0.000002f}, {11, 1234}, {7, 33, 128, 500}, {true, false}, {true, false}, {0.0f}, {1234ULL});
REDUCE_TEST((ReduceTest<float, float, int>), ReduceTestFFI32, inputsff_i32);
REDUCE_TEST((ReduceTest<double, double, int>), ReduceTestDDI32, inputsdd_i32);
REDUCE_TEST((ReduceTest<float, double, int>), ReduceTestFDI32, inputsfd_i32);
REDUCE_TEST((ReduceTest<float, float, uint32_t>), ReduceTestFFU32, inputsff_u32);
REDUCE_TEST((ReduceTest<float, float, int64_t>), ReduceTestFFI64, inputsff_i64);
// The following test cases are for "thick" coalesced reductions
// (few long rows, reduced along rows).
const std::vector<ReduceInputs<float, float, int>> inputsff_thick_i32 =
  raft::util::itertools::product<ReduceInputs<float, float, int>>(
    {0.0001f}, {3, 9}, {17771, 33333, 100000}, {true}, {true}, {0.0f}, {1234ULL});
const std::vector<ReduceInputs<double, double, int>> inputsdd_thick_i32 =
  raft::util::itertools::product<ReduceInputs<double, double, int>>(
    {0.000001}, {3, 9}, {17771, 33333, 100000}, {true}, {true}, {0.0}, {1234ULL});
const std::vector<ReduceInputs<float, double, int>> inputsfd_thick_i32 =
  raft::util::itertools::product<ReduceInputs<float, double, int>>(
    {0.000001}, {3, 9}, {17771, 33333, 100000}, {true}, {true}, {0.0f}, {1234ULL});
const std::vector<ReduceInputs<float, float, uint32_t>> inputsff_thick_u32 =
  raft::util::itertools::product<ReduceInputs<float, float, uint32_t>>(
    {0.0001f}, {3u, 9u}, {17771u, 33333u, 100000u}, {true}, {true}, {0.0f}, {1234ULL});
const std::vector<ReduceInputs<float, float, int64_t>> inputsff_thick_i64 =
  raft::util::itertools::product<ReduceInputs<float, float, int64_t>>(
    {0.0001f}, {3, 9}, {17771, 33333, 100000}, {true}, {true}, {0.0f}, {1234ULL});
REDUCE_TEST((ReduceTest<float, float, int>), ReduceTestFFI32Thick, inputsff_thick_i32);
REDUCE_TEST((ReduceTest<double, double, int>), ReduceTestDDI32Thick, inputsdd_thick_i32);
REDUCE_TEST((ReduceTest<float, double, int>), ReduceTestFDI32Thick, inputsfd_thick_i32);
REDUCE_TEST((ReduceTest<float, float, uint32_t>), ReduceTestFFU32Thick, inputsff_thick_u32);
REDUCE_TEST((ReduceTest<float, float, int64_t>), ReduceTestFFI64Thick, inputsff_thick_i64);
// Test key-value-pair reductions. This is important because shuffle intrinsics can't be used
// directly with those types.
// Main op: wraps each input value with its index, producing the KeyValuePair
// fed into the reduce op.
template <typename T, typename IdxT = int>
struct ValueToKVP {
  HDI raft::KeyValuePair<IdxT, T> operator()(T value, IdxT idx) { return {idx, value}; }
};
// Reduce op: picks the pair with the larger value, breaking value ties in
// favor of the smaller key.
template <typename T1, typename T2>
struct ArgMaxOp {
  HDI raft::KeyValuePair<T1, T2> operator()(raft::KeyValuePair<T1, T2> a,
                                            raft::KeyValuePair<T1, T2> b)
  {
    const bool a_wins = a.value > b.value || (a.value == b.value && a.key <= b.key);
    return a_wins ? a : b;
  }
};
// KVP argmax cases; the "tolerance" and "init" slots hold KeyValuePair values
// to satisfy the ReduceInputs<OutType> field types.
const std::vector<ReduceInputs<short, raft::KeyValuePair<int, short>, int>> inputs_kvpis_i32 =
  raft::util::itertools::product<ReduceInputs<short, raft::KeyValuePair<int, short>, int>>(
    {raft::KeyValuePair{0, short(0)}},
    {11, 1234},
    {7, 33, 128, 500},
    {true},
    {true},
    {raft::KeyValuePair{0, short(0)}},
    {1234ULL});
const std::vector<ReduceInputs<float, raft::KeyValuePair<int, float>, int>> inputs_kvpif_i32 =
  raft::util::itertools::product<ReduceInputs<float, raft::KeyValuePair<int, float>, int>>(
    {raft::KeyValuePair{0, 0.0001f}},
    {11, 1234},
    {7, 33, 128, 500},
    {true},
    {true},
    {raft::KeyValuePair{0, 0.0f}},
    {1234ULL});
const std::vector<ReduceInputs<double, raft::KeyValuePair<int, double>, int>> inputs_kvpid_i32 =
  raft::util::itertools::product<ReduceInputs<double, raft::KeyValuePair<int, double>, int>>(
    {raft::KeyValuePair{0, 0.000001}},
    {11, 1234},
    {7, 33, 128, 500},
    {true},
    {true},
    {raft::KeyValuePair{0, 0.0}},
    {1234ULL});
REDUCE_TEST((ReduceTest<short,
                        raft::KeyValuePair<int, short>,
                        int,
                        ValueToKVP<short, int>,
                        ArgMaxOp<int, short>,
                        raft::identity_op>),
            ReduceTestKVPISI32,
            inputs_kvpis_i32);
REDUCE_TEST((ReduceTest<float,
                        raft::KeyValuePair<int, float>,
                        int,
                        ValueToKVP<float, int>,
                        ArgMaxOp<int, float>,
                        raft::identity_op>),
            ReduceTestKVPIFI32,
            inputs_kvpif_i32);
REDUCE_TEST((ReduceTest<double,
                        raft::KeyValuePair<int, double>,
                        int,
                        ValueToKVP<double, int>,
                        ArgMaxOp<int, double>,
                        raft::identity_op>),
            ReduceTestKVPIDI32,
            inputs_kvpid_i32);
}  // end namespace linalg
}  // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/binary_op.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../test_utils.cuh"
#include <raft/linalg/binary_op.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
namespace linalg {
// Reference kernel: one thread per element, out[i] = in1[i] + in2[i], with the
// sum computed in InType and then cast to OutType.
template <typename InType, typename OutType, typename IdxType>
RAFT_KERNEL naiveAddKernel(OutType* out, const InType* in1, const InType* in2, IdxType len)
{
  // Widen blockIdx/blockDim to IdxType before the multiply so the global index
  // does not overflow 32 bits when IdxType is 64-bit and len is large.
  IdxType idx = threadIdx.x + ((IdxType)blockIdx.x * (IdxType)blockDim.x);
  if (idx < len) { out[idx] = static_cast<OutType>(in1[idx] + in2[idx]); }
}
// Host-side launcher for the reference element-wise add used to validate
// raft::linalg::binary_op. Launches on the default stream; errors from the
// launch are surfaced via cudaPeekAtLastError.
template <typename InType, typename IdxType = int, typename OutType = InType>
void naiveAdd(OutType* out, const InType* in1, const InType* in2, IdxType len)
{
  constexpr IdxType kThreadsPerBlock = 64;
  const IdxType grid_size            = raft::ceildiv(len, kThreadsPerBlock);
  naiveAddKernel<InType, OutType, IdxType>
    <<<grid_size, kThreadsPerBlock>>>(out, in1, in2, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Parameter set for the binary-op tests.
template <typename InType, typename IdxType = int, typename OutType = InType>
struct BinaryOpInputs {
  InType tolerance;             // approximate-comparison tolerance for results
  IdxType len;                  // number of elements to process
  unsigned long long int seed;  // RNG seed for reproducible inputs
};
template <typename InType, typename IdxType = int, typename OutType = InType>
::std::ostream& operator<<(::std::ostream& os, const BinaryOpInputs<InType, IdxType, OutType>& d)
{
return os;
}
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/reduce_rows_by_key.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/reduce_rows_by_key.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Reference (naive) reduce-rows-by-key: one thread per (column, key) pair.
// Thread (c, this_key) scans every row and accumulates d_A[r][c] (times the
// optional per-row weight) into d_sums[this_key][c] for rows whose key matches.
// NOTE(review): d_char_keys is unused here; it only mirrors the scratch-buffer
// argument of the implementation under test.
// There is no bounds check on this_key: the launch in naiveReduceRowsByKey
// uses gridDim.y == nkeys with blockDim.y == 1, so this_key is always < nkeys.
template <typename Type>
RAFT_KERNEL naiveReduceRowsByKeyKernel(const Type* d_A,
                                       int lda,
                                       uint32_t* d_keys,
                                       const Type* d_weight,
                                       char* d_char_keys,
                                       int nrows,
                                       int ncols,
                                       int nkeys,
                                       Type* d_sums)
{
  int c = threadIdx.x + blockIdx.x * blockDim.x;
  if (c >= ncols) return;
  int this_key = threadIdx.y + blockIdx.y * blockDim.y;
  Type sum = 0.0;
  for (int r = 0; r < nrows; r++) {
    if (this_key != d_keys[r]) continue;
    Type wt = 1;
    if (d_weight) wt = d_weight[r];
    sum += d_A[lda * r + c] * wt;
  }
  d_sums[this_key * ncols + c] = sum;
}
template <typename Type>
void naiveReduceRowsByKey(const Type* d_A,
int lda,
uint32_t* d_keys,
const Type* d_weight,
char* d_char_keys,
int nrows,
int ncols,
int nkeys,
Type* d_sums,
cudaStream_t stream)
{
cudaMemset(d_sums, 0, sizeof(Type) * nkeys * ncols);
naiveReduceRowsByKeyKernel<<<dim3((ncols + 31) / 32, nkeys), dim3(32, 1), 0, stream>>>(
d_A, lda, d_keys, d_weight, d_char_keys, nrows, ncols, nkeys, d_sums);
}
// Parameter set for the reduce-rows-by-key tests.
template <typename T>
struct ReduceRowsInputs {
  T tolerance;                  // approximate-comparison tolerance for results
  int nobs;                     // number of observations (rows)
  uint32_t cols;                // number of columns per observation
  uint32_t nkeys;               // number of distinct keys ("clusters")
  unsigned long long int seed;  // RNG seed for reproducible inputs
  bool weighted;                // whether rows carry per-row weights
  T max_weight;                 // upper bound of generated weights (if weighted)
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const ReduceRowsInputs<T>& dims)
{
return os;
}
// Fixture: computes a reference row-sum-by-key with the naive kernel and the
// library result with raft::linalg::reduce_rows_by_key; TEST_P bodies compare
// the two buffers approximately.
template <typename T>
class ReduceRowTest : public ::testing::TestWithParam<ReduceRowsInputs<T>> {
 public:
  // Allocates input, output, reference, key and scratch buffers sized from the
  // test parameters.
  ReduceRowTest()
    : params(::testing::TestWithParam<ReduceRowsInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in(params.nobs * params.cols, stream),
      out(params.nkeys * params.cols, stream),
      out_ref(params.nkeys * params.cols, stream),
      keys(params.nobs, stream),
      scratch_buf(params.nobs, stream)
  {
  }

 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    raft::random::RngState r_int(params.seed);
    uint32_t nobs  = params.nobs;
    uint32_t cols  = params.cols;
    uint32_t nkeys = params.nkeys;
    // Values scaled by 1/nobs keep per-key sums O(1) regardless of row count.
    uniform(handle, r, in.data(), nobs * cols, T(0.0), T(2.0 / nobs));
    uniformInt(handle, r_int, keys.data(), nobs, (uint32_t)0, nkeys);
    rmm::device_uvector<T> weight(0, stream);
    if (params.weighted) {
      weight.resize(nobs, stream);
      // NOTE(review): this `r` shadows the outer RngState and restarts from
      // the same seed — presumably intentional (a fresh stream for weights);
      // confirm before changing.
      raft::random::RngState r(params.seed);
      uniform(handle, r, weight.data(), nobs, T(1), params.max_weight);
    }
    // Reference result into out_ref.
    naiveReduceRowsByKey(in.data(),
                         cols,
                         keys.data(),
                         params.weighted ? weight.data() : nullptr,
                         scratch_buf.data(),
                         nobs,
                         cols,
                         nkeys,
                         out_ref.data(),
                         stream);
    // NOTE(review): the matrix views are built with extents (cols, nobs) /
    // (cols, nkeys) — verify against the reduce_rows_by_key mdspan contract.
    auto input_view = raft::make_device_matrix_view<const T>(
      in.data(), params.cols, static_cast<uint32_t>(params.nobs));
    auto output_view = raft::make_device_matrix_view(out.data(), params.cols, params.nkeys);
    auto keys_view   = raft::make_device_vector_view<const uint32_t>(
      keys.data(), static_cast<uint32_t>(params.nobs));
    auto scratch_buf_view =
      raft::make_device_vector_view(scratch_buf.data(), static_cast<uint32_t>(params.nobs));
    std::optional<raft::device_vector_view<const T>> weights_view;
    if (params.weighted) {
      weights_view.emplace(weight.data(), static_cast<uint32_t>(params.nobs));
    }
    reduce_rows_by_key(
      handle, input_view, keys_view, output_view, params.nkeys, scratch_buf_view, weights_view);
    resource::sync_stream(handle, stream);
  }

 protected:
  ReduceRowsInputs<T> params;
  raft::resources handle;
  cudaStream_t stream = 0;
  int device_count    = 0;
  rmm::device_uvector<T> in, out, out_ref;
  rmm::device_uvector<uint32_t> keys;
  rmm::device_uvector<char> scratch_buf;
};
// Each section below pairs a problem shape with one unweighted and two
// weighted (different max_weight) configurations; every TEST_P body compares
// the library output against the naive reference with an approximate
// tolerance.
// ReduceRowTestF
// 128 Obs, 32 cols, 6 clusters
const std::vector<ReduceRowsInputs<float>> inputsf2 = {{0.000001f, 128, 32, 6, 1234ULL, false},
                                                       {0.000001f, 128, 32, 6, 1234ULL, true, 1.0},
                                                       {0.000001f, 128, 32, 6, 1234ULL, true, 2.0}};
typedef ReduceRowTest<float> ReduceRowTestF;
TEST_P(ReduceRowTestF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(),
                                out.data(),
                                params.cols * params.nkeys,
                                raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestF, ::testing::ValuesIn(inputsf2));
// ReduceRowTestD
// 128 Obs, 32 cols, 6 clusters, double precision
const std::vector<ReduceRowsInputs<double>> inputsd2 = {
  {0.00000001, 128, 32, 6, 1234ULL, false},
  {0.00000001, 128, 32, 6, 1234ULL, true, 2.0},
  {0.00000001, 128, 32, 6, 1234ULL, true, 8.0}};
typedef ReduceRowTest<double> ReduceRowTestD;
TEST_P(ReduceRowTestD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(),
                                out.data(),
                                params.cols * params.nkeys,
                                raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestD, ::testing::ValuesIn(inputsd2));
// ReduceRowTestSmallnKey
// 128 Obs, 32 cols, 3 clusters
const std::vector<ReduceRowsInputs<float>> inputsf_small_nkey = {
  {0.000001f, 128, 32, 3, 1234ULL, false},
  {0.000001f, 128, 32, 3, 1234ULL, true, 5.0},
  {0.000001f, 128, 32, 3, 1234ULL, true, 8.0}};
typedef ReduceRowTest<float> ReduceRowTestSmallnKey;
TEST_P(ReduceRowTestSmallnKey, Result)
{
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(),
                                out.data(),
                                params.cols * params.nkeys,
                                raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests,
                        ReduceRowTestSmallnKey,
                        ::testing::ValuesIn(inputsf_small_nkey));
// ReduceRowTestBigSpace
// 512 Obs, 1024 cols, 32 clusters, double precision
const std::vector<ReduceRowsInputs<double>> inputsd_big_space = {
  {0.00000001, 512, 1024, 40, 1234ULL, false},
  {0.00000001, 512, 1024, 40, 1234ULL, true, 4.0},
  {0.00000001, 512, 1024, 40, 1234ULL, true, 16.0}};
typedef ReduceRowTest<double> ReduceRowTestBigSpace;
TEST_P(ReduceRowTestBigSpace, Result)
{
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(),
                                out.data(),
                                params.cols * params.nkeys,
                                raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests,
                        ReduceRowTestBigSpace,
                        ::testing::ValuesIn(inputsd_big_space));
// ReduceRowTestManyObs
// 100000 Obs, 37 cols, 32 clusters
const std::vector<ReduceRowsInputs<float>> inputsf_many_obs = {
  {0.00001f, 100000, 37, 32, 1234ULL, false},
  {0.00001f, 100000, 37, 32, 1234ULL, true, 4.0},
  {0.00001f, 100000, 37, 32, 1234ULL, true, 16.0}};
typedef ReduceRowTest<float> ReduceRowTestManyObs;
TEST_P(ReduceRowTestManyObs, Result)
{
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(),
                                out.data(),
                                params.cols * params.nkeys,
                                raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests,
                        ReduceRowTestManyObs,
                        ::testing::ValuesIn(inputsf_many_obs));
// ReduceRowTestManyClusters
// 100000 Obs, 37 cols, 2048 clusters
const std::vector<ReduceRowsInputs<float>> inputsf_many_cluster = {
  {0.00001f, 100000, 37, 2048, 1234ULL, false},
  {0.00001f, 100000, 37, 2048, 1234ULL, true, 32.0},
  {0.00001f, 100000, 37, 2048, 1234ULL, true, 16.0}};
typedef ReduceRowTest<float> ReduceRowTestManyClusters;
TEST_P(ReduceRowTestManyClusters, Result)
{
  ASSERT_TRUE(raft::devArrMatch(out_ref.data(),
                                out.data(),
                                params.cols * params.nkeys,
                                raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceRowTests,
                        ReduceRowTestManyClusters,
                        ::testing::ValuesIn(inputsf_many_cluster));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/mean_squared_error.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/mean_squared_error.cuh>
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_scalar.hpp>
namespace raft {
namespace linalg {
// reference MSE calculation
template <typename T>
RAFT_KERNEL naiveMeanSquaredError(const int n, const T* a, const T* b, T weight, T* out)
{
T err = 0;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
T diff = a[i] - b[i];
err += weight * diff * diff / n;
}
atomicAdd(out, err);
}
// Parameter set for the mean-squared-error tests.
template <typename T, typename IndexType = std::uint32_t>
struct MeanSquaredErrorInputs {
  T tolerance;                  // approximate-comparison tolerance for results
  IndexType len;                // number of elements in each input vector
  T weight;                     // scalar weight applied to every squared diff
  unsigned long long int seed;  // RNG seed for reproducible inputs
};
// Fixture: computes MSE of two random vectors with the library routine and
// with the naive kernel; TEST_P bodies compare the two scalars approximately.
template <typename T>
class MeanSquaredErrorTest : public ::testing::TestWithParam<MeanSquaredErrorInputs<T>> {
 protected:
  MeanSquaredErrorInputs<T> params;
  raft::resources handle;
  rmm::device_scalar<T> output;     // library result
  rmm::device_scalar<T> refoutput;  // reference result (naive kernel)

 public:
  // Both scalars are constructed with 0; the naive kernel accumulates into
  // refoutput via atomicAdd and relies on that zero-initialization.
  MeanSquaredErrorTest()
    : testing::TestWithParam<MeanSquaredErrorInputs<T>>(),
      output(0, resource::get_cuda_stream(handle)),
      refoutput(0, resource::get_cuda_stream(handle))
  {
    resource::sync_stream(handle);
  }

 protected:
  void SetUp() override
  {
    params              = ::testing::TestWithParam<MeanSquaredErrorInputs<T>>::GetParam();
    cudaStream_t stream = resource::get_cuda_stream(handle);
    raft::random::RngState r(params.seed);
    rmm::device_uvector<T> a(params.len, stream);
    rmm::device_uvector<T> b(params.len, stream);
    uniform(handle, r, a.data(), params.len, T(-1.0), T(1.0));
    uniform(handle, r, b.data(), params.len, T(-1.0), T(1.0));
    resource::sync_stream(handle);
    mean_squared_error<T, std::uint32_t, T>(handle,
                                            make_device_vector_view<const T>(a.data(), params.len),
                                            make_device_vector_view<const T>(b.data(), params.len),
                                            make_device_scalar_view<T>(output.data()),
                                            params.weight);
    naiveMeanSquaredError<<<256, 256, 0, stream>>>(
      params.len, a.data(), b.data(), params.weight, refoutput.data());
    // Final sync so a/b (freed at scope exit) outlive the kernels using them.
    resource::sync_stream(handle);
  }
  void TearDown() override {}
};
// Single-precision configurations: power-of-two sizes plus one odd size to
// exercise the tail-handling path.
const std::vector<MeanSquaredErrorInputs<float>> inputsf = {
  {0.0001f, 1024 * 1024, 1.0, 1234ULL},
  {0.0001f, 4 * 1024 * 1024, 8.0, 1234ULL},
  {0.0001f, 16 * 1024 * 1024, 24.0, 1234ULL},
  {0.0001f, 98689, 1.0, 1234ULL},
};
// Double-precision configurations, mirroring inputsf.
// Consistency fix: these are MeanSquaredErrorInputs<double>, so use double
// literals for the tolerance instead of the previous 0.0001f float literals
// (the implicit float->double conversion was value-preserving but misleading).
const std::vector<MeanSquaredErrorInputs<double>> inputsd = {
  {0.0001, 1024 * 1024, 1.0, 1234ULL},
  {0.0001, 4 * 1024 * 1024, 8.0, 1234ULL},
  {0.0001, 16 * 1024 * 1024, 24.0, 1234ULL},
  {0.0001, 98689, 1.0, 1234ULL},
};
// Compare library vs. reference MSE for float and double instantiations.
typedef MeanSquaredErrorTest<float> MeanSquaredErrorTestF;
TEST_P(MeanSquaredErrorTestF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(
    refoutput.data(), output.data(), 1, raft::CompareApprox<float>(params.tolerance)));
}
typedef MeanSquaredErrorTest<double> MeanSquaredErrorTestD;
TEST_P(MeanSquaredErrorTestD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(
    refoutput.data(), output.data(), 1, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MeanSquaredErrorTests,
                         MeanSquaredErrorTestF,
                         ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(MeanSquaredErrorTests,
                         MeanSquaredErrorTestD,
                         ::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/multiply.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "unary_op.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/multiply.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Fixture: scales a random vector by params.scalar with the reference
// naiveScale and with raft::linalg::multiply_scalar; TEST_P bodies compare
// the two buffers approximately.
template <typename T>
class MultiplyTest : public ::testing::TestWithParam<UnaryOpInputs<T>> {
 public:
  MultiplyTest()
    : params(::testing::TestWithParam<UnaryOpInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in(params.len, stream),
      out_ref(params.len, stream),
      out(params.len, stream)
  {
  }

 protected:
  void SetUp() override
  {
    // `params` is already initialized in the constructor's init list (the
    // buffers are sized from it), so the previous redundant GetParam()
    // assignment here was removed.
    raft::random::RngState r(params.seed);
    int len = params.len;
    uniform(handle, r, in.data(), len, T(-1.0), T(1.0));
    // Reference result.
    naiveScale(out_ref.data(), in.data(), params.scalar, len, stream);
    // Library result: scalar is passed as a host scalar view.
    auto out_view    = raft::make_device_vector_view(out.data(), len);
    auto in_view     = raft::make_device_vector_view<const T>(in.data(), len);
    auto scalar_view = raft::make_host_scalar_view<const T>(&params.scalar);
    multiply_scalar(handle, in_view, out_view, scalar_view);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  UnaryOpInputs<T> params;
  rmm::device_uvector<T> in, out_ref, out;
};
// Instantiate the scalar-multiply test for float and double.
const std::vector<UnaryOpInputs<float>> inputsf = {{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef MultiplyTest<float> MultiplyTestF;
TEST_P(MultiplyTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MultiplyTests, MultiplyTestF, ::testing::ValuesIn(inputsf));
typedef MultiplyTest<double> MultiplyTestD;
// Note: float literals here are converted exactly to double by UnaryOpInputs<double>.
const std::vector<UnaryOpInputs<double>> inputsd = {{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
TEST_P(MultiplyTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MultiplyTests, MultiplyTestD, ::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/binary_op.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "binary_op.cuh"
#include <gtest/gtest.h>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/binary_op.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace linalg {
// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
// Thin launch helper: wraps the raw device pointers in mdspan vector views and
// applies an element-wise add via raft::linalg::binary_op. Kept as a free
// function (see the comment above) so the extended __device__ lambda rules are
// not violated inside the fixture class.
template <typename InType, typename IdxType, typename OutType>
void binaryOpLaunch(
  const raft::resources& handle, OutType* out, const InType* in1, const InType* in2, IdxType len)
{
  binary_op(handle,
            raft::make_device_vector_view(in1, len),
            raft::make_device_vector_view(in2, len),
            raft::make_device_vector_view(out, len),
            raft::add_op{});
}
// Fixture: adds two random vectors with the reference kernel (naiveAdd) and
// with raft::linalg::binary_op; TEST_P bodies compare the two outputs.
template <typename InType, typename IdxType, typename OutType = InType>
class BinaryOpTest : public ::testing::TestWithParam<BinaryOpInputs<InType, IdxType, OutType>> {
 public:
  BinaryOpTest()
    : params(::testing::TestWithParam<BinaryOpInputs<InType, IdxType, OutType>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in1(params.len, stream),
      in2(params.len, stream),
      out_ref(params.len, stream),
      out(params.len, stream)
  {
  }

 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    IdxType len = params.len;
    uniform(handle, r, in1.data(), len, InType(-1.0), InType(1.0));
    uniform(handle, r, in2.data(), len, InType(-1.0), InType(1.0));
    naiveAdd(out_ref.data(), in1.data(), in2.data(), len);
    binaryOpLaunch(handle, out.data(), in1.data(), in2.data(), len);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  BinaryOpInputs<InType, IdxType, OutType> params;
  rmm::device_uvector<InType> in1;
  rmm::device_uvector<InType> in2;
  rmm::device_uvector<OutType> out_ref;
  rmm::device_uvector<OutType> out;
};
// Instantiations covering the (input type, index type, output type) matrix:
// float/double values, 32-/64-bit indices, and one mixed float-in/double-out
// case.
const std::vector<BinaryOpInputs<float, int>> inputsf_i32 = {{0.000001f, 1024 * 1024, 1234ULL}};
typedef BinaryOpTest<float, int> BinaryOpTestF_i32;
TEST_P(BinaryOpTestF_i32, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_SUITE_P(BinaryOpTests, BinaryOpTestF_i32, ::testing::ValuesIn(inputsf_i32));
const std::vector<BinaryOpInputs<float, size_t>> inputsf_i64 = {{0.000001f, 1024 * 1024, 1234ULL}};
typedef BinaryOpTest<float, size_t> BinaryOpTestF_i64;
TEST_P(BinaryOpTestF_i64, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_SUITE_P(BinaryOpTests, BinaryOpTestF_i64, ::testing::ValuesIn(inputsf_i64));
const std::vector<BinaryOpInputs<float, int, double>> inputsf_i32_d = {
  {0.000001f, 1024 * 1024, 1234ULL}};
typedef BinaryOpTest<float, int, double> BinaryOpTestF_i32_D;
TEST_P(BinaryOpTestF_i32_D, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_SUITE_P(BinaryOpTests, BinaryOpTestF_i32_D, ::testing::ValuesIn(inputsf_i32_d));
const std::vector<BinaryOpInputs<double, int>> inputsd_i32 = {{0.00000001, 1024 * 1024, 1234ULL}};
typedef BinaryOpTest<double, int> BinaryOpTestD_i32;
TEST_P(BinaryOpTestD_i32, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_SUITE_P(BinaryOpTests, BinaryOpTestD_i32, ::testing::ValuesIn(inputsd_i32));
const std::vector<BinaryOpInputs<double, size_t>> inputsd_i64 = {
  {0.00000001, 1024 * 1024, 1234ULL}};
typedef BinaryOpTest<double, size_t> BinaryOpTestD_i64;
TEST_P(BinaryOpTestD_i64, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_SUITE_P(BinaryOpTests, BinaryOpTestD_i64, ::testing::ValuesIn(inputsd_i64));
// Regression test: runs binaryOp on deliberately misaligned pointers; it fails
// with cudaErrorMisalignedAddress if the implementation selects a vectorized
// load width (veclen) the pointers cannot support.
template <typename math_t>
class BinaryOpAlignment : public ::testing::Test {
 protected:
 public:
  void Misaligned()
  {
    auto stream = resource::get_cuda_stream(handle);
    // Test to trigger cudaErrorMisalignedAddress if veclen is incorrectly
    // chosen.
    int n = 1024;
    rmm::device_uvector<math_t> x(n, stream);
    rmm::device_uvector<math_t> y(n, stream);
    rmm::device_uvector<math_t> z(n, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(x.data(), 0, n * sizeof(math_t), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(y.data(), 0, n * sizeof(math_t), stream));
    // Odd offsets (9, 137, 19) give the three pointers different alignments.
    raft::linalg::binaryOp(z.data() + 9,
                           x.data() + 137,
                           y.data() + 19,
                           256,
                           raft::add_op{},
                           resource::get_cuda_stream(handle));
  }
  raft::resources handle;
};
typedef ::testing::Types<float, double> FloatTypes;
TYPED_TEST_CASE(BinaryOpAlignment, FloatTypes);
TYPED_TEST(BinaryOpAlignment, Misaligned) { this->Misaligned(); }
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/matrix_vector_op.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "matrix_vector_op.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/itertools.hpp>
#include <type_traits>
namespace raft {
namespace linalg {
// Parameter set for the matrix-vector-op tests.
template <typename IdxType = int>
struct MatVecOpInputs {
  IdxType rows, cols;                   // matrix shape
  bool rowMajor, bcastAlongRows;        // layout and broadcast direction
  IdxType inAlignOffset, outAlignOffset;  // pointer offsets to test misaligned IO
  unsigned long long int seed;          // RNG seed for reproducible inputs
};
template <typename IdxType>
::std::ostream& operator<<(::std::ostream& os, const MatVecOpInputs<IdxType>& dims)
{
return os;
}
// Fills `ptr` with uniform random values: floating-point types draw from
// [-10, 10], integral types from [0, 100].
template <typename T, typename LenT>
inline void gen_uniform(const raft::resources& handle,
                        raft::random::RngState& rng,
                        T* ptr,
                        LenT len)
{
  if constexpr (!std::is_integral_v<T>) {
    raft::random::uniform(handle, rng, ptr, len, (T)-10.0, (T)10.0);
  } else {
    raft::random::uniformInt(handle, rng, ptr, len, (T)0, (T)100);
  }
}
// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
// Dispatch helper: builds row- or column-major views over the same pointers
// and calls the one- or two-vector overload of matrix_vector_op depending on
// OpT::useTwoVectors. The broadcast vector length is D when broadcasting
// along rows, N otherwise.
template <typename OpT, typename MatT, typename IdxType, typename Vec1T, typename Vec2T>
void matrixVectorOpLaunch(const raft::resources& handle,
                          MatT* out,
                          const MatT* in,
                          const Vec1T* vec1,
                          const Vec2T* vec2,
                          IdxType D,
                          IdxType N,
                          bool rowMajor,
                          bool bcastAlongRows)
{
  auto out_row_major = raft::make_device_matrix_view<MatT, IdxType, raft::row_major>(out, N, D);
  auto in_row_major = raft::make_device_matrix_view<const MatT, IdxType, raft::row_major>(in, N, D);
  auto out_col_major = raft::make_device_matrix_view<MatT, IdxType, raft::col_major>(out, N, D);
  auto in_col_major = raft::make_device_matrix_view<const MatT, IdxType, raft::col_major>(in, N, D);
  auto apply = bcastAlongRows ? Apply::ALONG_ROWS : Apply::ALONG_COLUMNS;
  auto len = bcastAlongRows ? D : N;
  auto vec1_view = raft::make_device_vector_view<const Vec1T, IdxType>(vec1, len);
  if constexpr (OpT::useTwoVectors) {
    auto vec2_view = raft::make_device_vector_view<const Vec2T, IdxType>(vec2, len);
    if (rowMajor) {
      matrix_vector_op(handle, in_row_major, vec1_view, vec2_view, out_row_major, apply, OpT{});
    } else {
      matrix_vector_op(handle, in_col_major, vec1_view, vec2_view, out_col_major, apply, OpT{});
    }
  } else {
    if (rowMajor) {
      matrix_vector_op(handle, in_row_major, vec1_view, out_row_major, apply, OpT{});
    } else {
      matrix_vector_op(handle, in_col_major, vec1_view, out_col_major, apply, OpT{});
    }
  }
}
// Fixture: applies OpT to a random matrix and one or two broadcast vectors
// with the reference naiveMatVec and with raft::linalg::matrix_vector_op;
// MVTEST bodies compare the two outputs.
// The in/out buffers are over-allocated by the alignment offsets so kernels
// can be exercised on deliberately misaligned pointers.
template <typename OpT,
          typename MatT,
          typename IdxType,
          typename Vec1T = MatT,
          typename Vec2T = Vec1T>
class MatVecOpTest : public ::testing::TestWithParam<MatVecOpInputs<IdxType>> {
 public:
  MatVecOpTest()
    : stream(resource::get_cuda_stream(handle)),
      params(::testing::TestWithParam<MatVecOpInputs<IdxType>>::GetParam()),
      // Broadcast vector spans the columns when broadcasting along rows,
      // the rows otherwise.
      vec_size(params.bcastAlongRows ? params.cols : params.rows),
      in(params.rows * params.cols + params.inAlignOffset, stream),
      out_ref(params.rows * params.cols + params.outAlignOffset, stream),
      out(params.rows * params.cols + params.outAlignOffset, stream),
      vec1(vec_size, stream),
      vec2(vec_size, stream)
  {
  }

 protected:
  void SetUp() override
  {
    // Offset the pointers to test the implementation's handling of
    // misaligned input/output.
    MatT* in_ptr      = in.data() + params.inAlignOffset;
    MatT* out_ptr     = out.data() + params.outAlignOffset;
    MatT* out_ref_ptr = out_ref.data() + params.outAlignOffset;
    raft::random::RngState r(params.seed);
    IdxType len = params.rows * params.cols;
    gen_uniform<MatT>(handle, r, in_ptr, len);
    gen_uniform<Vec1T>(handle, r, vec1.data(), vec_size);
    gen_uniform<Vec2T>(handle, r, vec2.data(), vec_size);
    if constexpr (OpT::useTwoVectors) {
      naiveMatVec(out_ref_ptr,
                  in_ptr,
                  vec1.data(),
                  vec2.data(),
                  params.cols,
                  params.rows,
                  params.rowMajor,
                  params.bcastAlongRows,
                  OpT{},
                  stream);
    } else {
      naiveMatVec(out_ref_ptr,
                  in_ptr,
                  vec1.data(),
                  params.cols,
                  params.rows,
                  params.rowMajor,
                  params.bcastAlongRows,
                  OpT{},
                  stream);
    }
    matrixVectorOpLaunch<OpT>(handle,
                              out_ptr,
                              in_ptr,
                              vec1.data(),
                              vec2.data(),
                              params.cols,
                              params.rows,
                              params.rowMajor,
                              params.bcastAlongRows);
    resource::sync_stream(handle);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  MatVecOpInputs<IdxType> params;
  IdxType vec_size;
  rmm::device_uvector<MatT> in;
  rmm::device_uvector<MatT> out;
  rmm::device_uvector<MatT> out_ref;
  rmm::device_uvector<Vec1T> vec1;
  rmm::device_uvector<Vec2T> vec2;
};
// Defines the gtest body and instantiation for one MatVecOpTest type:
// floating-point outputs are compared with an approximate tolerance,
// integer outputs exactly. The comparison skips the alignment padding by
// starting at outAlignOffset.
#define MVTEST(TestClass, OutType, inputs, tolerance)               \
  TEST_P(TestClass, Result)                                         \
  {                                                                 \
    if constexpr (std::is_floating_point_v<OutType>) {              \
      ASSERT_TRUE(devArrMatch(out_ref.data() + params.outAlignOffset, \
                              out.data() + params.outAlignOffset,   \
                              params.rows * params.cols,            \
                              CompareApprox<OutType>(tolerance)));  \
    } else {                                                        \
      ASSERT_TRUE(devArrMatch(out_ref.data() + params.outAlignOffset, \
                              out.data() + params.outAlignOffset,   \
                              params.rows * params.cols,            \
                              Compare<OutType>()));                 \
    }                                                               \
  }                                                                 \
  INSTANTIATE_TEST_SUITE_P(MatVecOpTests, TestClass, ::testing::ValuesIn(inputs))
// Tolerances used by the floating-point comparisons above.
#define MV_EPS_F 0.00001f
#define MV_EPS_D 0.0000001
/*
 * This set of tests covers cases where all the types are the same.
 */
const std::vector<MatVecOpInputs<int>> inputs_i32 =
  raft::util::itertools::product<MatVecOpInputs<int>>(
    {1024}, {32, 64}, {true, false}, {true, false}, {0, 1, 2}, {0, 1, 2}, {1234ULL});
const std::vector<MatVecOpInputs<int64_t>> inputs_i64 =
  raft::util::itertools::product<MatVecOpInputs<int64_t>>(
    {2500}, {250}, {false}, {false}, {0, 1}, {0, 1}, {1234ULL});
// Adds the matrix element and one broadcast vector element.
template <typename T>
struct Add1Vec {
  static constexpr bool useTwoVectors = false;
  HDI T operator()(T a, T b) const { return a + b; };
};
// Adds the matrix element and two broadcast vector elements.
template <typename T>
struct Add2Vec {
  static constexpr bool useTwoVectors = true;
  HDI T operator()(T a, T b, T c) const { return a + b + c; };
};
typedef MatVecOpTest<Add1Vec<float>, float, int> MatVecOpTestF_i32_add1vec;
typedef MatVecOpTest<Add2Vec<float>, float, int> MatVecOpTestF_i32_add2vec;
typedef MatVecOpTest<Add1Vec<float>, float, int64_t> MatVecOpTestF_i64_add1vec;
typedef MatVecOpTest<Add2Vec<float>, float, int64_t> MatVecOpTestF_i64_add2vec;
typedef MatVecOpTest<Add1Vec<double>, double, int> MatVecOpTestD_i32_add1vec;
typedef MatVecOpTest<Add2Vec<double>, double, int> MatVecOpTestD_i32_add2vec;
typedef MatVecOpTest<Add1Vec<double>, double, int64_t> MatVecOpTestD_i64_add1vec;
typedef MatVecOpTest<Add2Vec<double>, double, int64_t> MatVecOpTestD_i64_add2vec;
MVTEST(MatVecOpTestF_i32_add1vec, float, inputs_i32, MV_EPS_F);
MVTEST(MatVecOpTestF_i32_add2vec, float, inputs_i32, MV_EPS_F);
MVTEST(MatVecOpTestF_i64_add1vec, float, inputs_i64, MV_EPS_F);
MVTEST(MatVecOpTestF_i64_add2vec, float, inputs_i64, MV_EPS_F);
MVTEST(MatVecOpTestD_i32_add1vec, double, inputs_i32, MV_EPS_D);
MVTEST(MatVecOpTestD_i32_add2vec, double, inputs_i32, MV_EPS_D);
MVTEST(MatVecOpTestD_i64_add1vec, double, inputs_i64, MV_EPS_D);
MVTEST(MatVecOpTestD_i64_add2vec, double, inputs_i64, MV_EPS_D);
/*
 * This set of tests covers cases with different types.
 */
// a * b + c with distinct matrix/vector element types.
template <typename MatT, typename Vec1T, typename Vec2T>
struct MulAndAdd {
  static constexpr bool useTwoVectors = true;
  HDI MatT operator()(MatT a, Vec1T b, Vec2T c) const { return a * b + c; };
};
typedef MatVecOpTest<MulAndAdd<float, int32_t, float>, float, int, int32_t, float>
  MatVecOpTestF_i32_MulAndAdd_i32_f;
typedef MatVecOpTest<MulAndAdd<float, int32_t, double>, float, int, int32_t, double>
  MatVecOpTestF_i32_MulAndAdd_i32_d;
typedef MatVecOpTest<MulAndAdd<float, int64_t, float>, float, int, int64_t, float>
  MatVecOpTestF_i32_MulAndAdd_i64_f;
typedef MatVecOpTest<MulAndAdd<double, int32_t, float>, double, int, int32_t, float>
  MatVecOpTestD_i32_MulAndAdd_i32_f;
MVTEST(MatVecOpTestF_i32_MulAndAdd_i32_f, float, inputs_i32, MV_EPS_F);
MVTEST(MatVecOpTestF_i32_MulAndAdd_i32_d, float, inputs_i32, MV_EPS_F);
MVTEST(MatVecOpTestF_i32_MulAndAdd_i64_f, float, inputs_i32, MV_EPS_F);
MVTEST(MatVecOpTestD_i32_MulAndAdd_i32_f, double, inputs_i32, (double)MV_EPS_F);
// Dequantize-multiply-requantize style op: int8 matrix with float vectors.
// NOTE(review): the final float->int8 cast assumes the intermediate stays in
// int8 range for the generated inputs — confirm if input ranges ever change.
struct DQMultiply {
  static constexpr bool useTwoVectors = true;
  HDI int8_t operator()(int8_t a, float b, float c) const
  {
    return static_cast<int8_t>((static_cast<float>(a) / 100.0f * (b + c) / 20.0f) * 100.0f);
  };
};
typedef MatVecOpTest<DQMultiply, int8_t, int, float, float> MatVecOpTestI8_i32_DQMultiply_f_f;
MVTEST(MatVecOpTestI8_i32_DQMultiply_f_f, int8_t, inputs_i32, 0);
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/coalesced_reduction.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "reduce.cuh"
#include <gtest/gtest.h>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/coalesced_reduction.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Parameters for the coalesced-reduction test: comparison tolerance, matrix
// shape (rows x cols), and the RNG seed used to generate the input data.
template <typename T>
struct coalescedReductionInputs {
  T tolerance;
  int rows, cols;
  unsigned long long int seed;
};
// Stream-insertion overload required by gtest to print the parameter struct;
// intentionally prints nothing.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const coalescedReductionInputs<T>& dims)
{
  return os;
}
// Thin wrapper over the mdspan-based coalesced_reduction API: reduces each
// row of a rows x cols matrix into `dots` with a squared-value accumulator.
// Kept as a free function: an extended __device__ lambda cannot have private
// or protected access within its class.
template <typename T>
void coalescedReductionLaunch(
  const raft::resources& handle, T* dots, const T* data, int cols, int rows, bool inplace = false)
{
  coalesced_reduction(handle,
                      raft::make_device_matrix_view(data, rows, cols),
                      raft::make_device_vector_view(dots, rows),
                      (T)0,
                      inplace,
                      raft::sq_op{});
}
/**
 * Fixture comparing raft::linalg::coalesced_reduction against a naive
 * row-wise reduction. Each run reduces twice: once with inplace = false
 * (overwrite) and once with inplace = true (accumulate), for both the
 * reference and the tested implementation, so the final buffers hold the
 * doubled sums on both sides.
 */
template <typename T>
class coalescedReductionTest : public ::testing::TestWithParam<coalescedReductionInputs<T>> {
 public:
  coalescedReductionTest()
    : params(::testing::TestWithParam<coalescedReductionInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream),
      // The reduction emits one value per row, so `rows` elements suffice
      // (these buffers were previously over-allocated as rows * cols).
      dots_exp(params.rows, stream),
      dots_act(params.rows, stream)
  {
  }

 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    int rows = params.rows, cols = params.cols;
    int len  = rows * cols;
    uniform(handle, r, data.data(), len, T(-1.0), T(1.0));
    // Perform reduction with default inplace = false first and inplace = true next
    naiveCoalescedReduction(dots_exp.data(),
                            data.data(),
                            cols,
                            rows,
                            stream,
                            T(0),
                            false,
                            raft::sq_op{},
                            raft::add_op{},
                            raft::identity_op{});
    naiveCoalescedReduction(dots_exp.data(),
                            data.data(),
                            cols,
                            rows,
                            stream,
                            T(0),
                            true,
                            raft::sq_op{},
                            raft::add_op{},
                            raft::identity_op{});
    coalescedReductionLaunch(handle, dots_act.data(), data.data(), cols, rows);
    coalescedReductionLaunch(handle, dots_act.data(), data.data(), cols, rows, true);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  coalescedReductionInputs<T> params;
  rmm::device_uvector<T> data;
  rmm::device_uvector<T> dots_exp;
  rmm::device_uvector<T> dots_act;
};
const std::vector<coalescedReductionInputs<float>> inputsf = {{0.000002f, 1024, 32, 1234ULL},
{0.000002f, 1024, 64, 1234ULL},
{0.000002f, 1024, 128, 1234ULL},
{0.000002f, 1024, 256, 1234ULL}};
const std::vector<coalescedReductionInputs<double>> inputsd = {{0.000000001, 1024, 32, 1234ULL},
{0.000000001, 1024, 64, 1234ULL},
{0.000000001, 1024, 128, 1234ULL},
{0.000000001, 1024, 256, 1234ULL}};
typedef coalescedReductionTest<float> coalescedReductionTestF;
TEST_P(coalescedReductionTestF, Result)
{
ASSERT_TRUE(raft::devArrMatch(dots_exp.data(),
dots_act.data(),
params.rows,
raft::CompareApprox<float>(params.tolerance),
stream));
}
typedef coalescedReductionTest<double> coalescedReductionTestD;
TEST_P(coalescedReductionTestD, Result)
{
ASSERT_TRUE(raft::devArrMatch(dots_exp.data(),
dots_act.data(),
params.rows,
raft::CompareApprox<double>(params.tolerance),
stream));
}
INSTANTIATE_TEST_CASE_P(coalescedReductionTests,
coalescedReductionTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(coalescedReductionTests,
coalescedReductionTestD,
::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/cholesky_r1.cu | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cusolver_dn_handle.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/cholesky_r1_update.cuh>
#include <raft/linalg/detail/cusolver_wrappers.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include "../test_utils.cuh"
#include <sstream>
#include <vector>
namespace raft {
namespace linalg {
/**
 * Tests incremental Cholesky factorization via rank-one updates
 * (raft::linalg::choleskyRank1Update) against a from-scratch factorization
 * computed with cusolverDnpotrf at every rank.
 */
template <typename math_t>
class CholeskyR1Test : public ::testing::Test {
 protected:
  CholeskyR1Test()
    : G(n_rows * n_rows, resource::get_cuda_stream(handle)),
      L(n_rows * n_rows, resource::get_cuda_stream(handle)),
      L_exp(n_rows * n_rows, resource::get_cuda_stream(handle)),
      devInfo(resource::get_cuda_stream(handle)),
      workspace(0, resource::get_cuda_stream(handle))
  {
    raft::update_device(G.data(), G_host, n_rows * n_rows, resource::get_cuda_stream(handle));

    // Allocate workspace
    solver_handle = resource::get_cusolver_dn_handle(handle);
    // TODO: Call from public API when ready
    RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDnpotrf_bufferSize(
      solver_handle, CUBLAS_FILL_MODE_LOWER, n_rows, L.data(), n_rows, &Lwork));

    // Query the rank-one-update workspace size (nullptr workspace => size query).
    int n_bytes = 0;
    // Initializing in CUBLAS_FILL_MODE_LOWER, because that has larger workspace
    // requirements.
    raft::linalg::choleskyRank1Update(handle,
                                      L.data(),
                                      n_rows,
                                      n_rows,
                                      nullptr,
                                      &n_bytes,
                                      CUBLAS_FILL_MODE_LOWER,
                                      resource::get_cuda_stream(handle));
    // Workspace must satisfy both potrf (Lwork elements) and the update (n_bytes).
    Lwork = std::max(Lwork * sizeof(math_t), (size_t)n_bytes);
    workspace.resize(Lwork, resource::get_cuda_stream(handle));
  }

  // For every rank 1..n_rows and both fill modes, grow the factorization one
  // column/row at a time and compare against a fresh potrf of the same rank.
  void testR1Update()
  {
    int n = n_rows * n_rows;
    std::vector<cublasFillMode_t> fillmode{CUBLAS_FILL_MODE_LOWER, CUBLAS_FILL_MODE_UPPER};
    for (auto uplo : fillmode) {
      raft::copy(L.data(), G.data(), n, resource::get_cuda_stream(handle));
      for (int rank = 1; rank <= n_rows; rank++) {
        std::stringstream ss;
        ss << "Rank " << rank << ((uplo == CUBLAS_FILL_MODE_LOWER) ? ", lower" : ", upper");
        SCOPED_TRACE(ss.str());

        // Expected solution using Cholesky factorization from scratch
        raft::copy(L_exp.data(), G.data(), n, resource::get_cuda_stream(handle));
        // TODO: Call from public API when ready
        RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDnpotrf(solver_handle,
                                                                uplo,
                                                                rank,
                                                                L_exp.data(),
                                                                n_rows,
                                                                (math_t*)workspace.data(),
                                                                Lwork,
                                                                devInfo.data(),
                                                                resource::get_cuda_stream(handle)));

        // Incremental Cholesky factorization using rank one updates.
        raft::linalg::choleskyRank1Update(handle,
                                          L.data(),
                                          rank,
                                          n_rows,
                                          workspace.data(),
                                          &Lwork,
                                          uplo,
                                          resource::get_cuda_stream(handle));

        ASSERT_TRUE(raft::devArrMatch(L_exp.data(),
                                      L.data(),
                                      n_rows * rank,
                                      raft::CompareApprox<math_t>(3e-3),
                                      resource::get_cuda_stream(handle)));
      }
    }
  }

  // Exercises the error path: on the 2x2 input G2_host, the rank-2 update is
  // expected to throw a raft::exception, while supplying a small positive
  // `eps` regularizer makes the same call succeed.
  void testR1Error()
  {
    raft::update_device(G.data(), G2_host, 4, resource::get_cuda_stream(handle));
    std::vector<cublasFillMode_t> fillmode{CUBLAS_FILL_MODE_LOWER, CUBLAS_FILL_MODE_UPPER};
    for (auto uplo : fillmode) {
      raft::copy(L.data(), G.data(), 4, resource::get_cuda_stream(handle));
      ASSERT_NO_THROW(raft::linalg::choleskyRank1Update(
        handle, L.data(), 1, 2, workspace.data(), &Lwork, uplo, resource::get_cuda_stream(handle)));
      ASSERT_THROW(raft::linalg::choleskyRank1Update(handle,
                                                     L.data(),
                                                     2,
                                                     2,
                                                     workspace.data(),
                                                     &Lwork,
                                                     uplo,
                                                     resource::get_cuda_stream(handle)),
                   raft::exception);
      math_t eps = std::numeric_limits<math_t>::epsilon();
      ASSERT_NO_THROW(raft::linalg::choleskyRank1Update(handle,
                                                        L.data(),
                                                        2,
                                                        2,
                                                        workspace.data(),
                                                        &Lwork,
                                                        uplo,
                                                        resource::get_cuda_stream(handle),
                                                        eps));
    }
  }

  raft::resources handle;
  cudaStream_t stream;
  cusolverDnHandle_t solver_handle;

  int n_rows = 4;
  int Lwork;  // workspace size in bytes (shared by potrf and the update)

  // 4x4 input Gram matrix for the incremental-factorization test.
  math_t G_host[16] =  // clang-format off
     {107., 1393., 1141., 91.,
      1393., 21132., 15689., 9539.,
      1141., 15689., 13103., 2889.,
      91., 9539., 2889., 23649.};
  // clang-format on

  // 2x2 input on which the rank-2 update is expected to fail (see testR1Error).
  math_t G2_host[4] = {3, 4, 2, 1};

  rmm::device_scalar<int> devInfo;
  rmm::device_uvector<math_t> G;
  rmm::device_uvector<math_t> L_exp;
  rmm::device_uvector<math_t> L;
  rmm::device_uvector<char> workspace;
};
typedef ::testing::Types<float, double> FloatTypes;
TYPED_TEST_CASE(CholeskyR1Test, FloatTypes);
TYPED_TEST(CholeskyR1Test, update) { this->testR1Update(); }
TYPED_TEST(CholeskyR1Test, throwError) { this->testR1Error(); }
}; // namespace linalg
}; // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/gemv.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/gemv.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
namespace linalg {
// GEMV test parameters: matrix shape, leading dimension of A (column-major),
// whether op(A) = A^T, and the RNG seed for input generation.
template <typename T>
struct GemvInputs {
  int n_rows;
  int n_cols;
  int lda;
  bool trans_a;
  unsigned long long int seed;
};
// Reference GEMV implementation: y = op(A) * x for a column-major A with
// leading dimension lda, where op(A) = A^T when trans_a is set.
template <typename T>
RAFT_KERNEL naiveGemv(T* y,
                      const T* A,
                      const T* x,
                      const int n_rows,
                      const int n_cols,
                      const int lda,
                      const bool trans_a)
{
  const int first  = blockIdx.x * blockDim.x + threadIdx.x;
  const int stride = blockDim.x * gridDim.x;
  // Output length and dot-product length swap roles under transposition.
  const int out_len = trans_a ? n_cols : n_rows;
  const int dot_len = trans_a ? n_rows : n_cols;
  for (int i = first; i < out_len; i += stride) {
    T acc = T(0.0);
    for (int j = 0; j < dot_len; j++) {
      // Column-major addressing: without transpose, walk row i across the
      // columns; with transpose, walk down column i.
      acc += trans_a ? A[lda * i + j] * x[j] : A[i + lda * j] * x[j];
    }
    y[i] = acc;
  }
}
/**
 * Fixture for raft::linalg::gemv: fills A and x with uniform random values,
 * computes the reference y = op(A) * x with the naive kernel, then runs the
 * tested gemv on the same inputs. TEST_P compares `refy` against `y`.
 */
template <typename T>
class GemvTest : public ::testing::TestWithParam<GemvInputs<T>> {
 protected:
  GemvInputs<T> params;
  rmm::device_uvector<T> refy;  // Reference result for comparison
  rmm::device_uvector<T> y;     // Computed result

 public:
  GemvTest()
    : testing::TestWithParam<GemvInputs<T>>(),
      refy(0, rmm::cuda_stream_default),
      y(0, rmm::cuda_stream_default)
  {
    rmm::cuda_stream_default.synchronize();
  }

 protected:
  void SetUp() override
  {
    params = ::testing::TestWithParam<GemvInputs<T>>::GetParam();

    raft::resources handle;
    cudaStream_t stream = resource::get_cuda_stream(handle);

    raft::random::RngState r(params.seed);

    // We compute y = op(A) * x and compare against reference result
    // (vector lengths swap between rows/cols when op(A) = A^T).
    size_t aElems = params.lda * params.n_cols;
    size_t xElems = params.trans_a ? params.n_rows : params.n_cols;
    size_t yElems = params.trans_a ? params.n_cols : params.n_rows;

    rmm::device_uvector<T> A(aElems, stream);
    rmm::device_uvector<T> x(xElems, stream);
    refy.resize(yElems, stream);
    y.resize(yElems, stream);

    uniform(handle, r, x.data(), xElems, T(-10.0), T(10.0));
    uniform(handle, r, A.data(), aElems, T(-10.0), T(10.0));

    dim3 blocks(raft::ceildiv<int>(yElems, 256), 1, 1);
    dim3 threads(256, 1, 1);

    naiveGemv<<<blocks, threads, 0, stream>>>(
      refy.data(), A.data(), x.data(), params.n_rows, params.n_cols, params.lda, params.trans_a);

    // NOTE(review): the transposed case passes a row-major view over the
    // column-major buffer, relying on layout reinterpretation to express
    // op(A) = A^T — confirm against the gemv mdspan-overload docs.
    auto A_row_major =
      raft::make_device_matrix_view<const T>(A.data(), params.n_rows, params.n_cols);

    auto A_col_major = raft::make_device_matrix_view<const T, int, raft::col_major>(
      A.data(), params.n_rows, params.n_cols);

    auto x_view = raft::make_device_vector_view<const T, int>(x.data(), xElems);
    auto y_view = raft::make_device_vector_view<T, int>(y.data(), yElems);

    if (params.trans_a) {
      gemv(handle, A_row_major, x_view, y_view);
    } else {
      gemv(handle, A_col_major, x_view, y_view);
    }
    resource::sync_stream(handle);
  }

  void TearDown() override {}
};
const std::vector<GemvInputs<float>> inputsf = {{80, 70, 80, true, 76433ULL},
{80, 100, 80, true, 426646ULL},
{20, 100, 20, true, 37703ULL},
{90, 90, 90, false, 538147ULL},
{30, 100, 30, false, 412352ULL}};
const std::vector<GemvInputs<double>> inputsd = {{10, 70, 10, true, 535648ULL},
{30, 30, 30, true, 956681ULL},
{70, 80, 70, true, 875083ULL},
{90, 90, 90, false, 506321ULL},
{80, 50, 80, false, 701529ULL}};
typedef GemvTest<float> GemvTestF;
TEST_P(GemvTestF, Result)
{
ASSERT_TRUE(raft::devArrMatch(refy.data(),
y.data(),
params.trans_a ? params.n_cols : params.n_rows,
raft::CompareApprox<float>(1e-4)));
}
typedef GemvTest<double> GemvTestD;
// Double-precision result check. Uses a double comparator — the original
// used raft::CompareApprox<float>, which truncated both values to float
// before comparing and defeated the tighter 1e-6 tolerance.
TEST_P(GemvTestD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(refy.data(),
                                y.data(),
                                params.trans_a ? params.n_cols : params.n_rows,
                                raft::CompareApprox<double>(1e-6)));
}
INSTANTIATE_TEST_SUITE_P(GemvTests, GemvTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(GemvTests, GemvTestD, ::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/rsvd.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/rsvd.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <algorithm>
namespace raft {
namespace linalg {
// Randomized-SVD test parameters. When k == 0, the rank (k) and oversampling
// (p) are derived at runtime from PC_perc/UpS_perc; otherwise k and p are
// used directly. use_bbt selects the symmetric (B*B^T) code path.
template <typename T>
struct RsvdInputs {
  T tolerance;
  int n_row;
  int n_col;
  float redundancy;
  T PC_perc;
  T UpS_perc;
  int k;
  int p;
  bool use_bbt;
  unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const RsvdInputs<T>& dims)
{
return os;
}
/**
 * Fixture for randomized SVD. Either decomposes a tiny 3x2 sanity matrix with
 * known factors, or a random matrix with redundant columns, using the
 * percentage-based (rsvd_perc*) or fixed-rank (rsvd_fixed_rank*_jacobi) entry
 * points, with or without the symmetric (BBT) variant.
 */
template <typename T>
class RsvdTest : public ::testing::TestWithParam<RsvdInputs<T>> {
 protected:
  RsvdTest()
    : A(0, stream),
      U(0, stream),
      S(0, stream),
      V(0, stream),
      left_eig_vectors_ref(0, stream),
      right_eig_vectors_ref(0, stream),
      sing_vals_ref(0, stream)
  {
  }

  void SetUp() override
  {
    raft::resources handle;
    stream = resource::get_cuda_stream(handle);

    params = ::testing::TestWithParam<RsvdInputs<T>>::GetParam();
    // rSVD seems to be very sensitive to the random number sequence as well!
    raft::random::RngState r(params.seed, raft::random::GenPC);
    int m          = params.n_row, n = params.n_col;
    T eig_svd_tol  = 1.e-7;
    int max_sweeps = 100;

    T mu = 0.0, sigma = 1.0;
    A.resize(m * n, stream);
    if (params.tolerance > 1) {  // Sanity check with known reference factors
      ASSERT(m == 3, "This test only supports mxn=3x2!");
      ASSERT(m * n == 6, "This test only supports mxn=3x2!");
      T data_h[] = {1.0, 4.0, 2.0, 2.0, 5.0, 1.0};
      raft::update_device(A.data(), data_h, m * n, stream);

      T left_eig_vectors_ref_h[]  = {-0.308219, -0.906133, -0.289695};
      T right_eig_vectors_ref_h[] = {-0.638636, -0.769509};
      T sing_vals_ref_h[]         = {7.065283};

      left_eig_vectors_ref.resize(m, stream);
      right_eig_vectors_ref.resize(n, stream);
      sing_vals_ref.resize(1, stream);

      raft::update_device(left_eig_vectors_ref.data(), left_eig_vectors_ref_h, m * 1, stream);
      raft::update_device(right_eig_vectors_ref.data(), right_eig_vectors_ref_h, n * 1, stream);
      raft::update_device(sing_vals_ref.data(), sing_vals_ref_h, 1, stream);

    } else {  // Other normal tests: random informative block, copied once
      int n_informative   = int(0.25f * n);  // Informative cols
      int len_informative = m * n_informative;

      int n_redundant   = n - n_informative;  // Redundant cols
      int len_redundant = m * n_redundant;

      normal(handle, r, A.data(), len_informative, mu, sigma);
      RAFT_CUDA_TRY(cudaMemcpyAsync(A.data() + len_informative,
                                    A.data(),
                                    len_redundant * sizeof(T),
                                    cudaMemcpyDeviceToDevice,
                                    stream));
    }
    std::vector<T> A_backup_cpu(m *
                                n);  // Backup A matrix as svdJacobi will destroy the content of A
    raft::update_host(A_backup_cpu.data(), A.data(), m * n, stream);

    // k == 0 in the inputs requests the percentage-based API, with the rank
    // and oversampling derived from PC_perc/UpS_perc. Record that request
    // *before* overwriting params.k below — re-checking `params.k == 0` after
    // the clamp (as before) was always false, making the rsvd_perc* branch
    // unreachable.
    const bool use_perc = (params.k == 0);
    if (use_perc) {
      params.k = std::max((int)(std::min(m, n) * params.PC_perc), 1);
      params.p = std::max((int)(std::min(m, n) * params.UpS_perc), 1);
    }

    U.resize(m * params.k, stream);
    S.resize(params.k, stream);
    V.resize(n * params.k, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(U.data(), 0, U.size() * sizeof(T), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(S.data(), 0, S.size() * sizeof(T), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(V.data(), 0, V.size() * sizeof(T), stream));

    auto A_view = raft::make_device_matrix_view<const T, int, raft::col_major>(A.data(), m, n);
    std::optional<raft::device_matrix_view<T, int, raft::col_major>> U_view =
      raft::make_device_matrix_view<T, int, raft::col_major>(U.data(), m, params.k);
    std::optional<raft::device_matrix_view<T, int, raft::col_major>> V_view =
      raft::make_device_matrix_view<T, int, raft::col_major>(V.data(), params.k, n);
    auto S_vec_view = raft::make_device_vector_view(S.data(), params.k);

    // RSVD tests
    if (use_perc) {  // Test with PC and upsampling ratio
      if (params.use_bbt) {
        rsvd_perc_symmetric(
          handle, A_view, S_vec_view, params.PC_perc, params.UpS_perc, U_view, V_view);
      } else {
        rsvd_perc(handle, A_view, S_vec_view, params.PC_perc, params.UpS_perc, U_view, V_view);
      }
    } else {  // Test with directly given fixed rank
      if (params.use_bbt) {
        rsvd_fixed_rank_symmetric_jacobi(
          handle, A_view, S_vec_view, params.p, eig_svd_tol, max_sweeps, U_view, V_view);
      } else {
        rsvd_fixed_rank_jacobi(
          handle, A_view, S_vec_view, params.p, eig_svd_tol, max_sweeps, U_view, V_view);
      }
    }
    // Restore A so norm-based TEST_P bodies can evaluate the factorization.
    raft::update_device(A.data(), A_backup_cpu.data(), m * n, stream);
  }

 protected:
  cudaStream_t stream = 0;
  RsvdInputs<T> params;
  rmm::device_uvector<T> A, U, S, V, left_eig_vectors_ref, right_eig_vectors_ref, sing_vals_ref;
};
const std::vector<RsvdInputs<float>> inputs_fx = {
// Test with ratios
{0.20f, 256, 256, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT
{0.20f, 2048, 256, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20f, 256, 256, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.20f, 2048, 256, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Tall + non-BBT
{0.20f, 2048, 2048, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Square + BBT
{0.60f, 16384, 2048, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20f, 2048, 2048, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.60f, 16384, 2048, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL} // Tall + non-BBT
, // Test with fixed ranks
{0.10f, 256, 256, 0.25f, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT
{0.12f, 2048, 256, 0.25f, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT
{0.10f, 256, 256, 0.25f, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.12f, 2048, 256, 0.25f, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Tall + non-BBT
{0.60f, 2048, 2048, 0.25f, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Square + BBT
{1.00f, 16384, 2048, 0.25f, 0.0f, 0.0f, 100, 5, true, 4321ULL}, // Tall + BBT
{0.60f, 2048, 2048, 0.25f, 0.0f, 0.0f, 100, 5, false, 4321ULL}, // Square + non-BBT
{1.00f, 16384, 2048, 0.25f, 0.0f, 0.0f, 100, 5, false, 4321ULL} // Tall + non-BBT
};
const std::vector<RsvdInputs<double>> inputs_dx = {
// Test with ratios
{0.20, 256, 256, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT
{0.20, 2048, 256, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20, 256, 256, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.20, 2048, 256, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL}, // Tall + non-BBT
{0.20, 2048, 2048, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, // Square + BBT
{0.60, 16384, 2048, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL}, // Tall + BBT
{0.20, 2048, 2048, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL}, // Square + non-BBT
{0.60, 16384, 2048, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL} // Tall + non-BBT
, // Test with fixed ranks
{0.10, 256, 256, 0.25f, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT
{0.12, 2048, 256, 0.25f, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT
{0.10, 256, 256, 0.25f, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT
{0.12, 2048, 256, 0.25f, 0.0, 0.0, 100, 5, false, 4321ULL}, // Tall + non-BBT
{0.60, 2048, 2048, 0.25f, 0.0, 0.0, 100, 5, true, 4321ULL}, // Square + BBT
{1.00, 16384, 2048, 0.25f, 0.0, 0.0, 100, 5, true, 4321ULL}, // Tall + BBT
{0.60, 2048, 2048, 0.25f, 0.0, 0.0, 100, 5, false, 4321ULL}, // Square + non-BBT
{1.00, 16384, 2048, 0.25f, 0.0, 0.0, 100, 5, false, 4321ULL} // Tall + non-BBT
};
const std::vector<RsvdInputs<float>> sanity_inputs_fx = {
{100000000000000000.0f, 3, 2, 0.25f, 0.2f, 0.05f, 0, 0, true, 4321ULL},
{100000000000000000.0f, 3, 2, 0.25f, 0.0f, 0.0f, 1, 1, true, 4321ULL},
{100000000000000000.0f, 3, 2, 0.25f, 0.2f, 0.05f, 0, 0, false, 4321ULL},
{100000000000000000.0f, 3, 2, 0.25f, 0.0f, 0.0f, 1, 1, false, 4321ULL}};
const std::vector<RsvdInputs<double>> sanity_inputs_dx = {
{100000000000000000.0, 3, 2, 0.25f, 0.2, 0.05, 0, 0, true, 4321ULL},
{100000000000000000.0, 3, 2, 0.25f, 0.0, 0.0, 1, 1, true, 4321ULL},
{100000000000000000.0, 3, 2, 0.25f, 0.2, 0.05, 0, 0, false, 4321ULL},
{100000000000000000.0, 3, 2, 0.25f, 0.0, 0.0, 1, 1, false, 4321ULL}};
typedef RsvdTest<float> RsvdSanityCheckValF;
TEST_P(RsvdSanityCheckValF, Result)
{
ASSERT_TRUE(devArrMatch(
sing_vals_ref.data(), S.data(), params.k, raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckValD;
TEST_P(RsvdSanityCheckValD, Result)
{
ASSERT_TRUE(devArrMatch(
sing_vals_ref.data(), S.data(), params.k, raft::CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdSanityCheckLeftVecF;
TEST_P(RsvdSanityCheckLeftVecF, Result)
{
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref.data(),
U.data(),
params.n_row * params.k,
raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckLeftVecD;
TEST_P(RsvdSanityCheckLeftVecD, Result)
{
ASSERT_TRUE(devArrMatch(left_eig_vectors_ref.data(),
U.data(),
params.n_row * params.k,
raft::CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdSanityCheckRightVecF;
TEST_P(RsvdSanityCheckRightVecF, Result)
{
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref.data(),
V.data(),
params.n_col * params.k,
raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef RsvdTest<double> RsvdSanityCheckRightVecD;
TEST_P(RsvdSanityCheckRightVecD, Result)
{
ASSERT_TRUE(devArrMatch(right_eig_vectors_ref.data(),
V.data(),
params.n_col * params.k,
raft::CompareApproxAbs<double>(params.tolerance)));
}
typedef RsvdTest<float> RsvdTestSquareMatrixNormF;
TEST_P(RsvdTestSquareMatrixNormF, Result)
{
raft::resources handle;
ASSERT_TRUE(raft::linalg::evaluateSVDByL2Norm(handle,
A.data(),
U.data(),
S.data(),
V.data(),
params.n_row,
params.n_col,
params.k,
4 * params.tolerance,
resource::get_cuda_stream(handle)));
}
typedef RsvdTest<double> RsvdTestSquareMatrixNormD;
TEST_P(RsvdTestSquareMatrixNormD, Result)
{
raft::resources handle;
ASSERT_TRUE(raft::linalg::evaluateSVDByL2Norm(handle,
A.data(),
U.data(),
S.data(),
V.data(),
params.n_row,
params.n_col,
params.k,
4 * params.tolerance,
resource::get_cuda_stream(handle)));
}
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValF, ::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckValD, ::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecF, ::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckLeftVecD, ::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecF, ::testing::ValuesIn(sanity_inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdSanityCheckRightVecD, ::testing::ValuesIn(sanity_inputs_dx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormF, ::testing::ValuesIn(inputs_fx));
INSTANTIATE_TEST_CASE_P(RsvdTests, RsvdTestSquareMatrixNormD, ::testing::ValuesIn(inputs_dx));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/ternary_op.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/ternary_op.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Test parameters: comparison tolerance, vector length, and RNG seed.
// NOTE(review): named BinaryOpInputs but it parameterizes the ternary-op
// test below — presumably carried over from binary_op.cu.
template <typename InType, typename IdxType = int, typename OutType = InType>
struct BinaryOpInputs {
  InType tolerance;
  IdxType len;
  unsigned long long int seed;
};
template <typename InType, typename IdxType = int, typename OutType = InType>
::std::ostream& operator<<(::std::ostream& os, const BinaryOpInputs<InType, IdxType, OutType>& d)
{
return os;
}
/**
 * Applies raft::linalg::ternary_op with add and multiply lambdas to three
 * constant-filled vectors (1, 2, 3) and checks the outputs against
 * constant-filled references (1+2+3 = 6 and 1*2*3 = 6).
 */
template <typename T>
class ternaryOpTest : public ::testing::TestWithParam<BinaryOpInputs<T>> {
 public:
  ternaryOpTest()
    : params(::testing::TestWithParam<BinaryOpInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      out_add_ref(params.len, stream),
      out_add(params.len, stream),
      out_mul_ref(params.len, stream),
      out_mul(params.len, stream)
  {
  }

  void SetUp() override
  {
    raft::random::RngState rng(params.seed);
    int len = params.len;
    rmm::device_uvector<T> in1(len, stream);
    rmm::device_uvector<T> in2(len, stream);
    rmm::device_uvector<T> in3(len, stream);
    fill(handle, rng, out_add_ref.data(), len, T(6.0));
    fill(handle, rng, out_mul_ref.data(), len, T(6.0));
    fill(handle, rng, in1.data(), len, T(1.0));
    fill(handle, rng, in2.data(), len, T(2.0));
    fill(handle, rng, in3.data(), len, T(3.0));

    auto add          = [] __device__(T a, T b, T c) { return a + b + c; };
    auto mul          = [] __device__(T a, T b, T c) { return a * b * c; };
    auto out_add_view = raft::make_device_vector_view(out_add.data(), len);
    auto out_mul_view = raft::make_device_vector_view(out_mul.data(), len);
    auto in1_view     = raft::make_device_vector_view<const T>(in1.data(), len);
    auto in2_view     = raft::make_device_vector_view<const T>(in2.data(), len);
    auto in3_view     = raft::make_device_vector_view<const T>(in3.data(), len);
    ternary_op(handle, in1_view, in2_view, in3_view, out_add_view, add);
    ternary_op(handle, in1_view, in2_view, in3_view, out_mul_view, mul);
    // Ensure all async work has completed before TEST_P compares results
    // (devArrMatch is invoked there without this stream); sibling fixtures
    // in this directory synchronize at the end of SetUp as well.
    resource::sync_stream(handle, stream);
  }

 protected:
  BinaryOpInputs<T> params;
  raft::resources handle;
  cudaStream_t stream = 0;
  rmm::device_uvector<T> out_add_ref, out_add, out_mul_ref, out_mul;
};
const std::vector<BinaryOpInputs<float>> inputsf = {{0.000001f, 1024 * 1024, 1234ULL},
{0.000001f, 1024 * 1024 + 2, 1234ULL},
{0.000001f, 1024 * 1024 + 1, 1234ULL}};
typedef ternaryOpTest<float> ternaryOpTestF;
TEST_P(ternaryOpTestF, Result)
{
ASSERT_TRUE(devArrMatch(
out_add_ref.data(), out_add.data(), params.len, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(
out_mul_ref.data(), out_mul.data(), params.len, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ternaryOpTests, ternaryOpTestF, ::testing::ValuesIn(inputsf));
const std::vector<BinaryOpInputs<double>> inputsd = {{0.00000001, 1024 * 1024, 1234ULL},
{0.00000001, 1024 * 1024 + 2, 1234ULL},
{0.00000001, 1024 * 1024 + 1, 1234ULL}};
typedef ternaryOpTest<double> ternaryOpTestD;
TEST_P(ternaryOpTestD, Result)
{
ASSERT_TRUE(devArrMatch(
out_add_ref.data(), out_add.data(), params.len, raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(
out_mul_ref.data(), out_mul.data(), params.len, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ternaryOpTests, ternaryOpTestD, ::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/map_then_reduce.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <limits>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/map_reduce.cuh>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace linalg {
// Reference map-reduce kernel: each thread maps one input element and
// atomically adds the result into the single accumulator out[0].
// NOTE(review): assumes *out starts at zero — callers must zero-initialize it.
template <typename InType, typename OutType, typename MapOp>
RAFT_KERNEL naiveMapReduceKernel(OutType* out, const InType* in, size_t len, MapOp map)
{
  // idx is int while len is size_t; fine for the lengths used in these tests.
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < len) { raft::myAtomicAdd(out, (OutType)map(in[idx])); }
}
// Host launcher for the reference map-reduce kernel: one thread per element,
// accumulating map(in[i]) into out[0] on the given stream.
template <typename InType, typename OutType, typename MapOp>
void naiveMapReduce(OutType* out, const InType* in, size_t len, MapOp map, cudaStream_t stream)
{
  static const int TPB = 64;
  int nblks            = raft::ceildiv(len, (size_t)TPB);
  naiveMapReduceKernel<InType, OutType, MapOp><<<nblks, TPB, 0, stream>>>(out, in, len, map);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Map-reduce test parameters: comparison tolerance, input length, RNG seed.
template <typename T>
struct MapReduceInputs {
  T tolerance;
  size_t len;
  unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MapReduceInputs<T>& dims)
{
return os;
}
// Kept as a free function — an extended __device__ lambda cannot have
// private or protected access within its class.
// Runs the naive reference reduction into out_ref[0] and the tested
// mapThenSumReduce into out[0], both on the same stream.
template <typename InType, typename OutType>
void mapReduceLaunch(
  OutType* out_ref, OutType* out, const InType* in, size_t len, cudaStream_t stream)
{
  naiveMapReduce(out_ref, in, len, raft::identity_op{}, stream);
  // Pass the caller's stream (previously hard-coded to stream 0, so the
  // tested reduction ran unordered w.r.t. the reference and the later sync).
  mapThenSumReduce(out, len, raft::identity_op{}, stream, in);
}
/**
 * Fixture comparing mapThenSumReduce against the naive atomic-add reference.
 * Both results land in element 0 of their respective output buffers.
 */
template <typename InType, typename OutType>
class MapReduceTest : public ::testing::TestWithParam<MapReduceInputs<InType>> {
 public:
  MapReduceTest()
    : params(::testing::TestWithParam<MapReduceInputs<InType>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in(params.len, stream),
      out_ref(params.len, stream),
      out(params.len, stream)
  {
  }

 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    auto len = params.len;
    uniform(handle, r, in.data(), len, InType(-1.0), InType(1.0));
    // Zero both output buffers: the naive reference accumulates into
    // out_ref[0] with atomicAdd (which requires a zeroed start), and the
    // TEST_P bodies compare all `len` elements, so the unwritten tail must
    // be deterministic too.
    RAFT_CUDA_TRY(cudaMemsetAsync(out_ref.data(), 0, out_ref.size() * sizeof(OutType), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(out.data(), 0, out.size() * sizeof(OutType), stream));
    mapReduceLaunch(out_ref.data(), out.data(), in.data(), len, stream);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;

  MapReduceInputs<InType> params;
  rmm::device_uvector<InType> in;
  rmm::device_uvector<OutType> out_ref, out;
};
// {tolerance, len, seed}
const std::vector<MapReduceInputs<float>> inputsf = {{0.001f, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<float, float> MapReduceTestFF;
TEST_P(MapReduceTestFF, Result)
{
  // NOTE(review): only element 0 of each buffer is written by the scalar
  // reduction, yet params.len elements are compared — verify the trailing
  // memory comparison is intentional.
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_SUITE_P(MapReduceTests, MapReduceTestFF, ::testing::ValuesIn(inputsf));
// float input accumulated in double
typedef MapReduceTest<float, double> MapReduceTestFD;
TEST_P(MapReduceTestFD, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_SUITE_P(MapReduceTests, MapReduceTestFD, ::testing::ValuesIn(inputsf));
// {tolerance, len, seed} — tighter tolerance for double precision
const std::vector<MapReduceInputs<double>> inputsd = {{0.000001, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<double, double> MapReduceTestDD;
TEST_P(MapReduceTestDD, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_SUITE_P(MapReduceTests, MapReduceTestDD, ::testing::ValuesIn(inputsd));
// Typed fixture for map_reduce with generic (non-sum) reduction operators.
// The input is uniform in (2, 3) with two planted outliers — 1 at index 42
// and 5 at index 337 — so the global min (1) and max (5) are known exactly.
template <typename T>
class MapGenericReduceTest : public ::testing::Test {
  using InType  = typename T::first_type;
  using OutType = typename T::second_type;

 protected:
  MapGenericReduceTest()
    : input(n, resource::get_cuda_stream(handle)), output(resource::get_cuda_stream(handle))
  {
    initInput(input.data(), input.size(), resource::get_cuda_stream(handle));
  }

 public:
  // Fill `input` with uniform (2, 3) values, then plant the two outliers.
  // NOTE(review): the `stream` parameter is unused — the handle's stream is
  // fetched directly for the updates instead.
  void initInput(InType* input, int n, cudaStream_t stream)
  {
    raft::random::RngState r(137);
    uniform(handle, r, input, n, InType(2), InType(3));
    InType val = 1;
    raft::update_device(input + 42, &val, 1, resource::get_cuda_stream(handle));
    val = 5;
    raft::update_device(input + 337, &val, 1, resource::get_cuda_stream(handle));
  }
  // A min-reduction must find the planted value 1.
  void testMin()
  {
    OutType neutral  = std::numeric_limits<InType>::max();
    auto output_view = raft::make_device_scalar_view(output.data());
    auto input_view  = raft::make_device_vector_view<const InType>(
      input.data(), static_cast<std::uint32_t>(input.size()));
    map_reduce(handle, input_view, output_view, neutral, raft::identity_op{}, cub::Min());
    EXPECT_TRUE(raft::devArrMatch(
      OutType(1), output.data(), 1, raft::Compare<OutType>(), resource::get_cuda_stream(handle)));
  }
  // A max-reduction must find the planted value 5.
  void testMax()
  {
    OutType neutral  = std::numeric_limits<InType>::min();
    auto output_view = raft::make_device_scalar_view(output.data());
    auto input_view  = raft::make_device_vector_view<const InType>(
      input.data(), static_cast<std::uint32_t>(input.size()));
    map_reduce(handle, input_view, output_view, neutral, raft::identity_op{}, cub::Max());
    EXPECT_TRUE(raft::devArrMatch(
      OutType(5), output.data(), 1, raft::Compare<OutType>(), resource::get_cuda_stream(handle)));
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;  // NOTE(review): never initialized or used — candidate for removal
  int n = 1237;         // input length; must exceed the outlier indices 42 and 337
  rmm::device_uvector<InType> input;
  rmm::device_scalar<OutType> output;
};
// (input type, accumulator type) combinations exercised by the typed tests.
using IoTypePair =
  ::testing::Types<std::pair<float, float>, std::pair<float, double>, std::pair<double, double>>;
// TYPED_TEST_CASE is the pre-1.10 gtest spelling of TYPED_TEST_SUITE.
TYPED_TEST_CASE(MapGenericReduceTest, IoTypePair);
TYPED_TEST(MapGenericReduceTest, min) { this->testMin(); }
TYPED_TEST(MapGenericReduceTest, max) { this->testMax(); }
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/strided_reduction.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "reduce.cuh"
#include <gtest/gtest.h>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/strided_reduction.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
template <typename T>
struct stridedReductionInputs {
T tolerance;
int rows, cols;
unsigned long long int seed;
};
template <typename T>
void stridedReductionLaunch(
T* dots, const T* data, int cols, int rows, bool inplace, cudaStream_t stream)
{
raft::resources handle;
resource::set_cuda_stream(handle, stream);
auto dots_view = raft::make_device_vector_view(dots, cols);
auto data_view = raft::make_device_matrix_view(data, rows, cols);
strided_reduction(handle, data_view, dots_view, (T)0, inplace, raft::sq_op{});
}
// Fixture: reduces a random rows x cols matrix with both the naive reference
// implementation and the raft primitive, running inplace=false first and
// inplace=true second (the second pass accumulates onto the first result).
// Expected and actual outputs are compared in the TEST_P bodies below.
template <typename T>
class stridedReductionTest : public ::testing::TestWithParam<stridedReductionInputs<T>> {
 public:
  stridedReductionTest()
    : params(::testing::TestWithParam<stridedReductionInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream),
      dots_exp(params.cols, stream),  // expected dot products (from test)
      dots_act(params.cols, stream)   // actual dot products (from prim)
  {
  }

 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    int rows = params.rows, cols = params.cols;
    int len  = rows * cols;
    uniform(handle, r, data.data(), len, T(-1.0), T(1.0));  // initialize matrix to random
    // Perform reduction with default inplace = false first and inplace = true next
    naiveStridedReduction(dots_exp.data(),
                          data.data(),
                          cols,
                          rows,
                          stream,
                          T(0),
                          false,
                          raft::sq_op{},
                          raft::add_op{},
                          raft::identity_op{});
    naiveStridedReduction(dots_exp.data(),
                          data.data(),
                          cols,
                          rows,
                          stream,
                          T(0),
                          true,
                          raft::sq_op{},
                          raft::add_op{},
                          raft::identity_op{});
    // Same two-pass sequence through the public API under test.
    stridedReductionLaunch(dots_act.data(), data.data(), cols, rows, false, stream);
    stridedReductionLaunch(dots_act.data(), data.data(), cols, rows, true, stream);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  stridedReductionInputs<T> params;
  rmm::device_uvector<T> data, dots_exp, dots_act;
};
// {tolerance, rows, cols, seed}
const std::vector<stridedReductionInputs<float>> inputsf = {{0.00001f, 1024, 32, 1234ULL},
                                                            {0.00001f, 1024, 64, 1234ULL},
                                                            {0.00001f, 1024, 128, 1234ULL},
                                                            {0.00001f, 1024, 256, 1234ULL}};
const std::vector<stridedReductionInputs<double>> inputsd = {{0.000000001, 1024, 32, 1234ULL},
                                                             {0.000000001, 1024, 64, 1234ULL},
                                                             {0.000000001, 1024, 128, 1234ULL},
                                                             {0.000000001, 1024, 256, 1234ULL}};
typedef stridedReductionTest<float> stridedReductionTestF;
TEST_P(stridedReductionTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    dots_exp.data(), dots_act.data(), params.cols, raft::CompareApprox<float>(params.tolerance)));
}
typedef stridedReductionTest<double> stridedReductionTestD;
TEST_P(stridedReductionTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    dots_exp.data(), dots_act.data(), params.cols, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(stridedReductionTests, stridedReductionTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(stridedReductionTests, stridedReductionTestD, ::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/add.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/add.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
namespace linalg {
// Reference kernel: element-wise out[i] = in1[i] + in2[i], one thread per
// element, result converted to OutT.
template <typename InT, typename OutT = InT>
RAFT_KERNEL naiveAddElemKernel(OutT* out, const InT* in1, const InT* in2, int len)
{
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= len) { return; }
  out[tid] = OutT(in1[tid] + in2[tid]);
}
// Launch the reference element-wise add kernel with a fixed block size and
// surface any launch error immediately.
template <typename InT, typename OutT = InT>
void naiveAddElem(OutT* out, const InT* in1, const InT* in2, int len, cudaStream_t stream)
{
  constexpr int kThreadsPerBlock = 64;
  const int grid                 = raft::ceildiv(len, kThreadsPerBlock);
  naiveAddElemKernel<InT, OutT><<<grid, kThreadsPerBlock, 0, stream>>>(out, in1, in2, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Parameters for a single element-wise add test case.
template <typename InT, typename OutT = InT>
struct AddInputs {
  OutT tolerance;               // allowed error when comparing results
  int len;                      // number of elements
  unsigned long long int seed;  // RNG seed
};

// Print the parameters so gtest failure messages identify the failing case
// (the previous implementation printed nothing).
template <typename InT, typename OutT = InT>
::std::ostream& operator<<(::std::ostream& os, const AddInputs<InT, OutT>& dims)
{
  return os << "AddInputs{tolerance=" << dims.tolerance << ", len=" << dims.len
            << ", seed=" << dims.seed << "}";
}
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/divide.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "unary_op.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/divide.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Reference kernel: element-wise out[i] = in[i] / scalar, one thread per
// element.
template <typename Type>
RAFT_KERNEL naiveDivideKernel(Type* out, const Type* in, Type scalar, int len)
{
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= len) { return; }
  out[tid] = in[tid] / scalar;
}
// Launch the reference divide-by-scalar kernel with a fixed block size and
// surface any launch error immediately.
template <typename Type>
void naiveDivide(Type* out, const Type* in, Type scalar, int len, cudaStream_t stream)
{
  constexpr int kThreadsPerBlock = 64;
  const int grid                 = raft::ceildiv(len, kThreadsPerBlock);
  naiveDivideKernel<Type><<<grid, kThreadsPerBlock, 0, stream>>>(out, in, scalar, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Fixture: divides a random vector by params.scalar with both the naive
// reference kernel (into out_ref) and raft::linalg::divide_scalar (into out);
// results are compared in the TEST_P bodies below.
template <typename T>
class DivideTest : public ::testing::TestWithParam<raft::linalg::UnaryOpInputs<T>> {
 public:
  DivideTest()
    : params(::testing::TestWithParam<raft::linalg::UnaryOpInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in(params.len, stream),
      out_ref(params.len, stream),
      out(params.len, stream)
  {
  }

 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    int len = params.len;
    uniform(handle, r, in.data(), len, T(-1.0), T(1.0));
    // Reference result via the raw-pointer kernel...
    naiveDivide(out_ref.data(), in.data(), params.scalar, len, stream);
    // ...and the mdspan-based public API under test; the divisor is passed as
    // a host scalar view.
    auto out_view    = raft::make_device_vector_view(out.data(), len);
    auto in_view     = raft::make_device_vector_view<const T>(in.data(), len);
    auto scalar_view = raft::make_host_scalar_view<const T>(&params.scalar);
    divide_scalar(handle, in_view, out_view, scalar_view);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  UnaryOpInputs<T> params;
  rmm::device_uvector<T> in;
  rmm::device_uvector<T> out_ref;
  rmm::device_uvector<T> out;
};
// {tolerance, len, scalar, seed}
const std::vector<UnaryOpInputs<float>> inputsf = {{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef DivideTest<float> DivideTestF;
TEST_P(DivideTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_SUITE_P(DivideTests, DivideTestF, ::testing::ValuesIn(inputsf));
typedef DivideTest<double> DivideTestD;
// {tolerance, len, scalar, seed}
const std::vector<UnaryOpInputs<double>> inputsd = {{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
TEST_P(DivideTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<double>(params.tolerance), stream));
}
INSTANTIATE_TEST_SUITE_P(DivideTests, DivideTestD, ::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/transpose.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/transpose.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <gtest/gtest.h>
#include <raft/core/device_mdspan.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace linalg {
// Parameters for a transpose test case. (The historical "Tranpose" spelling
// is kept because the name is referenced throughout this file.)
template <typename T>
struct TranposeInputs {
  T tolerance;  // allowed error when comparing against the reference
  int len;      // total element count; the fixture requires len == 9
  int n_row;
  int n_col;
  unsigned long long int seed;  // unused by the fixture (data is fixed)
};

// Print the parameters so gtest failure messages identify the failing case
// (the previous implementation printed nothing).
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const TranposeInputs<T>& dims)
{
  return os << "TranposeInputs{tolerance=" << dims.tolerance << ", len=" << dims.len
            << ", n_row=" << dims.n_row << ", n_col=" << dims.n_col << ", seed=" << dims.seed
            << "}";
}
// Fixture: transposes a fixed 3x3 matrix both out-of-place (into data_trans)
// and in-place (overwriting data); both results are checked against the
// hard-coded reference in the TEST_P bodies below.
template <typename T>
class TransposeTest : public ::testing::TestWithParam<TranposeInputs<T>> {
 public:
  TransposeTest()
    : params(::testing::TestWithParam<TranposeInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.len, stream),
      data_trans_ref(params.len, stream),
      data_trans(params.len, stream)
  {
  }

 protected:
  void SetUp() override
  {
    int len = params.len;
    ASSERT(params.len == 9, "This test works only with len=9!");
    T data_h[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    raft::update_device(data.data(), data_h, len, stream);
    // Hand-computed transpose of the 3x3 matrix above.
    T data_ref_h[] = {1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0};
    raft::update_device(data_trans_ref.data(), data_ref_h, len, stream);
    // Out-of-place transpose...
    transpose(handle, data.data(), data_trans.data(), params.n_row, params.n_col, stream);
    // ...then in-place transpose of the (square) matrix.
    transpose(data.data(), params.n_row, stream);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  TranposeInputs<T> params;
  rmm::device_uvector<T> data, data_trans, data_trans_ref;
};
// {tolerance, len, n_row, n_col, seed}
const std::vector<TranposeInputs<float>> inputsf2  = {{0.1f, 3 * 3, 3, 3, 1234ULL}};
const std::vector<TranposeInputs<double>> inputsd2 = {{0.1, 3 * 3, 3, 3, 1234ULL}};
typedef TransposeTest<float> TransposeTestValF;
TEST_P(TransposeTestValF, Result)
{
  // Check the out-of-place result, then the in-place result.
  ASSERT_TRUE(raft::devArrMatch(data_trans_ref.data(),
                                data_trans.data(),
                                params.len,
                                raft::CompareApproxAbs<float>(params.tolerance)));
  ASSERT_TRUE(raft::devArrMatch(data_trans_ref.data(),
                                data.data(),
                                params.len,
                                raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef TransposeTest<double> TransposeTestValD;
TEST_P(TransposeTestValD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(data_trans_ref.data(),
                                data_trans.data(),
                                params.len,
                                raft::CompareApproxAbs<double>(params.tolerance)));
  ASSERT_TRUE(raft::devArrMatch(data_trans_ref.data(),
                                data.data(),
                                params.len,
                                raft::CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(TransposeTests, TransposeTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(TransposeTests, TransposeTestValD, ::testing::ValuesIn(inputsd2));
namespace {
/**
* We hide these functions in tests for now until we have a heterogeneous mdarray
* implementation.
*/
/**
 * @brief Transpose a matrix. The output has same layout policy as the input.
 *
 * @tparam T Data type of input matrix elements.
 * @tparam IndexType Integer type used for the matrix extents.
 * @tparam LayoutPolicy Layout type of the input matrix. When layout is strided, it can
 * be a submatrix of a larger matrix. Arbitrary stride is not supported.
 *
 * @param[in] handle raft handle for managing expensive cuda resources.
 * @param[in] in Input matrix.
 *
 * @return The transposed matrix.
 */
template <typename T, typename IndexType, typename LayoutPolicy>
[[nodiscard]] auto transpose(raft::resources const& handle,
                             device_matrix_view<T, IndexType, LayoutPolicy> in)
  -> std::enable_if_t<std::is_floating_point_v<T> &&
                        (std::is_same_v<LayoutPolicy, layout_c_contiguous> ||
                         std::is_same_v<LayoutPolicy, layout_f_contiguous>),
                      device_matrix<T, IndexType, LayoutPolicy>>
{
  // Allocate the result with swapped extents and delegate to the public
  // in/out transpose overload.
  auto out = make_device_matrix<T, IndexType, LayoutPolicy>(handle, in.extent(1), in.extent(0));
  ::raft::linalg::transpose(handle, in, out.view());
  return out;
}
/**
 * @brief Transpose a matrix. The output has same layout policy as the input.
 *
 * @tparam T Data type of input matrix elements.
 * @tparam IndexType Integer type used for the matrix extents.
 *
 * @param[in] handle raft handle for managing expensive cuda resources.
 * @param[in] in Input matrix (strided layout, e.g. a submatrix); only
 * row-major-like (stride(1) == 1) or col-major-like (stride(0) == 1)
 * strides are supported.
 *
 * @return The transposed matrix.
 */
template <typename T, typename IndexType>
[[nodiscard]] auto transpose(raft::resources const& handle,
                             device_matrix_view<T, IndexType, layout_stride> in)
  -> std::enable_if_t<std::is_floating_point_v<T>, device_matrix<T, IndexType, layout_stride>>
{
  // Output extents are the input's, swapped.
  matrix_extent<size_t> exts{in.extent(1), in.extent(0)};
  using policy_type =
    typename raft::device_matrix<T, IndexType, layout_stride>::container_policy_type;
  policy_type policy{};
  RAFT_EXPECTS(in.stride(0) == 1 || in.stride(1) == 1, "Unsupported matrix layout.");
  if (in.stride(1) == 1) {
    // row-major submatrix: output row stride equals its column count,
    // which is the input's row count in.extent(0).
    std::array<size_t, 2> strides{in.extent(0), 1};
    auto layout = layout_stride::mapping<matrix_extent<size_t>>{exts, strides};
    raft::device_matrix<T, IndexType, layout_stride> out{handle, layout, policy};
    ::raft::linalg::transpose(handle, in, out.view());
    return out;
  } else {
    // col-major submatrix: output column stride equals its row count,
    // which is the input's column count in.extent(1).
    std::array<size_t, 2> strides{1, in.extent(1)};
    auto layout = layout_stride::mapping<matrix_extent<size_t>>{exts, strides};
    raft::device_matrix<T, IndexType, layout_stride> out{handle, layout, policy};
    ::raft::linalg::transpose(handle, in, out.view());
    return out;
  }
}
// Fills a 32x3 matrix with a running counter, transposes it via the
// mdspan-returning helper above, and verifies layout, extents, and that every
// element moved to its transposed position.
template <typename T, typename LayoutPolicy>
void test_transpose_with_mdspan()
{
  raft::resources handle;
  auto v = make_device_matrix<T, size_t, LayoutPolicy>(handle, 32, 3);
  T k{0};
  for (size_t i = 0; i < v.extent(0); ++i) {
    for (size_t j = 0; j < v.extent(1); ++j) {
      v(i, j) = k++;
    }
  }
  auto out = transpose(handle, v.view());
  // The helper must preserve the input's layout policy.
  static_assert(std::is_same_v<LayoutPolicy, typename decltype(out)::layout_type>);
  ASSERT_EQ(out.extent(0), v.extent(1));
  ASSERT_EQ(out.extent(1), v.extent(0));
  // Walk the output transposed: out(j, i) must replay the fill order.
  k = 0;
  for (size_t i = 0; i < out.extent(1); ++i) {
    for (size_t j = 0; j < out.extent(0); ++j) {
      ASSERT_EQ(out(j, i), k++);
    }
  }
}
} // namespace
// Round-trips row- and column-major matrices through the mdspan-based
// transpose helper for both float and double.
TEST(TransposeTest, MDSpan)
{
  test_transpose_with_mdspan<float, layout_c_contiguous>();
  test_transpose_with_mdspan<double, layout_c_contiguous>();
  test_transpose_with_mdspan<float, layout_f_contiguous>();
  test_transpose_with_mdspan<double, layout_f_contiguous>();
}
namespace {
// Fills a 10x9 submatrix of a 32x33 matrix with a running counter, transposes
// the strided submdspan view, and verifies extents and element placement.
template <typename T, typename LayoutPolicy>
void test_transpose_submatrix()
{
  raft::resources handle;
  auto v = make_device_matrix<T, size_t, LayoutPolicy>(handle, 32, 33);
  T k{0};
  size_t row_beg{3}, row_end{13}, col_beg{2}, col_end{11};
  for (size_t i = row_beg; i < row_end; ++i) {
    for (size_t j = col_beg; j < col_end; ++j) {
      v(i, j) = k++;
    }
  }
  auto vv     = v.view();
  auto submat = std::experimental::submdspan(
    vv, std::make_tuple(row_beg, row_end), std::make_tuple(col_beg, col_end));
  // A submatrix view of a contiguous matrix always has a strided layout.
  static_assert(std::is_same_v<typename decltype(submat)::layout_type, layout_stride>);
  auto out = transpose(handle, submat);
  ASSERT_EQ(out.extent(0), submat.extent(1));
  ASSERT_EQ(out.extent(1), submat.extent(0));
  // Walk the output transposed: out(j, i) must replay the fill order.
  k = 0;
  for (size_t i = 0; i < out.extent(1); ++i) {
    for (size_t j = 0; j < out.extent(0); ++j) {
      ASSERT_EQ(out(j, i), k++);
    }
  }
}
} // namespace
// Exercises the strided-layout transpose overload on submatrix views for
// both layouts and both floating-point types.
TEST(TransposeTest, SubMatrix)
{
  test_transpose_submatrix<float, layout_c_contiguous>();
  test_transpose_submatrix<double, layout_c_contiguous>();
  test_transpose_submatrix<float, layout_f_contiguous>();
  test_transpose_submatrix<double, layout_f_contiguous>();
}
} // end namespace linalg
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/linalg/eig.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/eig.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
// Parameters for an eigendecomposition test case: a fixed n_row x n_col
// matrix with known decomposition, plus a larger random n x n matrix used to
// cross-check the divide-and-conquer and Jacobi solvers against each other.
template <typename T>
struct EigInputs {
  T tolerance;  // allowed error when comparing results
  int len;      // element count of the small fixed matrix (must be 16)
  int n_row;
  int n_col;
  unsigned long long int seed;  // RNG seed for the large random matrix
  int n;                        // side length of the large random matrix
};

// Print the parameters so gtest failure messages identify the failing case
// (the previous implementation printed nothing).
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const EigInputs<T>& dims)
{
  return os << "EigInputs{tolerance=" << dims.tolerance << ", len=" << dims.len
            << ", n_row=" << dims.n_row << ", n_col=" << dims.n_col << ", seed=" << dims.seed
            << ", n=" << dims.n << "}";
}
// Fixture: decomposes a fixed 4x4 covariance matrix (with hand-computed
// eigenpairs) using both eig_dc (divide and conquer) and eig_jacobi, and
// additionally decomposes a larger random n x n matrix with both solvers so
// the TEST_P bodies can compare them against the reference and each other.
template <typename T>
class EigTest : public ::testing::TestWithParam<EigInputs<T>> {
 public:
  EigTest()
    : params(::testing::TestWithParam<EigInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      cov_matrix(params.len, stream),
      eig_vectors(params.len, stream),
      eig_vectors_jacobi(params.len, stream),
      eig_vectors_ref(params.len, stream),
      eig_vals(params.n_col, stream),
      eig_vals_jacobi(params.n_col, stream),
      eig_vals_ref(params.n_col, stream),
      cov_matrix_large(params.n * params.n, stream),
      eig_vectors_large(params.n * params.n, stream),
      eig_vectors_jacobi_large(params.n * params.n, stream),
      eig_vals_large(params.n, stream),
      eig_vals_jacobi_large(params.n, stream)
  {
  }

 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    int len = params.len;
    // Symmetric 4x4 covariance matrix with known eigendecomposition.
    T cov_matrix_h[] = {
      1.0, 0.9, 0.81, 0.729, 0.9, 1.0, 0.9, 0.81, 0.81, 0.9, 1.0, 0.9, 0.729, 0.81, 0.9, 1.0};
    ASSERT(len == 16, "This test only works with 4x4 matrices!");
    raft::update_device(cov_matrix.data(), cov_matrix_h, len, stream);
    // Reference eigenvectors (column major) and eigenvalues (ascending).
    T eig_vectors_ref_h[] = {0.2790,
                             -0.6498,
                             0.6498,
                             -0.2789,
                             -0.5123,
                             0.4874,
                             0.4874,
                             -0.5123,
                             0.6498,
                             0.2789,
                             -0.2789,
                             -0.6498,
                             0.4874,
                             0.5123,
                             0.5123,
                             0.4874};
    T eig_vals_ref_h[]    = {0.0614, 0.1024, 0.3096, 3.5266};
    raft::update_device(eig_vectors_ref.data(), eig_vectors_ref_h, len, stream);
    raft::update_device(eig_vals_ref.data(), eig_vals_ref_h, params.n_col, stream);
    auto cov_matrix_view = raft::make_device_matrix_view<const T, std::uint32_t, raft::col_major>(
      cov_matrix.data(), params.n_row, params.n_col);
    auto eig_vectors_view = raft::make_device_matrix_view<T, std::uint32_t, raft::col_major>(
      eig_vectors.data(), params.n_row, params.n_col);
    auto eig_vals_view =
      raft::make_device_vector_view<T, std::uint32_t>(eig_vals.data(), params.n_row);
    auto eig_vectors_jacobi_view = raft::make_device_matrix_view<T, std::uint32_t, raft::col_major>(
      eig_vectors_jacobi.data(), params.n_row, params.n_col);
    auto eig_vals_jacobi_view =
      raft::make_device_vector_view<T, std::uint32_t>(eig_vals_jacobi.data(), params.n_row);
    // Decompose the small matrix with both solvers.
    eig_dc(handle, cov_matrix_view, eig_vectors_view, eig_vals_view);
    T tol      = 1.e-7;
    int sweeps = 15;
    eig_jacobi(handle, cov_matrix_view, eig_vectors_jacobi_view, eig_vals_jacobi_view, tol, sweeps);
    // test code for comparing two methods
    // NOTE(review): the large matrix is filled with uniform(-1, 1) values and
    // is therefore not symmetric as written — confirm the solvers only read
    // one triangle so both see the same effective matrix.
    len = params.n * params.n;
    uniform(handle, r, cov_matrix_large.data(), len, T(-1.0), T(1.0));
    auto cov_matrix_large_view =
      raft::make_device_matrix_view<const T, std::uint32_t, raft::col_major>(
        cov_matrix_large.data(), params.n, params.n);
    auto eig_vectors_large_view = raft::make_device_matrix_view<T, std::uint32_t, raft::col_major>(
      eig_vectors_large.data(), params.n, params.n);
    auto eig_vals_large_view =
      raft::make_device_vector_view<T, std::uint32_t>(eig_vals_large.data(), params.n);
    auto eig_vectors_jacobi_large_view =
      raft::make_device_matrix_view<T, std::uint32_t, raft::col_major>(
        eig_vectors_jacobi_large.data(), params.n, params.n);
    auto eig_vals_jacobi_large_view =
      raft::make_device_vector_view<T, std::uint32_t>(eig_vals_jacobi_large.data(), params.n);
    eig_dc(handle, cov_matrix_large_view, eig_vectors_large_view, eig_vals_large_view);
    eig_jacobi(handle,
               cov_matrix_large_view,
               eig_vectors_jacobi_large_view,
               eig_vals_jacobi_large_view,
               tol,
               sweeps);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  EigInputs<T> params;
  rmm::device_uvector<T> cov_matrix, eig_vectors, eig_vectors_jacobi, eig_vectors_ref, eig_vals,
    eig_vals_jacobi, eig_vals_ref;
  rmm::device_uvector<T> cov_matrix_large, eig_vectors_large, eig_vectors_jacobi_large,
    eig_vals_large, eig_vals_jacobi_large;
};
// {tolerance, len, n_row, n_col, seed, n}
const std::vector<EigInputs<float>> inputsf2  = {{0.001f, 4 * 4, 4, 4, 1234ULL, 256}};
const std::vector<EigInputs<double>> inputsd2 = {{0.001, 4 * 4, 4, 4, 1234ULL, 256}};
// eig_dc: eigenvalues against the hand-computed reference.
typedef EigTest<float> EigTestValF;
TEST_P(EigTestValF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vals_ref.data(),
                                eig_vals.data(),
                                params.n_col,
                                raft::CompareApproxAbs<float>(params.tolerance),
                                stream));
}
typedef EigTest<double> EigTestValD;
TEST_P(EigTestValD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vals_ref.data(),
                                eig_vals.data(),
                                params.n_col,
                                raft::CompareApproxAbs<double>(params.tolerance),
                                stream));
}
// eig_dc: eigenvectors — absolute comparison tolerates per-vector sign flips.
typedef EigTest<float> EigTestVecF;
TEST_P(EigTestVecF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vectors_ref.data(),
                                eig_vectors.data(),
                                params.len,
                                raft::CompareApproxAbs<float>(params.tolerance),
                                stream));
}
typedef EigTest<double> EigTestVecD;
TEST_P(EigTestVecD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vectors_ref.data(),
                                eig_vectors.data(),
                                params.len,
                                raft::CompareApproxAbs<double>(params.tolerance),
                                stream));
}
// eig_jacobi: eigenvalues against the reference.
typedef EigTest<float> EigTestValJacobiF;
TEST_P(EigTestValJacobiF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vals_ref.data(),
                                eig_vals_jacobi.data(),
                                params.n_col,
                                raft::CompareApproxAbs<float>(params.tolerance),
                                stream));
}
typedef EigTest<double> EigTestValJacobiD;
TEST_P(EigTestValJacobiD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vals_ref.data(),
                                eig_vals_jacobi.data(),
                                params.n_col,
                                raft::CompareApproxAbs<double>(params.tolerance),
                                stream));
}
// eig_jacobi: eigenvectors against the reference.
typedef EigTest<float> EigTestVecJacobiF;
TEST_P(EigTestVecJacobiF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vectors_ref.data(),
                                eig_vectors_jacobi.data(),
                                params.len,
                                raft::CompareApproxAbs<float>(params.tolerance),
                                stream));
}
typedef EigTest<double> EigTestVecJacobiD;
TEST_P(EigTestVecJacobiD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vectors_ref.data(),
                                eig_vectors_jacobi.data(),
                                params.len,
                                raft::CompareApproxAbs<double>(params.tolerance),
                                stream));
}
// Cross-check: both solvers must agree on the large random matrix.
typedef EigTest<float> EigTestVecCompareF;
TEST_P(EigTestVecCompareF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vectors_large.data(),
                                eig_vectors_jacobi_large.data(),
                                (params.n * params.n),
                                raft::CompareApproxAbs<float>(params.tolerance),
                                stream));
}
typedef EigTest<double> EigTestVecCompareD;
TEST_P(EigTestVecCompareD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(eig_vectors_large.data(),
                                eig_vectors_jacobi_large.data(),
                                (params.n * params.n),
                                raft::CompareApproxAbs<double>(params.tolerance),
                                stream));
}
INSTANTIATE_TEST_SUITE_P(EigTests, EigTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(EigTests, EigTestValD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_SUITE_P(EigTests, EigTestVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(EigTests, EigTestVecD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_SUITE_P(EigTests, EigTestValJacobiF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(EigTests, EigTestValJacobiD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_SUITE_P(EigTests, EigTestVecJacobiF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(EigTests, EigTestVecJacobiD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_SUITE_P(EigTests, EigTestVecCompareF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(EigTests, EigTestVecCompareD, ::testing::ValuesIn(inputsd2));
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test/mr | rapidsai_public_repos/raft/cpp/test/mr/host/buffer.cpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <iostream>
#include <memory>
#include <raft/mr/host/buffer.hpp>
namespace raft {
namespace mr {
namespace host {
// Exercises the host buffer's resize/release lifecycle, both with an
// explicit stream and with the stream-less convenience overloads.
TEST(Raft, HostBuffer)
{
  auto alloc = std::make_shared<default_allocator>();
  cudaStream_t stream;
  RAFT_CUDA_TRY(cudaStreamCreate(&stream));
  // no allocation at construction
  buffer<char> buff(alloc, stream);
  ASSERT_EQ(0, buff.size());
  // explicit allocation after construction
  buff.resize(20, stream);
  ASSERT_EQ(20, buff.size());
  // resizing to a smaller buffer size
  buff.resize(10, stream);
  ASSERT_EQ(10, buff.size());
  // explicit deallocation
  buff.release(stream);
  ASSERT_EQ(0, buff.size());
  // use these methods without the explicit stream parameter
  buff.resize(20);
  ASSERT_EQ(20, buff.size());
  buff.resize(10);
  ASSERT_EQ(10, buff.size());
  buff.release();
  ASSERT_EQ(0, buff.size());
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  RAFT_CUDA_TRY(cudaStreamDestroy(stream));
}
// Copy-constructs a host buffer from a zero-filled device buffer and checks
// that the size carries over.
TEST(Raft, DeviceToHostBuffer)
{
  auto d_alloc = std::make_shared<device::default_allocator>();
  auto h_alloc = std::make_shared<default_allocator>();
  cudaStream_t stream;
  RAFT_CUDA_TRY(cudaStreamCreate(&stream));
  device::buffer<char> d_buff(d_alloc, stream, 32);
  RAFT_CUDA_TRY(cudaMemsetAsync(d_buff.data(), 0, sizeof(char) * d_buff.size(), stream));
  buffer<char> h_buff(h_alloc, d_buff);
  ASSERT_EQ(d_buff.size(), h_buff.size());
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  RAFT_CUDA_TRY(cudaStreamDestroy(stream));
}
} // namespace host
} // namespace mr
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/test/mr | rapidsai_public_repos/raft/cpp/test/mr/device/buffer.cpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <iostream>
#include <memory>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/limiting_resource_adaptor.hpp>
namespace raft {
namespace mr {
namespace device {
// Exercise the resize/release lifecycle of rmm::device_uvector on a private stream.
TEST(Raft, DeviceBufferAlloc)
{
  cudaStream_t stream;
  RAFT_CUDA_TRY(cudaStreamCreate(&stream));
  // zero-sized construction performs no allocation
  rmm::device_uvector<char> buff(0, stream);
  ASSERT_EQ(0, buff.size());
  // explicit allocation after construction
  buff.resize(20, stream);
  ASSERT_EQ(20, buff.size());
  // resizing to a smaller buffer size
  buff.resize(10, stream);
  ASSERT_EQ(10, buff.size());
  // explicit deallocation
  buff.release();
  ASSERT_EQ(0, buff.size());
  // repeat the resize/release cycle (NOTE: the old "without the explicit stream
  // parameter" wording predates the rmm port — device_uvector::resize always
  // takes a stream, as the calls below show)
  buff.resize(20, stream);
  ASSERT_EQ(20, buff.size());
  buff.resize(10, stream);
  ASSERT_EQ(10, buff.size());
  buff.release();
  ASSERT_EQ(0, buff.size());
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  RAFT_CUDA_TRY(cudaStreamDestroy(stream));
}
// Verify that resizing through zero and releasing returns all memory to the
// upstream resource (tracked with a limiting_resource_adaptor).
TEST(Raft, DeviceBufferZeroResize)
{
  // Create a limiting_resource_adaptor to track allocations
  auto curr_mr =
    dynamic_cast<rmm::mr::cuda_memory_resource*>(rmm::mr::get_current_device_resource());
  auto limit_mr =
    std::make_shared<rmm::mr::limiting_resource_adaptor<rmm::mr::cuda_memory_resource>>(curr_mr,
                                                                                        1000);
  rmm::mr::set_current_device_resource(limit_mr.get());
  cudaStream_t stream;
  RAFT_CUDA_TRY(cudaStreamCreate(&stream));
  // start with a small non-empty buffer (the old "no allocation at
  // construction" comment was stale: 10 bytes are allocated here)
  rmm::device_uvector<char> buff(10, stream);
  ASSERT_EQ(10, buff.size());
  // shrink to zero elements
  buff.resize(0, stream);
  ASSERT_EQ(0, buff.size());
  // grow again after the zero-size resize
  buff.resize(20, stream);
  ASSERT_EQ(20, buff.size());
  // explicit deallocation
  buff.release();
  ASSERT_EQ(0, buff.size());
  // Now check that there is no memory left. (Used to not be true)
  ASSERT_EQ(0, limit_mr->get_allocated_bytes());
  rmm::mr::set_current_device_resource(curr_mr);
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  RAFT_CUDA_TRY(cudaStreamDestroy(stream));
}
} // namespace device
} // namespace mr
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/scatter.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/scatter.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/itertools.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/shuffle.h>
namespace raft {
/**
 * Host reference for scatter: row r of `in` (D elements) is copied to row
 * map[r] of `out`, for r in [0, map_length).
 * (N, the total row count of `out`, is accepted but not read here.)
 */
template <typename InputIteratorT, typename MapIteratorT, typename OutputIteratorT, typename IdxT>
void naiveScatter(
  InputIteratorT in, IdxT D, IdxT N, MapIteratorT map, IdxT map_length, OutputIteratorT out)
{
  for (IdxT r = 0; r < map_length; ++r) {
    // destination row comes from the map; source row is sequential
    const IdxT dst = static_cast<IdxT>(map[r]) * D;
    const IdxT src = r * D;
    for (IdxT c = 0; c < D; ++c) {
      out[dst + c] = in[src + c];
    }
  }
}
// Parameters for one scatter test case.
template <typename IdxT>
struct ScatterInputs {
  IdxT nrows;           // number of rows in the matrix (also the map length)
  IdxT ncols;           // number of columns per row
  IdxT col_batch_size;  // column batch size passed through to raft::matrix::scatter
  unsigned long long int seed;  // RNG seed for the input values
};
/**
 * Fixture: fills a random matrix and a random permutation map, computes the
 * expected result with naiveScatter on the host, then runs
 * raft::matrix::scatter (which permutes d_in in place). The TEST body
 * compares d_in against d_out_exp.
 */
template <typename MatrixT, typename IdxT>
class ScatterTest : public ::testing::TestWithParam<ScatterInputs<IdxT>> {
 protected:
  // Buffers start empty; they are sized in SetUp once params are read.
  ScatterTest()
    : stream(resource::get_cuda_stream(handle)),
      params(::testing::TestWithParam<ScatterInputs<IdxT>>::GetParam()),
      d_in(0, stream),
      d_out_exp(0, stream),
      d_map(0, stream)
  {
  }
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    raft::random::RngState r_int(params.seed);
    IdxT len = params.nrows * params.ncols;
    // input matrix setup: uniform values in [-1, 1], mirrored to the host
    d_in.resize(params.nrows * params.ncols, stream);
    h_in.resize(params.nrows * params.ncols);
    raft::random::uniform(handle, r, d_in.data(), len, MatrixT(-1.0), MatrixT(1.0));
    raft::update_host(h_in.data(), d_in.data(), len, stream);
    // map setup: a random permutation of [0, nrows) built with thrust
    d_map.resize(params.nrows, stream);
    h_map.resize(params.nrows);
    auto exec_policy = raft::resource::get_thrust_policy(handle);
    thrust::counting_iterator<IdxT> permute_iter(0);
    thrust::copy(exec_policy, permute_iter, permute_iter + params.nrows, d_map.data());
    thrust::default_random_engine g;
    thrust::shuffle(exec_policy, d_map.data(), d_map.data() + params.nrows, g);
    raft::update_host(h_map.data(), d_map.data(), params.nrows, stream);
    resource::sync_stream(handle, stream);
    // expected and actual output matrix setup
    h_out.resize(params.nrows * params.ncols);
    d_out_exp.resize(params.nrows * params.ncols, stream);
    // launch scatter on the host and copy the results to device
    naiveScatter(h_in.data(), params.ncols, params.nrows, h_map.data(), params.nrows, h_out.data());
    raft::update_device(d_out_exp.data(), h_out.data(), params.nrows * params.ncols, stream);
    // run the in-place device scatter under test on d_in
    auto inout_view = raft::make_device_matrix_view<MatrixT, IdxT, row_major>(
      d_in.data(), params.nrows, params.ncols);
    auto map_view = raft::make_device_vector_view<const IdxT, IdxT>(d_map.data(), params.nrows);
    raft::matrix::scatter(handle, inout_view, map_view, params.col_batch_size);
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream = 0;
  ScatterInputs<IdxT> params;
  std::vector<MatrixT> h_in, h_out;   // host copies of input / expected output
  std::vector<IdxT> h_map;            // host copy of the permutation map
  rmm::device_uvector<MatrixT> d_in, d_out_exp;
  rmm::device_uvector<IdxT> d_map;
};
// Stamps out a typedef, the result-comparison TEST_P, and the instantiation
// for one ScatterTest specialization.
// NOTE(review): the comparison functor is hard-coded to raft::Compare<float>.
// That matches the two float instantiations below, but the functor would need
// to follow MatrixT before adding e.g. double-typed tests — confirm then.
#define SCATTER_TEST(test_type, test_name, test_inputs)                                      \
  typedef RAFT_DEPAREN(test_type) test_name;                                                 \
  TEST_P(test_name, Result)                                                                  \
  {                                                                                          \
    ASSERT_TRUE(                                                                             \
      devArrMatch(d_in.data(), d_out_exp.data(), d_out_exp.size(), raft::Compare<float>())); \
  }                                                                                          \
  INSTANTIATE_TEST_CASE_P(ScatterTests, test_name, ::testing::ValuesIn(test_inputs))

// Cartesian product of {nrows} x {ncols} x {col_batch_size} x {seed}.
const std::vector<ScatterInputs<int>> inputs_i32 =
  raft::util::itertools::product<ScatterInputs<int>>(
    {25, 2000}, {6, 31, 129}, {0, 1, 2, 3, 6, 100}, {1234ULL});
const std::vector<ScatterInputs<int64_t>> inputs_i64 =
  raft::util::itertools::product<ScatterInputs<int64_t>>(
    {25, 2000}, {6, 31, 129}, {0, 1, 2, 3, 6, 100}, {1234ULL});
SCATTER_TEST((ScatterTest<float, int>), ScatterTestFI32, inputs_i32);
SCATTER_TEST((ScatterTest<float, int64_t>), ScatterTestFI64, inputs_i64);
}  // end namespace raft
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/slice.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/slice.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
namespace raft {
namespace matrix {
// Parameters for one slice test case.
template <typename T>
struct SliceInputs {
  int rows, cols;               // full matrix dimensions
  unsigned long long int seed;  // RNG seed for the input values
  bool rowMajor;                // layout of both input and sliced output
};
// Human-readable form used by gtest when reporting parameterized failures.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SliceInputs<T>& I)
{
  os << "{ " << I.rows << ", " << I.cols << ", " << I.seed << ", " << I.rowMajor << '}';
  os << std::endl;
  return os;
}
// Host reference for slicing rows [x1, x2) and columns [y1, y2) out of a
// matrix with leading dimension in_lda; handles both layouts.
template <typename Type>
void naiveSlice(
  const Type* in, Type* out, int in_lda, int x1, int y1, int x2, int y2, bool row_major)
{
  // output leading dimension: columns for row-major, rows for col-major
  const int out_lda = row_major ? (y2 - y1) : (x2 - x1);
  for (int i = x1; i < x2; ++i) {
    for (int j = y1; j < y2; ++j) {
      const int oi = i - x1;
      const int oj = j - y1;
      if (row_major) {
        out[oi * out_lda + oj] = in[i * in_lda + j];
      } else {
        out[oi + oj * out_lda] = in[i + j * in_lda];
      }
    }
  }
}
/**
 * Fixture: fills a random matrix, draws random slice coordinates (first half
 * for the start, second half for the end, so the slice is never empty),
 * computes the expected slice on the host with naiveSlice, and runs the
 * device-side raft::matrix::slice in the matching layout.
 */
template <typename T>
class SliceTest : public ::testing::TestWithParam<SliceInputs<T>> {
 public:
  SliceTest()
    : params(::testing::TestWithParam<SliceInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream)
  {
  }

  void SetUp() override
  {
    std::random_device rd;
    std::default_random_engine dre(rd());
    raft::random::RngState r(params.seed);
    int rows = params.rows, cols = params.cols, len = rows * cols;
    // leading dimension follows the layout under test
    auto lda = params.rowMajor ? cols : rows;
    uniform(handle, r, data.data(), len, T(-10.0), T(10.0));

    // random slice bounds: row1 < rows/2 <= row2, col1 < cols/2 <= col2
    std::uniform_int_distribution<int> rowGenerator(0, (rows / 2) - 1);
    auto row1 = rowGenerator(dre);
    auto row2 = rowGenerator(dre) + rows / 2;

    std::uniform_int_distribution<int> colGenerator(0, (cols / 2) - 1);
    auto col1 = colGenerator(dre);
    auto col2 = colGenerator(dre) + cols / 2;

    rmm::device_uvector<T> d_act_result((row2 - row1) * (col2 - col1), stream);
    act_result.resize((row2 - row1) * (col2 - col1));
    exp_result.resize((row2 - row1) * (col2 - col1));

    // host reference
    std::vector<T> h_data(rows * cols);
    raft::update_host(h_data.data(), data.data(), rows * cols, stream);
    naiveSlice(h_data.data(), exp_result.data(), lda, row1, col1, row2, col2, params.rowMajor);

    // views for both layouts; only the one matching params.rowMajor is used
    auto input_F =
      raft::make_device_matrix_view<const T, int, raft::col_major>(data.data(), rows, cols);
    auto output_F = raft::make_device_matrix_view<T, int, raft::col_major>(
      d_act_result.data(), row2 - row1, col2 - col1);
    auto input_C =
      raft::make_device_matrix_view<const T, int, raft::row_major>(data.data(), rows, cols);
    auto output_C = raft::make_device_matrix_view<T, int, raft::row_major>(
      d_act_result.data(), row2 - row1, col2 - col1);

    if (params.rowMajor)
      slice(handle, input_C, output_C, slice_coordinates(row1, col1, row2, col2));
    else
      slice(handle, input_F, output_F, slice_coordinates(row1, col1, row2, col2));

    raft::update_host(act_result.data(), d_act_result.data(), d_act_result.size(), stream);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  SliceInputs<T> params;
  rmm::device_uvector<T> data;                // full input matrix (device)
  std::vector<T> exp_result, act_result;      // host reference vs device result
};
///// Row- and column-wise tests
const std::vector<SliceInputs<float>> inputsf = {{32, 1024, 1234ULL, true},
{64, 1024, 1234ULL, false},
{128, 1024, 1234ULL, true},
{256, 1024, 1234ULL, false},
{512, 512, 1234ULL, true},
{1024, 32, 1234ULL, false},
{1024, 64, 1234ULL, true},
{1024, 128, 1234ULL, false},
{1024, 256, 1234ULL, true}};
const std::vector<SliceInputs<double>> inputsd = {
{32, 1024, 1234ULL, true},
{64, 1024, 1234ULL, false},
{128, 1024, 1234ULL, true},
{256, 1024, 1234ULL, false},
{512, 512, 1234ULL, true},
{1024, 32, 1234ULL, false},
{1024, 64, 1234ULL, true},
{1024, 128, 1234ULL, false},
{1024, 256, 1234ULL, true},
};
// Exact comparison is valid here: slicing only copies elements, so the device
// result must match the host reference bit-for-bit.
typedef SliceTest<float> SliceTestF;
TEST_P(SliceTestF, Result)
{
  ASSERT_TRUE(hostVecMatch(exp_result, act_result, raft::Compare<float>()));
}

typedef SliceTest<double> SliceTestD;
TEST_P(SliceTestD, Result)
{
  ASSERT_TRUE(hostVecMatch(exp_result, act_result, raft::Compare<double>()));
}

INSTANTIATE_TEST_CASE_P(SliceTests, SliceTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(SliceTests, SliceTestD, ::testing::ValuesIn(inputsd));
} // end namespace matrix
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/argmax.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <cstdint>
#include <gtest/gtest.h>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/argmax.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace matrix {
// Parameters for one argmax test case: a flattened row-major input matrix and
// the expected per-row argmax indices.
template <typename T, typename IdxT>
struct ArgMaxInputs {
  std::vector<T> input_matrix;      // n_rows * n_cols values, row-major
  std::vector<IdxT> output_matrix;  // expected argmax index for each row
  std::size_t n_rows;
  std::size_t n_cols;
};
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const ArgMaxInputs<T, IdxT>& dims)
{
return os;
}
/**
 * Fixture: uploads the input matrix and the expected per-row argmax indices,
 * runs raft::matrix::argmax, and leaves `output` vs `expected` for the TEST
 * body to compare.
 */
template <typename T, typename IdxT>
class ArgMaxTest : public ::testing::TestWithParam<ArgMaxInputs<T, IdxT>> {
 public:
  ArgMaxTest()
    : params(::testing::TestWithParam<ArgMaxInputs<T, IdxT>>::GetParam()),
      input(raft::make_device_matrix<T, std::uint32_t, row_major>(
        handle, params.n_rows, params.n_cols)),
      output(raft::make_device_vector<IdxT, std::uint32_t>(handle, params.n_rows)),
      expected(raft::make_device_vector<IdxT, std::uint32_t>(handle, params.n_rows))
  {
    raft::update_device(input.data_handle(),
                        params.input_matrix.data(),
                        params.input_matrix.size(),
                        resource::get_cuda_stream(handle));
    raft::update_device(expected.data_handle(),
                        params.output_matrix.data(),
                        params.output_matrix.size(),
                        resource::get_cuda_stream(handle));

    // argmax takes a const view of the input
    auto input_const_view = raft::make_device_matrix_view<const T, std::uint32_t, row_major>(
      input.data_handle(), input.extent(0), input.extent(1));

    raft::matrix::argmax(handle, input_const_view, output.view());

    resource::sync_stream(handle);
  }

 protected:
  raft::resources handle;
  ArgMaxInputs<T, IdxT> params;
  raft::device_matrix<T, std::uint32_t, row_major> input;
  raft::device_vector<IdxT, std::uint32_t> output;    // computed on device
  raft::device_vector<IdxT, std::uint32_t> expected;  // uploaded reference
};
// 3x4 row-major inputs with hand-computed per-row argmax {3, 0, 2}.
const std::vector<ArgMaxInputs<float, int>> inputsf = {
  {{0.1f, 0.2f, 0.3f, 0.4f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.5f, 0.0f}, {3, 0, 2}, 3, 4}};

const std::vector<ArgMaxInputs<double, int>> inputsd = {
  {{0.1, 0.2, 0.3, 0.4, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.5, 0.0}, {3, 0, 2}, 3, 4}};

typedef ArgMaxTest<float, int> ArgMaxTestF;
TEST_P(ArgMaxTestF, Result)
{
  ASSERT_TRUE(devArrMatch(expected.data_handle(),
                          output.data_handle(),
                          params.n_rows,
                          Compare<int>(),
                          resource::get_cuda_stream(handle)));
}

typedef ArgMaxTest<double, int> ArgMaxTestD;
TEST_P(ArgMaxTestD, Result)
{
  ASSERT_TRUE(devArrMatch(expected.data_handle(),
                          output.data_handle(),
                          params.n_rows,
                          Compare<int>(),
                          resource::get_cuda_stream(handle)));
}
} // namespace matrix
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/columnSort.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"

#include <algorithm>
#include <gtest/gtest.h>
#include <memory>
#include <numeric>
#include <random>

#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/matrix/col_wise_sort.cuh>
#include <raft/util/cudart_utils.hpp>

#include <rmm/device_uvector.hpp>
namespace raft {
namespace matrix {
/**
 * Argsort of v: returns (as a heap-allocated vector OWNED BY THE CALLER, who
 * must delete it) indices such that (*result)[k] is the position in v of the
 * k-th smallest element.
 *
 * The vector is held in a unique_ptr until the final return so that an
 * exception thrown by the sort's comparator cannot leak it; the raw-pointer
 * return type is kept for compatibility with existing callers.
 */
template <typename T>
std::vector<int>* sort_indexes(const std::vector<T>& v)
{
  // initialize original index locations
  auto idx = std::make_unique<std::vector<int>>(v.size());
  std::iota(idx->begin(), idx->end(), 0);

  // sort indexes based on comparing values in v
  std::sort(idx->begin(), idx->end(), [&v](int i1, int i2) { return v[i1] < v[i2]; });
  return idx.release();
}
// Parameters for one column-sort test case.
template <typename T>
struct columnSort {
  T tolerance;    // comparison tolerance for devArrMatch
  int n_row;      // number of rows (each row is sorted independently)
  int n_col;      // number of columns per row
  bool testKeys;  // also compare the sorted keys, not just the index output
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const columnSort<T>& dims)
{
return os;
}
/**
 * Fixture: builds a shuffled 1..len host input, computes per-row golden
 * argsort/sorted-key results on the host, then runs the device-side
 * raft::matrix::sort_cols_per_row for the TEST body to compare against.
 */
template <typename T>
class ColumnSort : public ::testing::TestWithParam<columnSort<T>> {
 protected:
  ColumnSort()
    : keyIn(0, resource::get_cuda_stream(handle)),
      keySorted(0, resource::get_cuda_stream(handle)),
      keySortGolden(0, resource::get_cuda_stream(handle)),
      valueOut(0, resource::get_cuda_stream(handle)),
      goldenValOut(0, resource::get_cuda_stream(handle))
  {
  }

  void SetUp() override
  {
    params  = ::testing::TestWithParam<columnSort<T>>::GetParam();
    int len = params.n_row * params.n_col;
    keyIn.resize(len, resource::get_cuda_stream(handle));
    valueOut.resize(len, resource::get_cuda_stream(handle));
    goldenValOut.resize(len, resource::get_cuda_stream(handle));
    if (params.testKeys) {
      keySorted.resize(len, resource::get_cuda_stream(handle));
      keySortGolden.resize(len, resource::get_cuda_stream(handle));
    }

    // Host input: a random permutation of 1..len (all values distinct, so the
    // golden argsort is unambiguous).
    std::vector<T> vals(len);
    std::vector<int> cValGolden(len);
    std::iota(vals.begin(), vals.end(),
              1.0f);  // will have to change input param type
    // std::random_shuffle was deprecated in C++14 and removed in C++17; use
    // std::shuffle with a fixed-seed engine so runs stay reproducible.
    std::mt19937 rng(1234);
    std::shuffle(vals.begin(), vals.end(), rng);

    // Compute golden results row by row on the host.
    std::vector<T> cKeyGolden(len);
    for (int i = 0; i < params.n_row; i++) {
      std::vector<T> tmp(vals.begin() + i * params.n_col, vals.begin() + (i + 1) * params.n_col);
      // unique_ptr replaces the old manual delete and guards against leaks if
      // anything below throws.
      std::unique_ptr<std::vector<int>> cpuOut(sort_indexes(tmp));
      std::copy(cpuOut->begin(), cpuOut->end(), cValGolden.begin() + i * params.n_col);
      if (params.testKeys) {
        std::sort(tmp.begin(), tmp.end());
        std::copy(tmp.begin(), tmp.end(), cKeyGolden.begin() + i * params.n_col);
      }
    }

    raft::update_device(keyIn.data(), &vals[0], len, resource::get_cuda_stream(handle));
    raft::update_device(
      goldenValOut.data(), &cValGolden[0], len, resource::get_cuda_stream(handle));
    if (params.testKeys)
      raft::update_device(
        keySortGolden.data(), &cKeyGolden[0], len, resource::get_cuda_stream(handle));

    // Run the device-side column-wise sort under test.
    // NOTE(review): key_sorted_view is created even when testKeys == false and
    // keySorted was never resized; it then views a zero-sized buffer — confirm
    // sort_cols_per_row tolerates this (preserved from the original code).
    auto key_in_view = raft::make_device_matrix_view<const T, int, row_major>(
      keyIn.data(), params.n_row, params.n_col);
    auto value_out_view = raft::make_device_matrix_view<int, int, row_major>(
      valueOut.data(), params.n_row, params.n_col);
    auto key_sorted_view = raft::make_device_matrix_view<T, int, row_major>(
      keySorted.data(), params.n_row, params.n_col);

    raft::matrix::sort_cols_per_row(
      handle, key_in_view, value_out_view, std::make_optional(key_sorted_view));

    RAFT_CUDA_TRY(cudaStreamSynchronize(resource::get_cuda_stream(handle)));
  }

 protected:
  raft::resources handle;
  columnSort<T> params;
  rmm::device_uvector<T> keyIn, keySorted, keySortGolden;
  rmm::device_uvector<int> valueOut, goldenValOut;  // valueOut are indexes
};
// Ascending column-sort cases; pairs repeat with and without key checking.
const std::vector<columnSort<float>> inputsf1 = {{0.000001f, 503, 2000, false},
                                                 {0.000001f, 113, 20000, true},
                                                 {0.000001f, 503, 2000, false},
                                                 {0.000001f, 113, 20000, true}};

typedef ColumnSort<float> ColumnSortF;
TEST_P(ColumnSortF, Result)
{
  // Compare the device-computed per-row argsort (and optionally the sorted
  // keys) against the host golden results built in SetUp().
  ASSERT_TRUE(devArrMatch(valueOut.data(),
                          goldenValOut.data(),
                          params.n_row * params.n_col,
                          raft::CompareApprox<float>(params.tolerance)));
  if (params.testKeys) {
    ASSERT_TRUE(devArrMatch(keySorted.data(),
                            keySortGolden.data(),
                            params.n_row * params.n_col,
                            raft::CompareApprox<float>(params.tolerance)));
  }
}

INSTANTIATE_TEST_CASE_P(ColumnSortTests, ColumnSortF, ::testing::ValuesIn(inputsf1));
} // end namespace matrix
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/select_k.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft_internal/matrix/select_k.cuh>
#include <raft/core/resources.hpp>
#include <raft/random/rng.cuh>
#include <raft/sparse/detail/utils.h>
#include <raft/util/cudart_utils.hpp>
#include <gtest/gtest.h>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <algorithm>
#include <numeric>
namespace raft::matrix {
// Build the default input ids for a batch: for each of `batch_size` rows, the
// sequence 0..len-1. Filled on the device with iota and copied back to host.
template <typename IdxT>
auto gen_simple_ids(uint32_t batch_size, uint32_t len) -> std::vector<IdxT>
{
  std::vector<IdxT> out(batch_size * len);
  auto s = rmm::cuda_stream_default;
  rmm::device_uvector<IdxT> out_d(out.size(), s);
  sparse::iota_fill(out_d.data(), IdxT(batch_size), IdxT(len), s);
  update_host(out.data(), out_d.data(), out.size(), s);
  s.synchronize();
  return out;
}
// Test-case I/O whose expected outputs are supplied up front as hand-written
// host vectors (no device computation; `algo` stays nullopt).
template <typename KeyT, typename IdxT>
struct io_simple {
 public:
  bool not_supported               = false;
  std::optional<select::Algo> algo = std::nullopt;

  io_simple(const select::params& spec,
            const std::vector<KeyT>& in_dists,
            const std::optional<std::vector<IdxT>>& in_ids,
            const std::vector<KeyT>& out_dists,
            const std::vector<IdxT>& out_ids)
    : in_dists_(in_dists),
      // default ids are 0..len-1 per batch row
      in_ids_(in_ids.value_or(gen_simple_ids<IdxT>(spec.batch_size, spec.len))),
      out_dists_(out_dists),
      out_ids_(out_ids)
  {
  }

  auto get_in_dists() -> std::vector<KeyT>& { return in_dists_; }
  auto get_in_ids() -> std::vector<IdxT>& { return in_ids_; }
  auto get_out_dists() -> std::vector<KeyT>& { return out_dists_; }
  auto get_out_ids() -> std::vector<IdxT>& { return out_ids_; }

 private:
  std::vector<KeyT> in_dists_;
  std::vector<IdxT> in_ids_;
  std::vector<KeyT> out_dists_;
  std::vector<IdxT> out_ids_;
};
// Test-case I/O computed by running one select-k algorithm on the device.
// The outputs are canonicalized (sorted per batch row) so results from
// different algorithms can be compared directly.
template <typename KeyT, typename IdxT>
struct io_computed {
 public:
  bool not_supported = false;
  select::Algo algo;

  io_computed(const select::params& spec,
              const select::Algo& algo,
              const std::vector<KeyT>& in_dists,
              const std::optional<std::vector<IdxT>>& in_ids = std::nullopt)
    : algo(algo),
      in_dists_(in_dists),
      in_ids_(in_ids.value_or(gen_simple_ids<IdxT>(spec.batch_size, spec.len))),
      out_dists_(spec.batch_size * spec.k),
      out_ids_(spec.batch_size * spec.k)
  {
    // check if the size is supported by the algorithm
    switch (algo) {
      case select::Algo::kWarpAuto:
      case select::Algo::kWarpImmediate:
      case select::Algo::kWarpFiltered:
      case select::Algo::kWarpDistributed:
      case select::Algo::kWarpDistributedShm: {
        // warp-sort variants are limited to k <= kMaxCapacity
        if (spec.k > raft::matrix::detail::select::warpsort::kMaxCapacity) {
          not_supported = true;
          return;
        }
      } break;
      default: break;
    }

    // upload inputs, run the selected algorithm, and copy results back
    resources handle{};
    auto stream = resource::get_cuda_stream(handle);

    rmm::device_uvector<KeyT> in_dists_d(in_dists_.size(), stream);
    rmm::device_uvector<IdxT> in_ids_d(in_ids_.size(), stream);
    rmm::device_uvector<KeyT> out_dists_d(out_dists_.size(), stream);
    rmm::device_uvector<IdxT> out_ids_d(out_ids_.size(), stream);

    update_device(in_dists_d.data(), in_dists_.data(), in_dists_.size(), stream);
    update_device(in_ids_d.data(), in_ids_.data(), in_ids_.size(), stream);

    select::select_k_impl<KeyT, IdxT>(handle,
                                      algo,
                                      in_dists_d.data(),
                                      spec.use_index_input ? in_ids_d.data() : nullptr,
                                      spec.batch_size,
                                      spec.len,
                                      spec.k,
                                      out_dists_d.data(),
                                      out_ids_d.data(),
                                      spec.select_min);

    update_host(out_dists_.data(), out_dists_d.data(), out_dists_.size(), stream);
    update_host(out_ids_.data(), out_ids_d.data(), out_ids_.size(), stream);

    interruptible::synchronize(stream);

    // canonicalize: sort each batch row of the outputs (by dist, then id)
    auto p = topk_sort_permutation(out_dists_, out_ids_, spec.k, spec.select_min);
    apply_permutation(out_dists_, p);
    apply_permutation(out_ids_, p);
  }

  auto get_in_dists() -> std::vector<KeyT>& { return in_dists_; }
  auto get_in_ids() -> std::vector<IdxT>& { return in_ids_; }
  auto get_out_dists() -> std::vector<KeyT>& { return out_dists_; }
  auto get_out_ids() -> std::vector<IdxT>& { return out_ids_; }

 private:
  std::vector<KeyT> in_dists_;
  std::vector<IdxT> in_ids_;
  std::vector<KeyT> out_dists_;
  std::vector<IdxT> out_ids_;

  // Permutation that sorts each k-sized batch row by distance (ascending when
  // select_min, else descending), breaking ties by ascending id; rows keep
  // their relative order.
  auto topk_sort_permutation(const std::vector<KeyT>& vec,
                             const std::vector<IdxT>& inds,
                             uint32_t k,
                             bool select_min) -> std::vector<IdxT>
  {
    std::vector<IdxT> p(vec.size());
    std::iota(p.begin(), p.end(), 0);
    if (select_min) {
      std::sort(p.begin(), p.end(), [&vec, &inds, k](IdxT i, IdxT j) {
        const IdxT ik = i / k;
        const IdxT jk = j / k;
        if (ik == jk) {
          if (vec[i] == vec[j]) { return inds[i] < inds[j]; }
          return vec[i] < vec[j];
        }
        return ik < jk;
      });
    } else {
      std::sort(p.begin(), p.end(), [&vec, &inds, k](IdxT i, IdxT j) {
        const IdxT ik = i / k;
        const IdxT jk = j / k;
        if (ik == jk) {
          if (vec[i] == vec[j]) { return inds[i] < inds[j]; }
          return vec[i] > vec[j];
        }
        return ik < jk;
      });
    }
    return p;
  }

  // Apply permutation p to vec in place, following swap chains from the back
  // so each element lands in its final position exactly once.
  template <typename T>
  void apply_permutation(std::vector<T>& vec, const std::vector<IdxT>& p)  // NOLINT
  {
    for (auto i = IdxT(vec.size()) - 1; i > 0; i--) {
      auto j = p[i];
      while (j > i)
        j = p[j];
      std::swap(vec[j], vec[i]);
    }
  }
};
// One test case: problem spec, the algorithm under test, and the reference I/O.
template <typename InOut>
using Params = std::tuple<select::params, select::Algo, InOut>;

// Compares a reference result (`ref`, simple or computed) against the result
// of running `algo` on the same inputs (`res`).
template <typename KeyT, typename IdxT, template <typename, typename> typename ParamsReader>
struct SelectK  // NOLINT
  : public testing::TestWithParam<typename ParamsReader<KeyT, IdxT>::params_t> {
  const select::params spec;
  const select::Algo algo;
  typename ParamsReader<KeyT, IdxT>::io_t ref;
  io_computed<KeyT, IdxT> res;

  explicit SelectK(Params<typename ParamsReader<KeyT, IdxT>::io_t> ps)
    : spec(std::get<0>(ps)),
      algo(std::get<1>(ps)),                                 // NOLINT
      ref(std::get<2>(ps)),                                  // NOLINT
      res(spec, algo, ref.get_in_dists(), ref.get_in_ids())  // NOLINT
  {
  }

  explicit SelectK(typename ParamsReader<KeyT, IdxT>::params_t ps)
    : SelectK(ParamsReader<KeyT, IdxT>::read(ps))
  {
  }

  SelectK()
    : SelectK(testing::TestWithParam<typename ParamsReader<KeyT, IdxT>::params_t>::GetParam())
  {
  }

  void run()
  {
    if (ref.not_supported || res.not_supported) { GTEST_SKIP(); }
    // distances (keys) must match exactly
    ASSERT_TRUE(hostVecMatch(ref.get_out_dists(), res.get_out_dists(), Compare<KeyT>()));
    // If the dists (keys) are the same, different corresponding ids may end up in the selection
    // due to non-deterministic nature of some implementations.
    auto compare_ids = [this](const IdxT& i, const IdxT& j) {
      if (i == j) return true;
      auto& in_ids   = ref.get_in_ids();
      auto& in_dists = ref.get_in_dists();
      auto ix_i = static_cast<int64_t>(std::find(in_ids.begin(), in_ids.end(), i) - in_ids.begin());
      auto ix_j = static_cast<int64_t>(std::find(in_ids.begin(), in_ids.end(), j) - in_ids.begin());
      auto forgive_i = forgive_algo(ref.algo, i);
      auto forgive_j = forgive_algo(res.algo, j);
      // Some algorithms return invalid indices in special cases.
      // TODO: https://github.com/rapidsai/raft/issues/1822
      if (static_cast<size_t>(ix_i) >= in_ids.size()) return forgive_i;
      if (static_cast<size_t>(ix_j) >= in_ids.size()) return forgive_j;
      auto dist_i = in_dists[ix_i];
      auto dist_j = in_dists[ix_j];
      // ids that point at equal distances are interchangeable
      if (dist_i == dist_j) return true;
      const auto bound = spec.select_min ? raft::upper_bound<KeyT>() : raft::lower_bound<KeyT>();
      if (forgive_i && dist_i == bound) return true;
      if (forgive_j && dist_j == bound) return true;
      // Otherwise really fail
      std::cout << "ERROR: ref[" << ix_i << "] = " << dist_i << " != "
                << "res[" << ix_j << "] = " << dist_j << std::endl;
      return false;
    };
    ASSERT_TRUE(hostVecMatch(ref.get_out_ids(), res.get_out_ids(), compare_ids));
  }

  // Whether index `ix` from `algo` may be excused as a known quirk of that
  // algorithm (e.g. placeholder indices emitted for inf/sentinel distances).
  auto forgive_algo(const std::optional<select::Algo>& algo, IdxT ix) const -> bool
  {
    if (!algo.has_value()) { return false; }
    switch (algo.value()) {
      // not sure which algo this is.
      case select::Algo::kPublicApi: return true;
      // warp-sort-based algos currently return zero index for inf distances.
      case select::Algo::kWarpAuto:
      case select::Algo::kWarpImmediate:
      case select::Algo::kWarpFiltered:
      case select::Algo::kWarpDistributed:
      case select::Algo::kWarpDistributedShm: return ix == 0;
      // FAISS version returns a special invalid value:
      case select::Algo::kFaissBlockSelect: return ix == std::numeric_limits<IdxT>::max();
      // Do not forgive by default
      default: return false;
    }
  }
};
// ParamsReader that pairs a hand-written input/expected-output tuple with the
// algorithm to test, producing an io_simple reference.
template <typename KeyT, typename IdxT>
struct params_simple {
  using io_t    = io_simple<KeyT, IdxT>;
  // (spec, in_dists, optional in_ids, expected out_dists, expected out_ids)
  using input_t = std::tuple<select::params,
                             std::vector<KeyT>,
                             std::optional<std::vector<IdxT>>,
                             std::vector<KeyT>,
                             std::vector<IdxT>>;
  using params_t = std::tuple<input_t, select::Algo>;

  static auto read(params_t ps) -> Params<io_t>
  {
    auto ins  = std::get<0>(ps);
    auto algo = std::get<1>(ps);
    return std::make_tuple(
      std::get<0>(ins),
      algo,
      io_simple<KeyT, IdxT>(
        std::get<0>(ins), std::get<1>(ins), std::get<2>(ins), std::get<3>(ins), std::get<4>(ins)));
  }
};
auto inf_f = std::numeric_limits<float>::max();
auto inputs_simple_f = testing::Values(
params_simple<float, uint32_t>::input_t(
{5, 5, 5, true, true},
{5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0,
1.0, 4.0, 5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0},
std::nullopt,
{1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0,
4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0},
{4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 3, 0, 1, 4, 2, 4, 2, 1, 3, 0, 0, 2, 1, 4, 3}),
params_simple<float, uint32_t>::input_t(
{5, 5, 3, true, true},
{5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0,
1.0, 4.0, 5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0},
std::nullopt,
{1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0},
{4, 3, 2, 0, 1, 2, 3, 0, 1, 4, 2, 1, 0, 2, 1}),
params_simple<float, uint32_t>::input_t(
{5, 5, 5, true, false},
{5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0,
1.0, 4.0, 5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0},
std::nullopt,
{1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0,
4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0},
{4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 3, 0, 1, 4, 2, 4, 2, 1, 3, 0, 0, 2, 1, 4, 3}),
params_simple<float, uint32_t>::input_t(
{5, 5, 3, true, false},
{5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0,
1.0, 4.0, 5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0},
std::nullopt,
{1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0},
{4, 3, 2, 0, 1, 2, 3, 0, 1, 4, 2, 1, 0, 2, 1}),
params_simple<float, uint32_t>::input_t(
{5, 7, 3, true, true},
{5.0, 4.0, 3.0, 2.0, 1.3, 7.5, 19.0, 9.0, 2.0, 3.0, 3.0, 5.0, 6.0, 4.0, 2.0, 3.0, 5.0, 1.0,
4.0, 1.0, 1.0, 5.0, 7.0, 2.5, 4.0, 7.0, 8.0, 8.0, 1.0, 3.0, 2.0, 5.0, 4.0, 1.1, 1.2},
std::nullopt,
{1.3, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 1.0, 1.0, 2.5, 4.0, 5.0, 1.0, 1.1, 1.2},
{4, 3, 2, 1, 2, 3, 3, 5, 6, 2, 3, 0, 0, 5, 6}),
params_simple<float, uint32_t>::input_t({1, 7, 3, true, true},
{2.0, 3.0, 5.0, 1.0, 4.0, 1.0, 1.0},
std::nullopt,
{1.0, 1.0, 1.0},
{3, 5, 6}),
params_simple<float, uint32_t>::input_t({1, 7, 3, false, false},
{2.0, 3.0, 5.0, 1.0, 4.0, 1.0, 1.0},
std::nullopt,
{5.0, 4.0, 3.0},
{2, 4, 1}),
params_simple<float, uint32_t>::input_t({1, 7, 3, false, true},
{2.0, 3.0, 5.0, 9.0, 4.0, 9.0, 9.0},
std::nullopt,
{9.0, 9.0, 9.0},
{3, 5, 6}),
params_simple<float, uint32_t>::input_t(
{1, 130, 5, false, true},
{19, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2,
1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 4, 4, 2, 3, 2, 3, 2, 3, 2, 3, 2, 20},
std::nullopt,
{20, 19, 18, 17, 16},
{129, 0, 117, 116, 115}),
params_simple<float, uint32_t>::input_t(
{1, 130, 15, false, true},
{19, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2,
1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 4, 4, 2, 3, 2, 3, 2, 3, 2, 3, 2, 20},
std::nullopt,
{20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6},
{129, 0, 117, 116, 115, 114, 113, 112, 111, 110, 109, 108, 107, 106, 105}),
params_simple<float, uint32_t>::input_t(
select::params{1, 32, 31, true, true},
{0, 1, 2, 3, inf_f, inf_f, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
std::optional{std::vector<uint32_t>{31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21,
20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10,
9, 8, 7, 6, 75, 74, 3, 2, 1, 0}},
{0, 1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, inf_f},
{31, 30, 29, 28, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 75, 74, 3, 2, 1, 0, 27}));
using SimpleFloatInt = SelectK<float, uint32_t, params_simple>;
TEST_P(SimpleFloatInt, Run) { run(); } // NOLINT
INSTANTIATE_TEST_CASE_P( // NOLINT
SelectK,
SimpleFloatInt,
testing::Combine(inputs_simple_f,
testing::Values(select::Algo::kPublicApi,
select::Algo::kRadix8bits,
select::Algo::kRadix11bits,
select::Algo::kRadix11bitsExtraPass,
select::Algo::kWarpImmediate,
select::Algo::kWarpFiltered,
select::Algo::kWarpDistributed)));
/// Functor: substitutes `replacement` for the key wherever the mask byte is
/// non-zero; used to inject sentinel (infinity) values into a random fraction
/// of the input keys.
template <typename KeyT>
struct replace_with_mask {
  KeyT replacement;
  constexpr auto inline operator()(KeyT x, uint8_t mask) -> KeyT
  {
    if (mask != 0) { return replacement; }
    return x;
  }
};
// Test-case generator that computes the ground truth with a fixed reference
// algorithm (RefAlgo), so other select-k implementations can be validated
// against it.
template <select::Algo RefAlgo>
struct with_ref {
  template <typename KeyT, typename IdxT>
  struct params_random {
    using io_t = io_computed<KeyT, IdxT>;
    using params_t = std::tuple<select::params, select::Algo>;
    // Draws a normally-distributed random input matrix on the device,
    // optionally overwrites a fraction of entries with infinity sentinels,
    // copies the data to the host, and packages it with the RefAlgo result.
    static auto read(params_t ps) -> Params<io_t>
    {
      auto spec = std::get<0>(ps);
      auto algo = std::get<1>(ps);
      std::vector<KeyT> dists(spec.len * spec.batch_size);
      raft::resources handle;
      {
        auto s = resource::get_cuda_stream(handle);
        rmm::device_uvector<KeyT> dists_d(spec.len * spec.batch_size, s);
        raft::random::RngState r(42);
        normal(handle, r, dists_d.data(), dists_d.size(), KeyT(10.0), KeyT(100.0));
        if (spec.frac_infinities > 0.0) {
          // Draw a bernoulli mask and replace the selected entries with the
          // "worst" value for the requested ordering: upper bound when
          // selecting minima, lower bound otherwise.
          rmm::device_uvector<uint8_t> mask_buf(dists_d.size(), s);
          auto mask = make_device_vector_view<uint8_t, size_t>(mask_buf.data(), mask_buf.size());
          raft::random::bernoulli(handle, r, mask, spec.frac_infinities);
          KeyT bound = spec.select_min ? raft::upper_bound<KeyT>() : raft::lower_bound<KeyT>();
          auto mask_in =
            make_device_vector_view<const uint8_t, size_t>(mask_buf.data(), mask_buf.size());
          auto dists_in = make_device_vector_view<const KeyT>(dists_d.data(), dists_d.size());
          auto dists_out = make_device_vector_view<KeyT>(dists_d.data(), dists_d.size());
          raft::linalg::map(handle, dists_out, replace_with_mask<KeyT>{bound}, dists_in, mask_in);
        }
        // Blocking sync below guarantees `dists` is fully populated before use.
        update_host(dists.data(), dists_d.data(), dists_d.size(), s);
        s.synchronize();
      }
      return std::make_tuple(spec, algo, io_computed<KeyT, IdxT>(spec, RefAlgo, dists));
    }
  };
};
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/select_k.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "select_k.cuh"
namespace raft::matrix {
// Batched select-k shapes with k values straddling warp/block boundaries
// (1..1700); select::params fields: {batch_size, len, k, select_min, ...}.
auto inputs_random_longlist = testing::Values(select::params{1, 130, 15, false},
                                              select::params{1, 128, 15, false},
                                              select::params{20, 700, 1, true},
                                              select::params{20, 700, 2, true},
                                              select::params{20, 700, 3, true},
                                              select::params{20, 700, 4, true},
                                              select::params{20, 700, 5, true},
                                              select::params{20, 700, 6, true},
                                              select::params{20, 700, 7, true},
                                              select::params{20, 700, 8, true},
                                              select::params{20, 700, 9, true},
                                              select::params{20, 700, 10, true, false},
                                              select::params{20, 700, 11, true},
                                              select::params{20, 700, 12, true},
                                              select::params{20, 700, 16, true},
                                              select::params{100, 1700, 17, true},
                                              select::params{100, 1700, 31, true, false},
                                              select::params{100, 1700, 32, false},
                                              select::params{100, 1700, 33, false},
                                              select::params{100, 1700, 63, false},
                                              select::params{100, 1700, 64, false, false},
                                              select::params{100, 1700, 65, false},
                                              select::params{100, 1700, 255, true},
                                              select::params{100, 1700, 256, true},
                                              select::params{100, 1700, 511, false},
                                              select::params{100, 1700, 512, true},
                                              select::params{100, 1700, 1023, false, false},
                                              select::params{100, 1700, 1024, true},
                                              select::params{100, 1700, 1700, true});
// Very long rows (up to 10^9 elements) with small k.
auto inputs_random_largesize = testing::Values(select::params{100, 100000, 1, true},
                                               select::params{100, 100000, 2, true},
                                               select::params{100, 100000, 3, true, false},
                                               select::params{100, 100000, 7, true},
                                               select::params{100, 100000, 16, true},
                                               select::params{100, 100000, 31, true},
                                               select::params{100, 100000, 32, true, false},
                                               select::params{100, 100000, 60, true},
                                               select::params{100, 100000, 100, true, false},
                                               select::params{100, 100000, 200, true},
                                               select::params{100000, 100, 100, false},
                                               select::params{1, 1000000000, 1, true},
                                               select::params{1, 1000000000, 16, false, false},
                                               select::params{1, 1000000000, 64, false},
                                               select::params{1, 1000000000, 128, true, false},
                                               select::params{1, 1000000000, 256, false, false});
// NOTE(review): not referenced by any instantiation in this translation unit;
// appears to duplicate the list in select_large_k.cu — confirm whether it can
// be removed here.
auto inputs_random_largek = testing::Values(select::params{100, 100000, 1000, true},
                                            select::params{100, 100000, 2000, false},
                                            select::params{100, 100000, 100000, true, false},
                                            select::params{100, 100000, 2048, false},
                                            select::params{100, 100000, 1237, true});
// Inputs where 90%/99.9% of the values are replaced by infinity sentinels
// (last parameter is frac_infinities).
auto inputs_random_many_infs =
  testing::Values(select::params{10, 100000, 1, true, false, false, true, 0.9},
                  select::params{10, 100000, 16, true, false, false, true, 0.9},
                  select::params{10, 100000, 64, true, false, false, true, 0.9},
                  select::params{10, 100000, 128, true, false, false, true, 0.9},
                  select::params{10, 100000, 256, true, false, false, true, 0.9},
                  select::params{1000, 10000, 1, true, false, false, true, 0.9},
                  select::params{1000, 10000, 16, true, false, false, true, 0.9},
                  select::params{1000, 10000, 64, true, false, false, true, 0.9},
                  select::params{1000, 10000, 128, true, false, false, true, 0.9},
                  select::params{1000, 10000, 256, true, false, false, true, 0.9},
                  select::params{10, 100000, 1, true, false, false, true, 0.999},
                  select::params{10, 100000, 16, true, false, false, true, 0.999},
                  select::params{10, 100000, 64, true, false, false, true, 0.999},
                  select::params{10, 100000, 128, true, false, false, true, 0.999},
                  select::params{10, 100000, 256, true, false, false, true, 0.999},
                  select::params{1000, 10000, 1, true, false, false, true, 0.999},
                  select::params{1000, 10000, 16, true, false, false, true, 0.999},
                  select::params{1000, 10000, 64, true, false, false, true, 0.999},
                  select::params{1000, 10000, 128, true, false, false, true, 0.999},
                  select::params{1000, 10000, 256, true, false, false, true, 0.999});
// float/uint32 long-list shapes: all algorithms validated against the public
// API (kPublicApi) reference.
using ReferencedRandomFloatInt =
  SelectK<float, uint32_t, with_ref<select::Algo::kPublicApi>::params_random>;
TEST_P(ReferencedRandomFloatInt, Run) { run(); }  // NOLINT
INSTANTIATE_TEST_CASE_P(  // NOLINT
  SelectK,
  ReferencedRandomFloatInt,
  testing::Combine(inputs_random_longlist,
                   testing::Values(select::Algo::kRadix8bits,
                                   select::Algo::kRadix11bits,
                                   select::Algo::kRadix11bitsExtraPass,
                                   select::Algo::kWarpImmediate,
                                   select::Algo::kWarpFiltered,
                                   select::Algo::kWarpDistributed,
                                   select::Algo::kWarpDistributedShm)))
// Same coverage with double keys and 64-bit indices.
using ReferencedRandomDoubleSizeT =
  SelectK<double, int64_t, with_ref<select::Algo::kPublicApi>::params_random>;
TEST_P(ReferencedRandomDoubleSizeT, Run) { run(); }  // NOLINT
INSTANTIATE_TEST_CASE_P(  // NOLINT
  SelectK,
  ReferencedRandomDoubleSizeT,
  testing::Combine(inputs_random_longlist,
                   testing::Values(select::Algo::kRadix8bits,
                                   select::Algo::kRadix11bits,
                                   select::Algo::kRadix11bitsExtraPass,
                                   select::Algo::kWarpImmediate,
                                   select::Algo::kWarpFiltered,
                                   select::Algo::kWarpDistributed,
                                   select::Algo::kWarpDistributedShm)))
// Very large inputs: ground truth from kRadix11bits.
using ReferencedRandomDoubleInt =
  SelectK<double, uint32_t, with_ref<select::Algo::kRadix11bits>::params_random>;
TEST_P(ReferencedRandomDoubleInt, LargeSize) { run(); }  // NOLINT
INSTANTIATE_TEST_CASE_P(  // NOLINT
  SelectK,
  ReferencedRandomDoubleInt,
  testing::Combine(inputs_random_largesize,
                   testing::Values(select::Algo::kWarpAuto,
                                   select::Algo::kRadix8bits,
                                   select::Algo::kRadix11bits,
                                   select::Algo::kRadix11bitsExtraPass)))
// Inputs dominated by infinities: radix variants validated against the
// warpsort (kWarpImmediate) ground truth.
using ReferencedRandomFloatIntkWarpsortAsGT =
  SelectK<float, uint32_t, with_ref<select::Algo::kWarpImmediate>::params_random>;
TEST_P(ReferencedRandomFloatIntkWarpsortAsGT, Run) { run(); }  // NOLINT
INSTANTIATE_TEST_CASE_P(  // NOLINT
  SelectK,
  ReferencedRandomFloatIntkWarpsortAsGT,
  testing::Combine(inputs_random_many_infs,
                   testing::Values(select::Algo::kRadix8bits,
                                   select::Algo::kRadix11bits,
                                   select::Algo::kRadix11bitsExtraPass)))
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/norm.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/norm.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
namespace raft {
namespace matrix {
// Test parameters for the scalar (full-matrix) L2-norm test.
template <typename T>
struct NormInputs {
  T tolerance;  // per-element tolerance; scaled by rows*cols in the assertion
  int rows, cols;
  unsigned long long int seed;  // RNG seed for the random input matrix
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const NormInputs<T>& I)
{
os << "{ " << I.tolerance << ", " << I.rows << ", " << I.cols << ", " << I.seed << '}'
<< std::endl;
return os;
}
// Host reference: L2 (Frobenius) norm over all N*D elements of the matrix.
template <typename Type>
Type naiveNorm(const Type* data, int D, int N)
{
  const int len = N * D;
  Type sum_sq   = Type(0);
  for (int idx = 0; idx < len; idx++) {
    const Type v = data[idx];
    sum_sq += v * v;
  }
  return std::sqrt(sum_sq);
}
// Parameterized test: computes the scalar L2 norm of a random rows x cols
// matrix with raft::matrix::l2_norm and compares it against the host
// reference naiveNorm.
template <typename T>
class NormTest : public ::testing::TestWithParam<NormInputs<T>> {
 public:
  NormTest()
    : params(::testing::TestWithParam<NormInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream)
  {
  }
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    int rows = params.rows, cols = params.cols, len = rows * cols;
    uniform(handle, r, data.data(), len, T(-10.0), T(10.0));
    std::vector<T> h_data(rows * cols);
    raft::update_host(h_data.data(), data.data(), rows * cols, stream);
    // Fix: wait for the asynchronous device->host copy before the host
    // reference reads h_data. update_host enqueues cudaMemcpyAsync; relying
    // on the pageable-memory fallback to block the host is not guaranteed
    // by the CUDA API contract.
    resource::sync_stream(handle, stream);
    out_scalar_exp = naiveNorm(h_data.data(), cols, rows);
    auto input = raft::make_device_matrix_view<const T, int>(data.data(), params.rows, params.cols);
    out_scalar_act = l2_norm(handle, input);
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  NormInputs<T> params;
  rmm::device_uvector<T> data;  // device input matrix
  T out_scalar_exp = 0;         // host-reference norm
  T out_scalar_act = 0;         // device-computed norm
};
///// Full-matrix (scalar) norm tests over assorted shapes
const std::vector<NormInputs<float>> inputsf = {{0.00001f, 32, 1024, 1234ULL},
                                                {0.00001f, 64, 1024, 1234ULL},
                                                {0.00001f, 128, 1024, 1234ULL},
                                                {0.00001f, 256, 1024, 1234ULL},
                                                {0.00001f, 512, 512, 1234ULL},
                                                {0.00001f, 1024, 32, 1234ULL},
                                                {0.00001f, 1024, 64, 1234ULL},
                                                {0.00001f, 1024, 128, 1234ULL},
                                                {0.00001f, 1024, 256, 1234ULL}};
const std::vector<NormInputs<double>> inputsd = {
  {0.00000001, 32, 1024, 1234ULL},
  {0.00000001, 64, 1024, 1234ULL},
  {0.00000001, 128, 1024, 1234ULL},
  {0.00000001, 256, 1024, 1234ULL},
  {0.00000001, 512, 512, 1234ULL},
  {0.00000001, 1024, 32, 1234ULL},
  {0.00000001, 1024, 64, 1234ULL},
  {0.00000001, 1024, 128, 1234ULL},
  {0.00000001, 1024, 256, 1234ULL},
};
typedef NormTest<float> NormTestF;
// Tolerance is scaled by the element count since the accumulated error grows
// with rows*cols.
TEST_P(NormTestF, Result)
{
  ASSERT_NEAR(out_scalar_exp, out_scalar_act, params.tolerance * params.rows * params.cols);
}
typedef NormTest<double> NormTestD;
TEST_P(NormTestD, Result)
{
  ASSERT_NEAR(out_scalar_exp, out_scalar_act, params.tolerance * params.rows * params.cols);
}
INSTANTIATE_TEST_CASE_P(NormTests, NormTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(NormTests, NormTestD, ::testing::ValuesIn(inputsd));
} // end namespace matrix
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/gather.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/gather.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/itertools.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
// Host reference for matrix::gather / gather_if on a row-major N x D input.
// Row `outRow` of the output receives input row `map[outRow]` (optionally
// passed through transform_op when MapTransform is set). When Conditional is
// set, the row is copied only if pred_op(stencil[outRow]) holds; skipped
// output rows are left untouched.
template <bool Conditional,
          bool MapTransform,
          typename InputIteratorT,
          typename MapIteratorT,
          typename StencilIteratorT,
          typename UnaryPredicateOp,
          typename MapTransformOp,
          typename OutputIteratorT,
          typename IdxT>
void naiveGather(InputIteratorT in,
                 IdxT D,
                 IdxT N,
                 MapIteratorT map,
                 StencilIteratorT stencil,
                 IdxT map_length,
                 OutputIteratorT out,
                 UnaryPredicateOp pred_op,
                 MapTransformOp transform_op)
{
  for (IdxT row = 0; row < map_length; row++) {
    if constexpr (Conditional) {
      if (!pred_op(stencil[row])) { continue; }
    }
    typename std::iterator_traits<MapIteratorT>::value_type raw_map = map[row];
    IdxT src_row;
    if constexpr (MapTransform) {
      src_row = transform_op(raw_map);
    } else {
      src_row = raw_map;
    }
    const IdxT src_off = src_row * D;
    const IdxT dst_off = row * D;
    for (IdxT col = 0; col < D; col++) {
      out[dst_off + col] = in[src_off + col];
    }
  }
}
// Test parameters for the gather tests.
template <typename IdxT>
struct GatherInputs {
  IdxT nrows;           // rows in the input matrix
  IdxT ncols;           // columns in the input matrix
  IdxT map_length;      // number of rows to gather (clamped to nrows in SetUp)
  IdxT col_batch_size;  // column batch size, used only by the in-place variants
  unsigned long long int seed;  // RNG seed
};
// Parameterized test for raft::matrix::gather / gather_if:
// - Conditional:  use gather_if with a stencil + predicate
// - MapTransform: apply a transform to each map value before indexing
// - Inplace:      use the in-place gather overloads (take params.col_batch_size)
// Expected output is produced on the host by naiveGather.
template <bool Conditional,
          bool MapTransform,
          bool Inplace,
          typename MatrixT,
          typename MapT,
          typename IdxT>
class GatherTest : public ::testing::TestWithParam<GatherInputs<IdxT>> {
 protected:
  GatherTest()
    : stream(resource::get_cuda_stream(handle)),
      params(::testing::TestWithParam<GatherInputs<IdxT>>::GetParam()),
      d_in(0, stream),
      d_out_exp(0, stream),
      d_out_act(0, stream),
      d_stencil(0, stream),
      d_map(0, stream)
  {
  }
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    raft::random::RngState r_int(params.seed);
    IdxT map_length = params.map_length;
    IdxT len = params.nrows * params.ncols;
    if (map_length > params.nrows) map_length = params.nrows;
    // input matrix setup
    d_in.resize(params.nrows * params.ncols, stream);
    h_in.resize(params.nrows * params.ncols);
    raft::random::uniform(handle, r, d_in.data(), len, MatrixT(-1.0), MatrixT(1.0));
    raft::update_host(h_in.data(), d_in.data(), len, stream);
    // map setup: random row indices in [0, nrows)
    d_map.resize(map_length, stream);
    h_map.resize(map_length);
    raft::random::uniformInt(handle, r_int, d_map.data(), map_length, (MapT)0, (MapT)params.nrows);
    raft::update_host(h_map.data(), d_map.data(), map_length, stream);
    // stencil setup (only needed for the gather_if variants)
    if (Conditional) {
      d_stencil.resize(map_length, stream);
      h_stencil.resize(map_length);
      raft::random::uniform(handle, r, d_stencil.data(), map_length, MatrixT(-1.0), MatrixT(1.0));
      raft::update_host(h_stencil.data(), d_stencil.data(), map_length, stream);
    }
    // unary predicate op (used only when Conditional is true)
    auto pred_op = raft::plug_const_op(MatrixT(0.0), raft::greater_op());
    // map transform op (used only when MapTransform is true)
    auto transform_op =
      raft::compose_op(raft::mod_const_op<IdxT>(params.nrows), raft::add_const_op<IdxT>(10));
    // expected and actual output matrix setup
    h_out.resize(map_length * params.ncols);
    d_out_exp.resize(map_length * params.ncols, stream);
    d_out_act.resize(map_length * params.ncols, stream);
    // Fix: wait for the asynchronous device->host copies (h_in/h_map/h_stencil)
    // before the host reference reads them; update_host enqueues
    // cudaMemcpyAsync and does not guarantee host-side completion.
    resource::sync_stream(handle, stream);
    // launch gather on the host and copy the results to device
    naiveGather<Conditional, MapTransform>(h_in.data(),
                                           params.ncols,
                                           params.nrows,
                                           h_map.data(),
                                           h_stencil.data(),
                                           map_length,
                                           h_out.data(),
                                           pred_op,
                                           transform_op);
    raft::update_device(d_out_exp.data(), h_out.data(), map_length * params.ncols, stream);
    auto in_view = raft::make_device_matrix_view<const MatrixT, IdxT, row_major>(
      d_in.data(), params.nrows, params.ncols);
    auto inout_view = raft::make_device_matrix_view<MatrixT, IdxT, row_major>(
      d_in.data(), params.nrows, params.ncols);
    auto out_view = raft::make_device_matrix_view<MatrixT, IdxT, row_major>(
      d_out_act.data(), map_length, params.ncols);
    auto map_view = raft::make_device_vector_view<const MapT, IdxT>(d_map.data(), map_length);
    auto stencil_view =
      raft::make_device_vector_view<const MatrixT, IdxT>(d_stencil.data(), map_length);
    // Dispatch to the overload matching the template configuration.
    if (Conditional && MapTransform) {
      raft::matrix::gather_if(
        handle, in_view, out_view, map_view, stencil_view, pred_op, transform_op);
    } else if (Conditional) {
      raft::matrix::gather_if(handle, in_view, out_view, map_view, stencil_view, pred_op);
    } else if (MapTransform && Inplace) {
      raft::matrix::gather(handle, inout_view, map_view, params.col_batch_size, transform_op);
    } else if (MapTransform) {
      raft::matrix::gather(handle, in_view, map_view, out_view, transform_op);
    } else if (Inplace) {
      raft::matrix::gather(handle, inout_view, map_view, params.col_batch_size);
    } else {
      raft::matrix::gather(handle, in_view, map_view, out_view);
    }
    // The in-place variants write into d_in; copy into d_out_act so that the
    // comparison in the test body is uniform across variants.
    if (Inplace) {
      raft::copy_async(d_out_act.data(),
                       d_in.data(),
                       map_length * params.ncols,
                       raft::resource::get_cuda_stream(handle));
    }
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream = 0;
  GatherInputs<IdxT> params;
  std::vector<MatrixT> h_in, h_out, h_stencil;  // host mirrors for the reference
  std::vector<MapT> h_map;
  rmm::device_uvector<MatrixT> d_in, d_out_exp, d_out_act, d_stencil;
  rmm::device_uvector<MapT> d_map;
};
// Stamps out a typed fixture alias + elementwise result check + gtest
// instantiation for one GatherTest configuration.
#define GATHER_TEST(test_type, test_name, test_inputs)                                            \
  typedef RAFT_DEPAREN(test_type) test_name;                                                      \
  TEST_P(test_name, Result)                                                                       \
  {                                                                                               \
    ASSERT_TRUE(                                                                                  \
      devArrMatch(d_out_exp.data(), d_out_act.data(), d_out_exp.size(), raft::Compare<float>())); \
  }                                                                                               \
  INSTANTIATE_TEST_CASE_P(GatherTests, test_name, ::testing::ValuesIn(test_inputs))
// Cartesian products of {nrows, ncols, map_length, col_batch_size, seed}.
const std::vector<GatherInputs<int>> inputs_i32 = raft::util::itertools::product<GatherInputs<int>>(
  {25, 2000}, {6, 31, 129}, {11, 999}, {2, 3, 6}, {1234ULL});
const std::vector<GatherInputs<int64_t>> inputs_i64 =
  raft::util::itertools::product<GatherInputs<int64_t>>(
    {25, 2000}, {6, 31, 129}, {11, 999}, {2, 3, 6}, {1234ULL});
// In-place variants additionally cover col_batch_size of 0 and > ncols.
const std::vector<GatherInputs<int>> inplace_inputs_i32 =
  raft::util::itertools::product<GatherInputs<int>>(
    {25, 2000}, {6, 31, 129}, {11, 999}, {0, 1, 2, 3, 6, 100}, {1234ULL});
const std::vector<GatherInputs<int64_t>> inplace_inputs_i64 =
  raft::util::itertools::product<GatherInputs<int64_t>>(
    {25, 2000}, {6, 31, 129}, {11, 999}, {0, 1, 2, 3, 6, 100}, {1234ULL});
// Template arguments: <Conditional, MapTransform, Inplace, MatrixT, MapT, IdxT>
GATHER_TEST((GatherTest<false, false, false, float, uint32_t, int>), GatherTestFU32I32, inputs_i32);
GATHER_TEST((GatherTest<false, true, false, float, uint32_t, int>),
            GatherTransformTestFU32I32,
            inputs_i32);
GATHER_TEST((GatherTest<true, false, false, float, uint32_t, int>),
            GatherIfTestFU32I32,
            inputs_i32);
GATHER_TEST((GatherTest<true, true, false, float, uint32_t, int>),
            GatherIfTransformTestFU32I32,
            inputs_i32);
GATHER_TEST((GatherTest<true, true, false, double, uint32_t, int>),
            GatherIfTransformTestDU32I32,
            inputs_i32);
GATHER_TEST((GatherTest<true, true, false, float, uint32_t, int64_t>),
            GatherIfTransformTestFU32I64,
            inputs_i64);
GATHER_TEST((GatherTest<true, true, false, float, int64_t, int64_t>),
            GatherIfTransformTestFI64I64,
            inputs_i64);
GATHER_TEST((GatherTest<false, false, true, float, uint32_t, int>),
            GatherInplaceTestFU32I32,
            inplace_inputs_i32);
GATHER_TEST((GatherTest<false, false, true, float, uint32_t, int64_t>),
            GatherInplaceTestFU32I64,
            inplace_inputs_i64);
GATHER_TEST((GatherTest<false, false, true, float, int64_t, int64_t>),
            GatherInplaceTestFI64I64,
            inplace_inputs_i64);
} // end namespace raft | 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/argmin.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <cstdint>
#include <gtest/gtest.h>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/argmin.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace matrix {
// Test parameters for the row-wise argmin test.
template <typename T, typename IdxT>
struct ArgMinInputs {
  std::vector<T> input_matrix;      // row-major n_rows x n_cols input values
  std::vector<IdxT> output_matrix;  // expected argmin column index per row
  std::size_t n_rows;
  std::size_t n_cols;
};
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const ArgMinInputs<T, IdxT>& dims)
{
return os;
}
// Parameterized test: computes the per-row argmin of a row-major device
// matrix with raft::matrix::argmin; the expected indices are supplied in the
// test parameters and compared on device in the TEST_P body.
template <typename T, typename IdxT>
class ArgMinTest : public ::testing::TestWithParam<ArgMinInputs<T, IdxT>> {
 public:
  ArgMinTest()
    : params(::testing::TestWithParam<ArgMinInputs<T, IdxT>>::GetParam()),
      input(raft::make_device_matrix<T, std::uint32_t, row_major>(
        handle, params.n_rows, params.n_cols)),
      output(raft::make_device_vector<IdxT, std::uint32_t>(handle, params.n_rows)),
      expected(raft::make_device_vector<IdxT, std::uint32_t>(handle, params.n_rows))
  {
    // Upload the input matrix and the expected indices to the device.
    raft::update_device(input.data_handle(),
                        params.input_matrix.data(),
                        params.input_matrix.size(),
                        resource::get_cuda_stream(handle));
    raft::update_device(expected.data_handle(),
                        params.output_matrix.data(),
                        params.output_matrix.size(),
                        resource::get_cuda_stream(handle));
    auto input_const_view = raft::make_device_matrix_view<const T, std::uint32_t, row_major>(
      input.data_handle(), input.extent(0), input.extent(1));
    raft::matrix::argmin(handle, input_const_view, output.view());
    resource::sync_stream(handle);
  }
 protected:
  raft::resources handle;
  ArgMinInputs<T, IdxT> params;
  raft::device_matrix<T, std::uint32_t, row_major> input;
  raft::device_vector<IdxT, std::uint32_t> output;    // computed indices
  raft::device_vector<IdxT, std::uint32_t> expected;  // ground-truth indices
};
// 3x4 row-major matrices; per-row minima sit at columns {0, 3, 3}.
const std::vector<ArgMinInputs<float, int>> inputsf = {
  {{0.1f, 0.2f, 0.3f, 0.4f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.5f, 0.0f}, {0, 3, 3}, 3, 4}};
const std::vector<ArgMinInputs<double, int>> inputsd = {
  {{0.1, 0.2, 0.3, 0.4, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.5, 0.0}, {0, 3, 3}, 3, 4}};
typedef ArgMinTest<float, int> ArgMinTestF;
TEST_P(ArgMinTestF, Result)
{
  ASSERT_TRUE(devArrMatch(expected.data_handle(),
                          output.data_handle(),
                          params.n_rows,
                          Compare<int>(),
                          resource::get_cuda_stream(handle)));
}
typedef ArgMinTest<double, int> ArgMinTestD;
TEST_P(ArgMinTestD, Result)
{
  ASSERT_TRUE(devArrMatch(expected.data_handle(),
                          output.data_handle(),
                          params.n_rows,
                          Compare<int>(),
                          resource::get_cuda_stream(handle)));
}
INSTANTIATE_TEST_SUITE_P(ArgMinTest, ArgMinTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(ArgMinTest, ArgMinTestD, ::testing::ValuesIn(inputsd));
} // namespace matrix
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/triangular.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/init.cuh>
#include <raft/matrix/triangular.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace matrix {
// Test parameters for the upper-triangular extraction test.
// NOTE(review): the template parameter T is not referenced by any field; it
// only distinguishes TriangularInputs<float>/<double> — confirm intent.
template <typename T>
struct TriangularInputs {
  int rows, cols;
  unsigned long long int seed;  // RNG seed for the random input matrix
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const TriangularInputs<T>& I)
{
os << "{ " << I.rows << ", " << I.cols << ", " << I.seed << '}' << std::endl;
return os;
}
// triangular reference test
// Host reference for matrix::upper_triangular: copies the upper-triangular
// part (row <= col) of the leading k x k sub-matrix of a column-major
// rows x cols input into `out` (k x k, column-major), k = min(rows, cols).
// Entries below the diagonal in `out` are left untouched.
template <typename Type>
void naive_triangular(std::vector<Type>& in, std::vector<Type>& out, int rows, int cols)
{
  const int k = std::min(rows, cols);
  for (int col = 0; col < k; col++) {
    for (int row = 0; row <= col; row++) {
      out[col * k + row] = in[col * rows + row];
    }
  }
}
// Parameterized test: extracts the upper triangle of a random column-major
// rows x cols matrix with raft::matrix::upper_triangular and compares it
// against the host reference naive_triangular.
template <typename T>
class TriangularTest : public ::testing::TestWithParam<TriangularInputs<T>> {
 public:
  TriangularTest()
    : params(::testing::TestWithParam<TriangularInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream)
  {
  }
  void SetUp() override
  {
    // Fix: removed unused std::random_device / default_random_engine locals;
    // the RNG actually used is RngState seeded from the test parameters.
    raft::random::RngState r(params.seed);
    int rows = params.rows, cols = params.cols, len = rows * cols;
    // The output is the k x k leading sub-matrix, k = min(rows, cols).
    auto k = std::min(rows, cols);
    rmm::device_uvector<T> d_act_result(len, stream);
    std::vector<T> h_data(len);
    act_result.resize(k * k);
    exp_result.resize(k * k);  // value-initialized to T(0), matching the device fill
    uniform(handle, r, data.data(), len, T(-10.0), T(10.0));
    raft::update_host(h_data.data(), data.data(), len, stream);
    // Fix: wait for the asynchronous device->host copy before the host
    // reference reads h_data below; update_host enqueues cudaMemcpyAsync.
    resource::sync_stream(handle, stream);
    // Zero the device output first: upper_triangular writes only the upper
    // triangle, while the comparison covers the full k x k buffer.
    raft::matrix::fill(
      handle,
      raft::make_device_matrix_view<T, int, raft::col_major>(d_act_result.data(), k, k),
      T(0));
    upper_triangular(
      handle,
      raft::make_device_matrix_view<const T, int, raft::col_major>(data.data(), rows, cols),
      raft::make_device_matrix_view<T, int, raft::col_major>(d_act_result.data(), k, k));
    naive_triangular(h_data, exp_result, rows, cols);
    raft::update_host(act_result.data(), d_act_result.data(), k * k, stream);
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  TriangularInputs<T> params;
  rmm::device_uvector<T> data;            // device input, column-major rows x cols
  std::vector<T> exp_result, act_result;  // host expected / actual k x k outputs
};
///// Upper-triangular extraction tests over assorted matrix shapes
const std::vector<TriangularInputs<float>> inputsf = {{4, 4, 1234ULL},
                                                      {2, 64, 1234ULL},
                                                      {64, 512, 1234ULL},
                                                      {64, 1024, 1234ULL},
                                                      {256, 1024, 1234ULL},
                                                      {512, 512, 1234ULL},
                                                      {1024, 32, 1234ULL},
                                                      {1024, 128, 1234ULL},
                                                      {1024, 256, 1234ULL}};
const std::vector<TriangularInputs<double>> inputsd = {{4, 4, 1234ULL},
                                                       {2, 64, 1234ULL},
                                                       {64, 512, 1234ULL},
                                                       {64, 1024, 1234ULL},
                                                       {256, 1024, 1234ULL},
                                                       {512, 512, 1234ULL},
                                                       {1024, 32, 1234ULL},
                                                       {1024, 128, 1234ULL},
                                                       {1024, 256, 1234ULL}};
typedef TriangularTest<float> TriangularTestF;
// Exact (bitwise) match is expected: the op only copies elements.
TEST_P(TriangularTestF, Result)
{
  ASSERT_TRUE(hostVecMatch(exp_result, act_result, raft::Compare<float>()));
}
typedef TriangularTest<double> TriangularTestD;
TEST_P(TriangularTestD, Result)
{
  ASSERT_TRUE(hostVecMatch(exp_result, act_result, raft::Compare<double>()));
}
INSTANTIATE_TEST_CASE_P(TriangularTests, TriangularTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(TriangularTests, TriangularTestD, ::testing::ValuesIn(inputsd));
} // end namespace matrix
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/select_large_k.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "select_k.cuh"
namespace raft::matrix {
// Large-k configurations (k up to the full row length);
// select::params fields: {batch_size, len, k, select_min, ...}.
auto inputs_random_largek = testing::Values(select::params{100, 100000, 1000, true},
                                            select::params{100, 100000, 2000, false},
                                            select::params{100, 100000, 100000, true, false},
                                            select::params{100, 100000, 2048, false},
                                            select::params{100, 100000, 1237, true});
// Ground truth computed with kRadix8bits; the 11-bit radix variants must agree.
using ReferencedRandomFloatSizeT =
  SelectK<float, int64_t, with_ref<select::Algo::kRadix8bits>::params_random>;
TEST_P(ReferencedRandomFloatSizeT, LargeK) { run(); }  // NOLINT
INSTANTIATE_TEST_CASE_P(SelectK,  // NOLINT
                        ReferencedRandomFloatSizeT,
                        testing::Combine(inputs_random_largek,
                                         testing::Values(select::Algo::kRadix11bits,
                                                         select::Algo::kRadix11bitsExtraPass)));
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/linewise_op.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../linalg/matrix_vector_op.cuh"
#include "../test_utils.cuh"
#include <cuda_profiler_api.h>
#include <gtest/gtest.h>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/nvtx.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/matrix/linewise_op.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace matrix {
// Padding (in elements) reserved around device buffers when deriving matrix
// dimensions from the scratch budget (see suggestDimensions); presumably it
// also leaves room for the alignment-offset variants — confirm in the runners.
constexpr std::size_t PTR_PADDING = 128;
// Parameters shared by all linewise-op test variants.
struct LinewiseTestParams {
  double tolerance;           // numeric tolerance for result comparison
  std::size_t workSizeBytes;  // scratch budget used to size the test matrices
  uint64_t seed;              // RNG seed for reproducible inputs
  bool checkCorrectness;      // presumably gates result verification — confirm
  int inAlignOffset;          // element offset applied to the input pointer
  int outAlignOffset;         // element offset applied to the output pointer
};
/**
 * Parameterized test fixture for raft::matrix::linewise_op (apply vector(s) to
 * every line -- row or column -- of a matrix).
 *
 * T: element type; I: index type; ParamsReader: provides the gtest parameter
 * tuple type `Params` and a `read()` converting it to LinewiseTestParams.
 *
 * All device buffers for a case are carved out of one big allocation at
 * deliberately misaligned offsets (see assignSafePtrs) to exercise the
 * unaligned code paths of the kernels.
 */
template <typename T, typename I, typename ParamsReader>
struct LinewiseTest : public ::testing::TestWithParam<typename ParamsReader::Params> {
  const raft::resources handle;
  const LinewiseTestParams params;
  rmm::cuda_stream_view stream;
  LinewiseTest()
    : testing::TestWithParam<typename ParamsReader::Params>(),
      params(
        ParamsReader::read(::testing::TestWithParam<typename ParamsReader::Params>::GetParam())),
      handle(),
      stream(resource::get_cuda_stream(handle))
  {
  }
  // out = in + vec, with vec broadcast along every line; `layout` selects the
  // row-/col-major instantiation of linewise_op.
  template <typename layout>
  void runLinewiseSum(T* out, const T* in, const I lineLen, const I nLines, const T* vec)
  {
    constexpr auto rowmajor = std::is_same_v<layout, row_major>;
    I m = rowmajor ? lineLen : nLines;
    I n = rowmajor ? nLines : lineLen;
    auto in_view = raft::make_device_matrix_view<const T, I, layout>(in, n, m);
    auto out_view = raft::make_device_matrix_view<T, I, layout>(out, n, m);
    auto vec_view = raft::make_device_vector_view<const T>(vec, lineLen);
    matrix::linewise_op(
      handle, in_view, out_view, raft::is_row_major(in_view), raft::add_op{}, vec_view);
  }
  // Two-vector variant: out = in + vec1 + vec2 (elementwise per line) via a
  // custom device ternary op.
  template <typename layout>
  void runLinewiseSum(
    T* out, const T* in, const I lineLen, const I nLines, const T* vec1, const T* vec2)
  {
    auto f = [] __device__(T a, T b, T c) -> T { return a + b + c; };
    constexpr auto rowmajor = std::is_same_v<layout, row_major>;
    I m = rowmajor ? lineLen : nLines;
    I n = rowmajor ? nLines : lineLen;
    auto in_view = raft::make_device_matrix_view<const T, I, layout>(in, n, m);
    auto out_view = raft::make_device_matrix_view<T, I, layout>(out, n, m);
    auto vec1_view = raft::make_device_vector_view<const T, I>(vec1, lineLen);
    auto vec2_view = raft::make_device_vector_view<const T, I>(vec2, lineLen);
    matrix::linewise_op(
      handle, in_view, out_view, raft::is_row_major(in_view), f, vec1_view, vec2_view);
  }
  // Allocates one big device blob and fills it with uniform random values in
  // [-1, 1]; all test buffers are later carved out of it.
  rmm::device_uvector<T> genData(size_t workSizeBytes)
  {
    raft::random::RngState r(params.seed);
    const std::size_t workSizeElems = workSizeBytes / sizeof(T);
    rmm::device_uvector<T> blob(workSizeElems, stream);
    uniform(handle, r, blob.data(), workSizeElems, T(-1.0), T(1.0));
    return blob;
  }
  // Padded-span variant: out = in + vec, where in/out are aligned (padded) mdspans.
  template <typename layout>
  void runLinewiseSumPadded(raft::device_aligned_matrix_view<T, I, layout> out,
                            raft::device_aligned_matrix_view<const T, I, layout> in,
                            const I lineLen,
                            const I nLines,
                            const bool alongLines,
                            const T* vec)
  {
    auto vec_view = raft::make_device_vector_view<const T, I>(vec, alongLines ? lineLen : nLines);
    matrix::linewise_op(handle, in, out, alongLines, raft::add_op{}, vec_view);
  }
  /**
   * Suggest multiple versions of matrix dimensions (n, m), such that
   *
   * (2 * n * m + numVectors * m + minUnused) * sizeof(T) <= workSize.
   *
   * This way I know I can create two matrices and numVectors vectors of size m,
   * such that they fit into the allocated workSet.
   */
  std::vector<std::tuple<I, I>> suggestDimensions(I numVectors)
  {
    const std::size_t workSizeElems = params.workSizeBytes / sizeof(T);
    std::vector<std::tuple<I, I>> out;
    const double b = double(numVectors);
    // Budget in elements after reserving padding slack for every carved pointer.
    const double s = double(workSizeElems) - double(PTR_PADDING * 2 * (2 + b));
    double squareN = 0.25 * (sqrt(8.0 * s + b * b) - b);
    auto solveForN = [s, b](I m) -> double { return (s - b * double(m)) / double(2 * m); };
    auto solveForM = [s, b](I n) -> double { return s / double(2 * n + b); };
    auto addIfMakesSense = [&out](double x, double y) {
      if (x <= 0 || y <= 0) return;
      I n = I(floor(x));
      I m = I(floor(y));
      if (n > 0 && m > 0) out.push_back(std::make_tuple(n, m));
    };
    std::vector<double> sizes = {15, 16, 17, 256, 257, 263, 1024};
    addIfMakesSense(squareN, squareN);
    for (I k : sizes) {
      addIfMakesSense(solveForN(k), k);
      addIfMakesSense(k, solveForM(k));
    }
    return out;
  }
  // Carves (out, in, vec1, vec2) pointers out of the blob: each is rounded up
  // to the PTR_PADDING boundary, then in/out are shifted by the configured
  // misalignment offsets. Asserts that the blob is large enough.
  std::tuple<T*, const T*, const T*, const T*> assignSafePtrs(rmm::device_uvector<T>& blob,
                                                              I n,
                                                              I m)
  {
    typedef raft::Pow2<PTR_PADDING> Align;
    T* out = Align::roundUp(blob.data()) + params.outAlignOffset;
    const T* in =
      const_cast<const T*>(Align::roundUp(out + n * m + PTR_PADDING)) + params.inAlignOffset;
    const T* vec1 = Align::roundUp(in + n * m + PTR_PADDING);
    const T* vec2 = Align::roundUp(vec1 + m + PTR_PADDING);
    ASSERT(blob.data() + blob.size() >= vec2 + PTR_PADDING,
           "Failed to allocate pointers: the workset is not big enough.");
    return std::make_tuple(out, in, vec1, vec2);
  }
  // Runs the one- and two-vector sums for every suggested (n, m) and both line
  // directions; when checkCorrectness is set, validates against
  // linalg::naiveMatVec. Stops at the first failure.
  testing::AssertionResult run(std::vector<std::tuple<I, I>>&& dims, rmm::device_uvector<T>&& blob)
  {
    rmm::device_uvector<T> blob_val(params.checkCorrectness ? blob.size() / 2 : 0, stream);
    stream.synchronize();
    cudaProfilerStart();
    testing::AssertionResult r = testing::AssertionSuccess();
    for (auto [n, m] : dims) {
      if (!r) break;
      auto [out, in, vec1, vec2] = assignSafePtrs(blob, n, m);
      common::nvtx::range dims_scope("Dims-%zu-%zu", std::size_t(n), std::size_t(m));
      for (auto alongRows : ::testing::Bool()) {
        common::nvtx::range dir_scope(alongRows ? "alongRows" : "acrossRows");
        auto lineLen = alongRows ? m : n;
        auto nLines = alongRows ? n : m;
        {
          {
            common::nvtx::range vecs_scope("one vec");
            if (alongRows) {
              runLinewiseSum<raft::row_major>(out, in, lineLen, nLines, vec1);
            } else {
              runLinewiseSum<raft::col_major>(out, in, lineLen, nLines, vec1);
            }
          }
          if (params.checkCorrectness) {
            linalg::naiveMatVec(
              blob_val.data(), in, vec1, lineLen, nLines, true, alongRows, T(1), stream);
            r = devArrMatch(blob_val.data(), out, n * m, CompareApprox<T>(params.tolerance))
                << " " << (alongRows ? "alongRows" : "acrossRows")
                << " with one vec; lineLen: " << lineLen << "; nLines " << nLines;
            if (!r) break;
          }
          {
            common::nvtx::range vecs_scope("two vecs");
            if (alongRows) {
              runLinewiseSum<raft::row_major>(out, in, lineLen, nLines, vec1, vec2);
            } else {
              runLinewiseSum<raft::col_major>(out, in, lineLen, nLines, vec1, vec2);
            }
          }
          if (params.checkCorrectness) {
            linalg::naiveMatVec(
              blob_val.data(), in, vec1, vec2, lineLen, nLines, true, alongRows, T(1), stream);
            r = devArrMatch(blob_val.data(), out, n * m, CompareApprox<T>(params.tolerance))
                << " " << (alongRows ? "alongRows" : "acrossRows")
                << " with two vecs; lineLen: " << lineLen << "; nLines " << nLines;
            if (!r) break;
          }
        }
      }
    }
    cudaProfilerStop();
    return r;
  }
  // Padded-span variant of run(): copies dense test data into a
  // layout_{right,left}_padded buffer, applies the op through aligned mdspans,
  // and compares the unpadded result against the dense implementation.
  testing::AssertionResult runWithPaddedSpan(std::vector<std::tuple<I, I>>&& dims,
                                             rmm::device_uvector<T>&& blob)
  {
    rmm::device_uvector<T> blob_val(params.checkCorrectness ? blob.size() / 2 : 0, stream);
    stream.synchronize();
    cudaProfilerStart();
    testing::AssertionResult r = testing::AssertionSuccess();
    for (auto alongRows : ::testing::Bool()) {
      for (auto [n, m] : dims) {
        if (!r) break;
        // take dense testdata
        auto [out, in, vec1, vec2] = assignSafePtrs(blob, n, m);
        common::nvtx::range dims_scope("Dims-%zu-%zu", std::size_t(n), std::size_t(m));
        common::nvtx::range dir_scope(alongRows ? "alongRows" : "acrossRows");
        auto lineLen = alongRows ? m : n;
        auto nLines = alongRows ? n : m;
        // create a padded span based on testdata (just for functional testing)
        size_t matrix_size_padded;
        if (alongRows) {
          auto extents = matrix_extent<I>{n, m};
          typename raft::layout_right_padded<T>::mapping<matrix_extent<I>> layout{extents};
          matrix_size_padded = layout.required_span_size();
        } else {
          auto extents = matrix_extent<I>{n, m};
          typename raft::layout_left_padded<T>::mapping<matrix_extent<I>> layout{extents};
          matrix_size_padded = layout.required_span_size();
        }
        rmm::device_uvector<T> blob_in(matrix_size_padded, stream);
        rmm::device_uvector<T> blob_out(matrix_size_padded, stream);
        {
          auto in2 = in;
          // actual testrun
          common::nvtx::range vecs_scope("one vec");
          if (alongRows) {
            auto inSpan = make_device_aligned_matrix_view<T, I, raft::layout_right_padded<T>>(
              blob_in.data(), n, m);
            auto outSpan = make_device_aligned_matrix_view<T, I, raft::layout_right_padded<T>>(
              blob_out.data(), n, m);
            // prep padded input data
            thrust::for_each_n(rmm::exec_policy(stream),
                               thrust::make_counting_iterator(0ul),
                               nLines * lineLen,
                               [inSpan, in2, lineLen] __device__(size_t i) {
                                 inSpan(i / lineLen, i % lineLen) = in2[i];
                               });
            auto inSpanConst =
              make_device_aligned_matrix_view<const T, I, raft::layout_right_padded<T>>(
                blob_in.data(), n, m);
            runLinewiseSumPadded<raft::layout_right_padded<T>>(
              outSpan, inSpanConst, lineLen, nLines, alongRows, vec1);
            if (params.checkCorrectness) {
              // dense reference result goes into `out`, padded result is
              // unpacked into blob_val for comparison
              runLinewiseSum<raft::row_major>(out, in, lineLen, nLines, vec1);
              auto out_dense = blob_val.data();
              thrust::for_each_n(rmm::exec_policy(stream),
                                 thrust::make_counting_iterator(0ul),
                                 nLines * lineLen,
                                 [outSpan, out_dense, lineLen] __device__(size_t i) {
                                   out_dense[i] = outSpan(i / lineLen, i % lineLen);
                                 });
              r = devArrMatch(out_dense, out, n * m, CompareApprox<T>(params.tolerance))
                  << " " << (alongRows ? "alongRows" : "acrossRows")
                  << " with one vec; lineLen: " << lineLen << "; nLines " << nLines;
              if (!r) break;
            }
          } else {
            auto inSpan = make_device_aligned_matrix_view<T, I, raft::layout_left_padded<T>>(
              blob_in.data(), n, m);
            auto outSpan = make_device_aligned_matrix_view<T, I, raft::layout_left_padded<T>>(
              blob_out.data(), n, m);
            // prep padded input data
            thrust::for_each_n(rmm::exec_policy(stream),
                               thrust::make_counting_iterator(0ul),
                               nLines * lineLen,
                               [inSpan, in2, lineLen] __device__(size_t i) {
                                 inSpan(i % lineLen, i / lineLen) = in2[i];
                               });
            auto inSpanConst =
              make_device_aligned_matrix_view<const T, I, raft::layout_left_padded<T>>(
                blob_in.data(), n, m);
            runLinewiseSumPadded<raft::layout_left_padded<T>>(
              outSpan, inSpanConst, lineLen, nLines, alongRows, vec1);
            if (params.checkCorrectness) {
              runLinewiseSum<raft::col_major>(out, in, lineLen, nLines, vec1);
              auto out_dense = blob_val.data();
              thrust::for_each_n(rmm::exec_policy(stream),
                                 thrust::make_counting_iterator(0ul),
                                 nLines * lineLen,
                                 [outSpan, out_dense, lineLen] __device__(size_t i) {
                                   out_dense[i] = outSpan(i % lineLen, i / lineLen);
                                 });
              r = devArrMatch(out_dense, out, n * m, CompareApprox<T>(params.tolerance))
                  << " " << (alongRows ? "alongRows" : "acrossRows")
                  << " with one vec; lineLen: " << lineLen << "; nLines " << nLines;
              if (!r) break;
            }
          }
        }
      }
    }
    cudaProfilerStop();
    return r;
  }
  // Entry points invoked by the TEST_IT / TEST_IT_SPAN macros below.
  testing::AssertionResult run()
  {
    return run(suggestDimensions(2), genData(params.workSizeBytes));
  }
  testing::AssertionResult runWithPaddedSpan()
  {
    return runWithPaddedSpan(suggestDimensions(2), genData(params.workSizeBytes));
  }
  // Small matrices (1..16 per side), all combinations, with a fixed 1 MiB workset.
  testing::AssertionResult runEdgeCases()
  {
    std::vector<I> sizes = {1, 2, 3, 4, 7, 16};
    std::vector<std::tuple<I, I>> dims;
    for (auto m : sizes) {
      for (auto n : sizes) {
        dims.push_back(std::make_tuple(m, n));
      }
    }
    return run(std::move(dims), genData(1024 * 1024));
  }
};
// Instantiates a value-parameterized suite <TestClass>_<Elem>_<Index> whose single
// test runs `fun()` for every combination in <TestClass>Params.
// (No comments inside the macro bodies: a `//` before a line-continuation `\`
// would splice the next line into the comment.)
#define TEST_IT(fun, TestClass, ElemType, IndexType)                                        \
  typedef LinewiseTest<ElemType, IndexType, TestClass> TestClass##_##ElemType##_##IndexType; \
  TEST_P(TestClass##_##ElemType##_##IndexType, fun) { ASSERT_TRUE(fun()); }                 \
  INSTANTIATE_TEST_SUITE_P(LinewiseOp, TestClass##_##ElemType##_##IndexType, TestClass##Params)
// Same, but the suite name carries "Span" and all instantiations share SpanParams.
#define TEST_IT_SPAN(fun, TestClass, ElemType, IndexType)                                      \
  typedef LinewiseTest<ElemType, IndexType, TestClass> TestClass##Span_##ElemType##_##IndexType; \
  TEST_P(TestClass##Span_##ElemType##_##IndexType, fun) { ASSERT_TRUE(fun()); }                \
  INSTANTIATE_TEST_SUITE_P(LinewiseOpSpan, TestClass##Span_##ElemType##_##IndexType, SpanParams)
// Padded-span tests use a single (0, 0) in/out alignment combination.
auto SpanParams = ::testing::Combine(::testing::Values(0), ::testing::Values(0));
// Cross product of input/output misalignment offsets for the small worksets.
auto TinyParams = ::testing::Combine(::testing::Values(0, 1, 2, 4), ::testing::Values(0, 1, 2, 3));
/** Edge-case configuration: correctness checked; alignment offsets come from the tuple. */
struct Tiny {
  typedef std::tuple<int, int> Params;
  static LinewiseTestParams read(Params ps)
  {
    LinewiseTestParams p{};
    p.tolerance        = 0.00001;
    p.workSizeBytes    = 0;  // unused: runEdgeCases allocates its own 1 MiB workset
    p.seed             = 42ULL;
    p.checkCorrectness = true;
    p.inAlignOffset    = std::get<0>(ps);
    p.outAlignOffset   = std::get<1>(ps);
    return p;
  }
};
auto MegabyteParams = TinyParams;
/** 1 MiB workset; results are validated against the naive reference. */
struct Megabyte {
  typedef std::tuple<int, int> Params;
  static LinewiseTestParams read(Params ps)
  {
    LinewiseTestParams p{};
    p.tolerance        = 0.00001;
    p.workSizeBytes    = 1024 * 1024;
    p.seed             = 42ULL;
    p.checkCorrectness = true;
    p.inAlignOffset    = std::get<0>(ps);
    p.outAlignOffset   = std::get<1>(ps);
    return p;
  }
};
auto GigabyteParams = ::testing::Combine(::testing::Values(0, 1, 2), ::testing::Values(0, 1, 2));
/** 1 GiB workset; too large to validate, so it only checks clean execution. */
struct Gigabyte {
  typedef std::tuple<int, int> Params;
  static LinewiseTestParams read(Params ps)
  {
    LinewiseTestParams p{};
    p.tolerance        = 0.00001;
    p.workSizeBytes    = 1024 * 1024 * 1024;
    p.seed             = 42ULL;
    p.checkCorrectness = false;
    p.inAlignOffset    = std::get<0>(ps);
    p.outAlignOffset   = std::get<1>(ps);
    return p;
  }
};
auto TenGigsParams = GigabyteParams;
/** 10 GiB workset, run-only; element counts here require the 64-bit index type. */
struct TenGigs {
  typedef std::tuple<int, int> Params;
  static LinewiseTestParams read(Params ps)
  {
    LinewiseTestParams p{};
    p.tolerance        = 0.00001;
    p.workSizeBytes    = 10ULL * 1024ULL * 1024ULL * 1024ULL;
    p.seed             = 42ULL;
    p.checkCorrectness = false;
    p.inAlignOffset    = std::get<0>(ps);
    p.outAlignOffset   = std::get<1>(ps);
    return p;
  }
};
// Edge cases: tiny matrices, every alignment combination, correctness checked.
TEST_IT(runEdgeCases, Tiny, float, int);
TEST_IT(runEdgeCases, Tiny, double, int);
// 1 MiB worksets, validated against the naive mat-vec reference.
TEST_IT(run, Megabyte, float, int);
TEST_IT(run, Megabyte, double, int);
// Large worksets: execution-only (no validation), mainly stress coverage.
TEST_IT(run, Gigabyte, float, int);
TEST_IT(run, Gigabyte, double, int);
// 10 GiB worksets are instantiated with a 64-bit index type.
TEST_IT(run, TenGigs, float, uint64_t);
TEST_IT(run, TenGigs, double, uint64_t);
// Functional coverage for the padded (layout_{left,right}_padded) mdspans.
TEST_IT_SPAN(runWithPaddedSpan, Megabyte, float, int);
TEST_IT_SPAN(runWithPaddedSpan, Megabyte, double, int);
TEST_IT_SPAN(runWithPaddedSpan, Gigabyte, float, int);
TEST_IT_SPAN(runWithPaddedSpan, Gigabyte, double, int);
} // namespace matrix
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/math.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/matrix/power.cuh>
#include <raft/matrix/ratio.cuh>
#include <raft/matrix/reciprocal.cuh>
#include <raft/matrix/sign_flip.cuh>
#include <raft/matrix/sqrt.cuh>
#include <raft/matrix/threshold.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace matrix {
// Reference kernel: elementwise square (out[i] = in[i]^2), one thread per element.
template <typename Type>
RAFT_KERNEL naivePowerKernel(Type* in, Type* out, int len)
{
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < len) { out[idx] = in[idx] * in[idx]; }  // guard the partial last block
}
// Launches the reference elementwise-square kernel: 64-thread blocks, rounded
// up so every element of `in`/`out` (length `len`) is covered.
template <typename Type>
void naivePower(Type* in, Type* out, int len, cudaStream_t stream)
{
  constexpr int block_dim = 64;
  const int grid_dim      = raft::ceildiv(len, block_dim);
  naivePowerKernel<Type><<<grid_dim, block_dim, 0, stream>>>(in, out, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Reference kernel: elementwise square root, one thread per element.
template <typename Type>
RAFT_KERNEL naiveSqrtKernel(Type* in, Type* out, int len)
{
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < len) { out[idx] = raft::sqrt(in[idx]); }  // guard the partial last block
}
// Launches the reference elementwise-sqrt kernel: 64-thread blocks, rounded up
// so every element of `in`/`out` (length `len`) is covered.
template <typename Type>
void naiveSqrt(Type* in, Type* out, int len, cudaStream_t stream)
{
  constexpr int block_dim = 64;
  const int grid_dim      = raft::ceildiv(len, block_dim);
  naiveSqrtKernel<Type><<<grid_dim, block_dim, 0, stream>>>(in, out, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Reference implementation of sign_flip. One block per column (launched with a
// single thread per block, see naiveSignFlip): find the element with the
// largest absolute value in the column; if that element is negative, negate
// the whole column on output, otherwise copy it unchanged.
template <typename Type>
RAFT_KERNEL naiveSignFlipKernel(Type* in, Type* out, int rowCount, int colCount)
{
  int d_i = blockIdx.x * rowCount;  // first index of this block's column
  int end = d_i + rowCount;         // one past the last index of the column
  if (blockIdx.x < colCount) {
    Type max      = 0.0;
    int max_index = 0;
    for (int i = d_i; i < end; i++) {
      Type val = in[i];
      if (val < 0.0) { val = -val; }
      if (val > max) {
        max       = val;
        max_index = i;
      }
    }
    for (int i = d_i; i < end; i++) {
      if (in[max_index] < 0.0) {
        out[i] = -in[i];
      } else {
        out[i] = in[i];
      }
    }
  }
  // NOTE: a trailing __syncthreads() was removed here. The kernel is launched
  // with one thread per block, so the barrier was a no-op that only suggested
  // a cross-thread dependency that does not exist.
}
// Launches the reference sign-flip kernel: one single-threaded block per column
// (simplicity over speed -- this is only a reference for the test).
template <typename Type>
void naiveSignFlip(Type* in, Type* out, int rowCount, int colCount, cudaStream_t stream)
{
  naiveSignFlipKernel<Type><<<colCount, 1, 0, stream>>>(in, out, rowCount, colCount);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/** Parameters for the matrix-math tests. */
template <typename T>
struct MathInputs {
  T tolerance;   // approximate-compare tolerance
  int n_row;     // rows of the sign-flip test matrix
  int n_col;     // cols of the sign-flip test matrix
  int len;       // total element count for the elementwise (power/sqrt/sign-flip) buffers
  unsigned long long int seed;  // RNG seed
};
// Stream operator required by the gtest parameter printer; intentionally prints nothing.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MathInputs<T>& dims)
{
  return os;
}
/**
 * Fixture for power/sqrt/ratio/sign_flip/reciprocal/zero_small_values.
 * All operations are executed once in SetUp (in-place where the API allows);
 * the individual TEST_Ps below only compare the resulting buffers against the
 * references computed here.
 */
template <typename T>
class MathTest : public ::testing::TestWithParam<MathInputs<T>> {
 public:
  MathTest()
    : params(::testing::TestWithParam<MathInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in_power(params.len, stream),
      out_power_ref(params.len, stream),
      in_sqrt(params.len, stream),
      out_sqrt_ref(params.len, stream),
      in_sign_flip(params.len, stream),
      out_sign_flip_ref(params.len, stream),
      in_ratio(4, stream),
      out_ratio_ref(4, stream),
      in_recip(4, stream),
      in_recip_ref(4, stream),
      out_recip(4, stream),
      in_smallzero(4, stream),
      out_smallzero(4, stream),
      out_smallzero_ref(4, stream)
  {
  }
 protected:
  void SetUp() override
  {
    random::RngState r(params.seed);
    int len = params.len;
    // ratio: 1+2+2+3 = 8, so the expected normalized values are x/8.
    T in_ratio_h[4] = {1.0, 2.0, 2.0, 3.0};
    update_device(in_ratio.data(), in_ratio_h, 4, stream);
    T out_ratio_ref_h[4] = {0.125, 0.25, 0.25, 0.375};
    update_device(out_ratio_ref.data(), out_ratio_ref_h, 4, stream);
    uniform(handle, r, in_power.data(), len, T(-1.0), T(1.0));
    uniform(handle, r, in_sqrt.data(), len, T(0.0), T(1.0));
    // uniform(r, in_ratio, len, T(0.0), T(1.0));
    uniform(handle, r, in_sign_flip.data(), len, T(-100.0), T(100.0));
    // power: reference into out_power_ref, then in-place on in_power.
    naivePower(in_power.data(), out_power_ref.data(), len, stream);
    auto in_power_view = raft::make_device_matrix_view<T>(in_power.data(), len, 1);
    power<T>(handle, in_power_view);
    // sqrt: reference into out_sqrt_ref, then in-place on in_sqrt.
    naiveSqrt(in_sqrt.data(), out_sqrt_ref.data(), len, stream);
    auto in_sqrt_view = raft::make_device_matrix_view(in_sqrt.data(), len, 1);
    sqrt<T>(handle, in_sqrt_view);
    auto in_ratio_view = raft::make_device_matrix_view<T>(in_ratio.data(), 4, 1);
    ratio<T>(handle, in_ratio_view);
    // sign_flip: reference into out_sign_flip_ref, then in-place on in_sign_flip.
    naiveSignFlip(
      in_sign_flip.data(), out_sign_flip_ref.data(), params.n_row, params.n_col, stream);
    auto in_sign_flip_view = raft::make_device_matrix_view<T, int, col_major>(
      in_sign_flip.data(), params.n_row, params.n_col);
    sign_flip<T>(handle, in_sign_flip_view);
    // default threshold is 1e-15
    std::vector<T> in_recip_h = {0.1, 0.01, -0.01, 0.1e-16};
    std::vector<T> in_recip_ref_h = {10.0, 100.0, -100.0, 0.0};
    update_device(in_recip.data(), in_recip_h.data(), 4, stream);
    update_device(in_recip_ref.data(), in_recip_ref_h.data(), 4, stream);
    T recip_scalar = T(1.0);
    auto in_recip_view = raft::make_device_matrix_view<const T>(in_recip.data(), 4, 1);
    auto out_recip_view = raft::make_device_matrix_view<T>(out_recip.data(), 4, 1);
    // this `reciprocal()` has to go first bc next one modifies its input
    reciprocal<T>(
      handle, in_recip_view, out_recip_view, raft::make_host_scalar_view(&recip_scalar));
    auto inout_recip_view = raft::make_device_matrix_view<T>(in_recip.data(), 4, 1);
    reciprocal<T>(handle, inout_recip_view, raft::make_host_scalar_view(&recip_scalar), true);
    // zero_small_values: values below the threshold become exactly zero.
    std::vector<T> in_small_val_zero_h = {0.1, 1e-16, -1e-16, -0.1};
    std::vector<T> in_small_val_zero_ref_h = {0.1, 0.0, 0.0, -0.1};
    auto in_smallzero_view = raft::make_device_matrix_view<const T>(in_smallzero.data(), 4, 1);
    auto inout_smallzero_view = raft::make_device_matrix_view<T>(in_smallzero.data(), 4, 1);
    auto out_smallzero_view = raft::make_device_matrix_view<T>(out_smallzero.data(), 4, 1);
    update_device(in_smallzero.data(), in_small_val_zero_h.data(), 4, stream);
    update_device(out_smallzero_ref.data(), in_small_val_zero_ref_h.data(), 4, stream);
    zero_small_values<T>(handle, in_smallzero_view, out_smallzero_view);
    zero_small_values<T>(handle, inout_smallzero_view);
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  MathInputs<T> params;
  rmm::device_uvector<T> in_power, out_power_ref, in_sqrt, out_sqrt_ref, in_ratio, out_ratio_ref,
    in_sign_flip, out_sign_flip_ref, in_recip, in_recip_ref, out_recip, in_smallzero, out_smallzero,
    out_smallzero_ref;
};
// One parameter set per element type; all ops were executed in MathTest::SetUp,
// so each TEST_P below is a pure comparison of result vs. reference buffers.
const std::vector<MathInputs<float>> inputsf = {{0.00001f, 1024, 1024, 1024 * 1024, 1234ULL}};
const std::vector<MathInputs<double>> inputsd = {{0.00001, 1024, 1024, 1024 * 1024, 1234ULL}};
typedef MathTest<float> MathPowerTestF;
TEST_P(MathPowerTestF, Result)
{
  ASSERT_TRUE(devArrMatch(in_power.data(),
                          out_power_ref.data(),
                          params.len,
                          CompareApprox<float>(params.tolerance),
                          stream));
}
typedef MathTest<double> MathPowerTestD;
TEST_P(MathPowerTestD, Result)
{
  ASSERT_TRUE(devArrMatch(in_power.data(),
                          out_power_ref.data(),
                          params.len,
                          CompareApprox<double>(params.tolerance),
                          stream));
}
typedef MathTest<float> MathSqrtTestF;
TEST_P(MathSqrtTestF, Result)
{
  ASSERT_TRUE(devArrMatch(in_sqrt.data(),
                          out_sqrt_ref.data(),
                          params.len,
                          CompareApprox<float>(params.tolerance),
                          stream));
}
typedef MathTest<double> MathSqrtTestD;
TEST_P(MathSqrtTestD, Result)
{
  ASSERT_TRUE(devArrMatch(in_sqrt.data(),
                          out_sqrt_ref.data(),
                          params.len,
                          CompareApprox<double>(params.tolerance),
                          stream));
}
typedef MathTest<float> MathRatioTestF;
TEST_P(MathRatioTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    in_ratio.data(), out_ratio_ref.data(), 4, CompareApprox<float>(params.tolerance), stream));
}
typedef MathTest<double> MathRatioTestD;
TEST_P(MathRatioTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    in_ratio.data(), out_ratio_ref.data(), 4, CompareApprox<double>(params.tolerance), stream));
}
typedef MathTest<float> MathSignFlipTestF;
TEST_P(MathSignFlipTestF, Result)
{
  ASSERT_TRUE(devArrMatch(in_sign_flip.data(),
                          out_sign_flip_ref.data(),
                          params.len,
                          CompareApprox<float>(params.tolerance),
                          stream));
}
typedef MathTest<double> MathSignFlipTestD;
TEST_P(MathSignFlipTestD, Result)
{
  ASSERT_TRUE(devArrMatch(in_sign_flip.data(),
                          out_sign_flip_ref.data(),
                          params.len,
                          CompareApprox<double>(params.tolerance),
                          stream));
}
typedef MathTest<float> MathReciprocalTestF;
TEST_P(MathReciprocalTestF, Result)
{
  ASSERT_TRUE(devArrMatch(
    in_recip.data(), in_recip_ref.data(), 4, CompareApprox<float>(params.tolerance), stream));
  // 4-th term tests `setzero=true` functionality, not present in this version of `reciprocal`.
  ASSERT_TRUE(devArrMatch(
    out_recip.data(), in_recip_ref.data(), 3, CompareApprox<float>(params.tolerance), stream));
}
typedef MathTest<double> MathReciprocalTestD;
TEST_P(MathReciprocalTestD, Result)
{
  ASSERT_TRUE(devArrMatch(
    in_recip.data(), in_recip_ref.data(), 4, CompareApprox<double>(params.tolerance), stream));
  // 4-th term tests `setzero=true` functionality, not present in this version of `reciprocal`.
  ASSERT_TRUE(devArrMatch(
    out_recip.data(), in_recip_ref.data(), 3, CompareApprox<double>(params.tolerance), stream));
}
typedef MathTest<float> MathSetSmallZeroTestF;
TEST_P(MathSetSmallZeroTestF, Result)
{
  // Checks both the in-place and the out-of-place zero_small_values results.
  ASSERT_TRUE(devArrMatch(in_smallzero.data(),
                          out_smallzero_ref.data(),
                          4,
                          CompareApprox<float>(params.tolerance),
                          stream));
  ASSERT_TRUE(devArrMatch(out_smallzero.data(),
                          out_smallzero_ref.data(),
                          4,
                          CompareApprox<float>(params.tolerance),
                          stream));
}
typedef MathTest<double> MathSetSmallZeroTestD;
TEST_P(MathSetSmallZeroTestD, Result)
{
  // Checks both the in-place and the out-of-place zero_small_values results.
  ASSERT_TRUE(devArrMatch(in_smallzero.data(),
                          out_smallzero_ref.data(),
                          4,
                          CompareApprox<double>(params.tolerance),
                          stream));
  ASSERT_TRUE(devArrMatch(out_smallzero.data(),
                          out_smallzero_ref.data(),
                          4,
                          CompareApprox<double>(params.tolerance),
                          stream));
}
INSTANTIATE_TEST_SUITE_P(MathTests, MathPowerTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(MathTests, MathPowerTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_SUITE_P(MathTests, MathSqrtTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(MathTests, MathSqrtTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_SUITE_P(MathTests, MathRatioTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(MathTests, MathRatioTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_SUITE_P(MathTests, MathSignFlipTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(MathTests, MathSignFlipTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_SUITE_P(MathTests, MathReciprocalTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(MathTests, MathReciprocalTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_SUITE_P(MathTests, MathSetSmallZeroTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(MathTests, MathSetSmallZeroTestD, ::testing::ValuesIn(inputsd));
} // namespace matrix
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/eye.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/diagonal.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft::matrix {
/** Parameters for the eye (identity-matrix) test: matrix dimensions only. */
template <typename T>
struct InitInputs {
  int n_row;
  int n_col;
};
// Stream operator required by the gtest parameter printer; intentionally prints nothing.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const InitInputs<T>& dims)
{
  return os;
}
/**
 * Verifies raft::matrix::eye for both column-major and row-major layouts.
 * The expected host vectors are hard-coded for a 4x5 matrix, hence the
 * ASSERT on the parameters at the top of test_eye().
 */
template <typename T>
class InitTest : public ::testing::TestWithParam<InitInputs<T>> {
 public:
  InitTest()
    : params(::testing::TestWithParam<InitInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle))
  {
  }
 protected:
  void test_eye()
  {
    ASSERT_TRUE(params.n_row == 4 && params.n_col == 5);  // expectations below are 4x5-specific
    // Column-major identity: ones at linear offsets 0, 5, 10, 15.
    auto eyemat_col =
      raft::make_device_matrix<T, int, raft::col_major>(handle, params.n_row, params.n_col);
    raft::matrix::eye(handle, eyemat_col.view());
    std::vector<T> eye_exp{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0};
    std::vector<T> eye_act(params.n_col * params.n_row);
    raft::copy(eye_act.data(), eyemat_col.data_handle(), eye_act.size(), stream);
    resource::sync_stream(handle, stream);
    ASSERT_TRUE(hostVecMatch(eye_exp, eye_act, raft::Compare<T>()));
    // Row-major identity: ones at linear offsets 0, 6, 12, 18.
    auto eyemat_row =
      raft::make_device_matrix<T, int, raft::row_major>(handle, params.n_row, params.n_col);
    raft::matrix::eye(handle, eyemat_row.view());
    eye_exp = std::vector<T>{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0};
    raft::copy(eye_act.data(), eyemat_row.data_handle(), eye_act.size(), stream);
    resource::sync_stream(handle, stream);
    ASSERT_TRUE(hostVecMatch(eye_exp, eye_act, raft::Compare<T>()));
  }
  // All assertions run in SetUp; the TEST_P bodies below are intentionally empty.
  void SetUp() override { test_eye(); }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  InitInputs<T> params;
};
// Single 4x5 case per type (test_eye hard-codes the expected 4x5 layout).
const std::vector<InitInputs<float>> inputsf1 = {{4, 5}};
const std::vector<InitInputs<double>> inputsd1 = {{4, 5}};
typedef InitTest<float> InitTestF;
TEST_P(InitTestF, Result) {}  // body empty: the checks already ran in SetUp
typedef InitTest<double> InitTestD;
TEST_P(InitTestD, Result) {}  // body empty: the checks already ran in SetUp
INSTANTIATE_TEST_SUITE_P(InitTests, InitTestF, ::testing::ValuesIn(inputsf1));
INSTANTIATE_TEST_SUITE_P(InitTests, InitTestD, ::testing::ValuesIn(inputsd1));
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/matrix.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/matrix/copy.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
namespace raft {
namespace matrix {
/** Parameters for the matrix copy/truncate tests. */
template <typename T>
struct MatrixInputs {
  T tolerance;   // approximate-compare tolerance
  int n_row;     // matrix rows
  int n_col;     // matrix cols
  unsigned long long int seed;  // RNG seed
};
// Stream operator required by the gtest parameter printer; intentionally prints nothing.
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MatrixInputs<T>& dims)
{
  return os;
}
/**
 * Fixture for raft::matrix::copy / trunc_zero_origin: fills in1 with random
 * data, copies it to in2 (compared in the TEST_Ps below), and exercises a
 * 3x2 zero-origin truncation.
 */
template <typename T>
class MatrixTest : public ::testing::TestWithParam<MatrixInputs<T>> {
 public:
  MatrixTest()
    : params(::testing::TestWithParam<MatrixInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      in1(params.n_row * params.n_col, stream),
      in2(params.n_row * params.n_col, stream),
      in1_revr(params.n_row * params.n_col, stream)
  {
  }
 protected:
  void SetUp() override
  {
    raft::random::RngState r(params.seed);
    int len = params.n_row * params.n_col;
    uniform(handle, r, in1.data(), len, T(-1.0), T(1.0));
    auto in1_view = raft::make_device_matrix_view<const T, int, col_major>(
      in1.data(), params.n_row, params.n_col);
    auto in2_view =
      raft::make_device_matrix_view<T, int, col_major>(in2.data(), params.n_row, params.n_col);
    copy<T, int>(handle, in1_view, in2_view);
    // copy(in1, in1_revr, params.n_row, params.n_col);
    // colReverse(in1_revr, params.n_row, params.n_col);
    // Truncate the top-left 3x2 corner; result is only exercised, not validated here.
    rmm::device_uvector<T> outTrunc(6, stream);
    auto out_trunc_view = raft::make_device_matrix_view<T, int, col_major>(outTrunc.data(), 3, 2);
    trunc_zero_origin<T, int>(handle, in1_view, out_trunc_view);
    resource::sync_stream(handle, stream);
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  MatrixInputs<T> params;
  rmm::device_uvector<T> in1, in2, in1_revr;  // in1_revr currently unused (see commented code)
};
const std::vector<MatrixInputs<float>> inputsf2 = {{0.000001f, 4, 4, 1234ULL}};
const std::vector<MatrixInputs<double>> inputsd2 = {{0.00000001, 4, 4, 1234ULL}};
typedef MatrixTest<float> MatrixTestF;
// copy() must produce an elementwise-identical matrix (up to tolerance).
TEST_P(MatrixTestF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(in1.data(),
                                in2.data(),
                                params.n_row * params.n_col,
                                raft::CompareApprox<float>(params.tolerance),
                                stream));
}
typedef MatrixTest<double> MatrixTestD;
TEST_P(MatrixTestD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(in1.data(),
                                in2.data(),
                                params.n_row * params.n_col,
                                raft::CompareApprox<double>(params.tolerance),
                                stream));
}
INSTANTIATE_TEST_SUITE_P(MatrixTests, MatrixTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(MatrixTests, MatrixTestD, ::testing::ValuesIn(inputsd2));
/**
 * Typed test for raft::matrix::copy_rows: gathers a subset of rows (given by
 * `indices`) from a 10x3 matrix whose elements are 0..29 (filled via a
 * counting iterator), for both col-major and row-major layouts.
 * T is a tuple of (element type, index type, index-array type).
 */
template <typename T>
class MatrixCopyRowsTest : public ::testing::Test {
  using math_t = typename std::tuple_element<0, T>::type;
  using idx_t = typename std::tuple_element<1, T>::type;
  using idx_array_t = typename std::tuple_element<2, T>::type;
 protected:
  MatrixCopyRowsTest()
    : stream(resource::get_cuda_stream(handle)),
      input(n_cols * n_rows, resource::get_cuda_stream(handle)),
      indices(n_selected, resource::get_cuda_stream(handle)),
      output(n_cols * n_selected, resource::get_cuda_stream(handle))
  {
    raft::update_device(indices.data(), indices_host, n_selected, stream)
;
    // Init input array
    thrust::counting_iterator<idx_t> first(0);
    thrust::device_ptr<math_t> ptr(input.data());
    thrust::copy(resource::get_thrust_policy(handle), first, first + n_cols * n_rows, ptr);
  }
  void testCopyRows()
  {
    // Col-major gather, checked against the precomputed expectation.
    auto input_view = raft::make_device_matrix_view<const math_t, idx_array_t, col_major>(
      input.data(), n_rows, n_cols);
    auto output_view = raft::make_device_matrix_view<math_t, idx_array_t, col_major>(
      output.data(), n_selected, n_cols);
    auto indices_view =
      raft::make_device_vector_view<const idx_array_t, idx_array_t>(indices.data(), n_selected);
    raft::matrix::copy_rows(handle, input_view, output_view, indices_view);
    EXPECT_TRUE(raft::devArrMatchHost(
      output_exp_colmajor, output.data(), n_selected * n_cols, raft::Compare<math_t>(), stream));
    // Row-major gather over the same data, reusing the output buffer.
    auto input_row_view = raft::make_device_matrix_view<const math_t, idx_array_t, row_major>(
      input.data(), n_rows, n_cols);
    auto output_row_view = raft::make_device_matrix_view<math_t, idx_array_t, row_major>(
      output.data(), n_selected, n_cols);
    raft::matrix::copy_rows(handle, input_row_view, output_row_view, indices_view);
    EXPECT_TRUE(raft::devArrMatchHost(
      output_exp_rowmajor, output.data(), n_selected * n_cols, raft::Compare<math_t>(), stream));
  }
 protected:
  raft::resources handle;
  cudaStream_t stream;
  int n_rows = 10;
  int n_cols = 3;
  int n_selected = 5;
  idx_array_t indices_host[5] = {0, 3, 4, 7, 9};  // rows to gather
  math_t output_exp_colmajor[15] = {0, 3, 4, 7, 9, 10, 13, 14, 17, 19, 20, 23, 24, 27, 29};
  math_t output_exp_rowmajor[15] = {0, 1, 2, 9, 10, 11, 12, 13, 14, 21, 22, 23, 27, 28, 29};
  rmm::device_uvector<math_t> input;
  rmm::device_uvector<math_t> output;
  rmm::device_uvector<idx_array_t> indices;
};
// (element type, index type, index-array type) combinations for the typed test.
using TypeTuple = ::testing::Types<std::tuple<float, int, int>,
                                   std::tuple<float, int64_t, int>,
                                   std::tuple<double, int, int>,
                                   std::tuple<double, int64_t, int>>;
TYPED_TEST_CASE(MatrixCopyRowsTest, TypeTuple);
TYPED_TEST(MatrixCopyRowsTest, CopyRows) { this->testCopyRows(); }
} // namespace matrix
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/reverse.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/reverse.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace matrix {
// Parameter set for the matrix reverse tests. Field order matters: the
// inputsf/inputsd tables below aggregate-initialize this struct positionally.
template <typename T>
struct ReverseInputs {
  bool row_major, row_reverse;  // matrix layout; reverse rows (true) or columns (false)
  int rows, cols;               // matrix dimensions
  unsigned long long int seed;  // RNG seed for the random input matrix
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const ReverseInputs<T>& I)
{
os << "{ " << I.row_major << ", " << I.row_reverse << ", " << I.rows << ", " << I.cols << ", "
<< I.seed << '}' << std::endl;
return os;
}
// col-reverse reference test
// Host reference: reverse the column order of a matrix (stored row- or
// column-major) in place, by swapping each column with its mirror.
template <typename Type>
void naive_col_reverse(std::vector<Type>& data, int rows, int cols, bool row_major)
{
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < cols / 2; ++c) {
      const int mirror = cols - c - 1;
      const auto lhs   = row_major ? r * cols + c : r + c * rows;
      const auto rhs   = row_major ? r * cols + mirror : r + mirror * rows;
      std::swap(data[lhs], data[rhs]);
    }
  }
}
// row-reverse reference test
// Host reference: reverse the row order of a matrix (stored row- or
// column-major) in place, by swapping each row with its mirror.
template <typename Type>
void naive_row_reverse(std::vector<Type>& data, int rows, int cols, bool row_major)
{
  for (int r = 0; r < rows / 2; ++r) {
    const int mirror = rows - r - 1;
    for (int c = 0; c < cols; ++c) {
      const auto lhs = row_major ? r * cols + c : r + c * rows;
      const auto rhs = row_major ? mirror * cols + c : mirror + c * rows;
      std::swap(data[lhs], data[rhs]);
    }
  }
}
// Runs raft::matrix::row_reverse / col_reverse on a random device matrix and
// compares the result against the host-side naive reference.
template <typename T>
class ReverseTest : public ::testing::TestWithParam<ReverseInputs<T>> {
 public:
  ReverseTest()
    : params(::testing::TestWithParam<ReverseInputs<T>>::GetParam()),
      stream(resource::get_cuda_stream(handle)),
      data(params.rows * params.cols, stream)
  {
  }

  void SetUp() override
  {
    raft::random::RngState r(params.seed);

    int rows = params.rows, cols = params.cols, len = rows * cols;

    act_result.resize(len);
    exp_result.resize(len);

    // Fill the device matrix with random values and snapshot it to the host;
    // the host copy is transformed by the naive reference implementation.
    uniform(handle, r, data.data(), len, T(-10.0), T(10.0));
    raft::update_host(exp_result.data(), data.data(), len, stream);
    // update_host enqueues an asynchronous copy on `stream`; synchronize
    // before the naive_*_reverse calls below read exp_result on the host
    // (the original read exp_result without waiting for the copy).
    resource::sync_stream(handle, stream);

    auto input_col_major =
      raft::make_device_matrix_view<T, int, raft::col_major>(data.data(), rows, cols);
    auto input_row_major =
      raft::make_device_matrix_view<T, int, raft::row_major>(data.data(), rows, cols);

    // Apply the device-side reverse and the matching host-side reference for
    // each layout/direction combination.
    if (params.row_major) {
      if (params.row_reverse) {
        row_reverse(handle, input_row_major);
        naive_row_reverse(exp_result, rows, cols, params.row_major);
      } else {
        col_reverse(handle, input_row_major);
        naive_col_reverse(exp_result, rows, cols, params.row_major);
      }
    } else {
      if (params.row_reverse) {
        row_reverse(handle, input_col_major);
        naive_row_reverse(exp_result, rows, cols, params.row_major);
      } else {
        col_reverse(handle, input_col_major);
        naive_col_reverse(exp_result, rows, cols, params.row_major);
      }
    }

    raft::update_host(act_result.data(), data.data(), len, stream);
    resource::sync_stream(handle, stream);
  }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  ReverseInputs<T> params;
  rmm::device_uvector<T> data;            // device-side matrix, reversed in place
  std::vector<T> exp_result, act_result;  // host reference vs device result
};
///// Row- and column-wise tests
// Each entry: {row_major, row_reverse, rows, cols, seed}.
const std::vector<ReverseInputs<float>> inputsf = {{true, true, 4, 4, 1234ULL},
                                                   {true, true, 2, 12, 1234ULL},
                                                   {true, false, 2, 12, 1234ULL},
                                                   {true, false, 2, 64, 1234ULL},
                                                   {true, true, 64, 512, 1234ULL},
                                                   {true, false, 64, 1024, 1234ULL},
                                                   {true, true, 128, 1024, 1234ULL},
                                                   {true, false, 256, 1024, 1234ULL},
                                                   {false, true, 512, 512, 1234ULL},
                                                   {false, false, 1024, 32, 1234ULL},
                                                   {false, true, 1024, 64, 1234ULL},
                                                   {false, false, 1024, 128, 1234ULL},
                                                   {false, true, 1024, 256, 1234ULL}};

// Same grid as inputsf, for double precision.
const std::vector<ReverseInputs<double>> inputsd = {{true, true, 4, 4, 1234ULL},
                                                    {true, true, 2, 12, 1234ULL},
                                                    {true, false, 2, 12, 1234ULL},
                                                    {true, false, 2, 64, 1234ULL},
                                                    {true, true, 64, 512, 1234ULL},
                                                    {true, false, 64, 1024, 1234ULL},
                                                    {true, true, 128, 1024, 1234ULL},
                                                    {true, false, 256, 1024, 1234ULL},
                                                    {false, true, 512, 512, 1234ULL},
                                                    {false, false, 1024, 32, 1234ULL},
                                                    {false, true, 1024, 64, 1234ULL},
                                                    {false, false, 1024, 128, 1234ULL},
                                                    {false, true, 1024, 256, 1234ULL}};

// Exact (bitwise) comparison is valid here: reversing only moves elements.
typedef ReverseTest<float> ReverseTestF;
TEST_P(ReverseTestF, Result)
{
  ASSERT_TRUE(hostVecMatch(exp_result, act_result, raft::Compare<float>()));
}

typedef ReverseTest<double> ReverseTestD;
TEST_P(ReverseTestD, Result)
{
  ASSERT_TRUE(hostVecMatch(exp_result, act_result, raft::Compare<double>()));
}

INSTANTIATE_TEST_CASE_P(ReverseTests, ReverseTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(ReverseTests, ReverseTestD, ::testing::ValuesIn(inputsd));
} // end namespace matrix
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/matrix/diagonal.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/diagonal.cuh>
#include <raft/matrix/init.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace matrix {
// Parameter set for the diagonal get/set tests. Field order matters: the
// inputsf/inputsd tables below aggregate-initialize this struct positionally.
template <typename T>
struct DiagonalInputs {
  int n_rows;  // matrix row count
  int n_cols;  // matrix column count (may differ from n_rows)
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const DiagonalInputs<T>& dims)
{
return os;
}
// Writes a known diagonal into a matrix with set_diagonal, reads it back
// with get_diagonal, and expects the two diagonals to match.
template <typename T>
class DiagonalTest : public ::testing::TestWithParam<DiagonalInputs<T>> {
 public:
  // Members initialize in declaration order (params, diag_size, then the
  // mdarrays); the initializer list is written in that same order so
  // diag_size is already computed when it sizes the vectors. (The original
  // listed diag_size last, triggering -Wreorder and obscuring the real
  // ordering, though the actual behavior was the same.)
  DiagonalTest()
    : params(::testing::TestWithParam<DiagonalInputs<T>>::GetParam()),
      diag_size(std::min(params.n_rows, params.n_cols)),
      input(raft::make_device_matrix<T, std::uint32_t>(handle, params.n_rows, params.n_cols)),
      diag_expected(raft::make_device_vector<T, std::uint32_t>(handle, diag_size)),
      diag_actual(raft::make_device_vector<T, std::uint32_t>(handle, diag_size))
  {
    T mat_fill_scalar  = 1.0;
    T diag_fill_scalar = 5.0;

    auto input_view = raft::make_device_matrix_view<const T, std::uint32_t>(
      input.data_handle(), input.extent(0), input.extent(1));
    auto diag_expected_view =
      raft::make_device_vector_view<const T, std::uint32_t>(diag_expected.data_handle(), diag_size);

    // Fill the matrix with 1s and the expected diagonal with 5s so the
    // diagonal values are distinguishable from the rest of the matrix.
    raft::matrix::fill(
      handle, input_view, input.view(), raft::make_host_scalar_view<T>(&mat_fill_scalar));
    raft::matrix::fill(handle,
                       diag_expected_view,
                       diag_expected.view(),
                       raft::make_host_scalar_view<T>(&diag_fill_scalar));
    resource::sync_stream(handle);

    // Round-trip: write the expected diagonal, then read it back out.
    raft::matrix::set_diagonal(handle, diag_expected_view, input.view());
    resource::sync_stream(handle);

    raft::matrix::get_diagonal(handle, input_view, diag_actual.view());
    resource::sync_stream(handle);
  }

 protected:
  raft::resources handle;
  DiagonalInputs<T> params;
  int diag_size;  // min(n_rows, n_cols): length of the main diagonal
  raft::device_matrix<T, std::uint32_t> input;
  raft::device_vector<T, std::uint32_t> diag_expected;
  raft::device_vector<T, std::uint32_t> diag_actual;
};
// Each entry: {n_rows, n_cols} — square, wide, and tall matrices.
const std::vector<DiagonalInputs<float>> inputsf = {{4, 4}, {4, 10}, {10, 4}};

const std::vector<DiagonalInputs<double>> inputsd = {{4, 4}, {4, 10}, {10, 4}};

// Exact comparison is valid: set/get_diagonal only move values, no math.
typedef DiagonalTest<float> DiagonalTestF;
TEST_P(DiagonalTestF, Result)
{
  ASSERT_TRUE(devArrMatch(diag_expected.data_handle(),
                          diag_actual.data_handle(),
                          diag_size,
                          Compare<float>(),
                          resource::get_cuda_stream(handle)));
}

typedef DiagonalTest<double> DiagonalTestD;
TEST_P(DiagonalTestD, Result)
{
  ASSERT_TRUE(devArrMatch(diag_expected.data_handle(),
                          diag_actual.data_handle(),
                          diag_size,
                          Compare<double>(),
                          resource::get_cuda_stream(handle)));
}

INSTANTIATE_TEST_SUITE_P(DiagonalTest, DiagonalTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(DiagonalTest, DiagonalTestD, ::testing::ValuesIn(inputsd));
} // namespace matrix
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/test | rapidsai_public_repos/raft/cpp/test/cluster/kmeans_find_k.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.h"
#include <gtest/gtest.h>
#include <optional>
#include <raft/core/resource/cuda_stream.hpp>
#include <vector>
#include <raft/cluster/kmeans.cuh>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/make_blobs.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
// Parameter set for the kmeans find_k tests. Field order matters: the
// inputsf2/inputsd2 tables below aggregate-initialize this struct positionally.
template <typename T>
struct KmeansFindKInputs {
  int n_row;       // number of samples in the synthetic dataset
  int n_col;       // number of features per sample
  int n_clusters;  // number of blob centers generated (the expected best_k)
  T tol;           // NOTE(review): not forwarded to find_k by this test — confirm intent
  bool weighted;   // NOTE(review): unused by this test's find_k call — confirm intent
};
// Generates a labelled blob dataset with a known number of well-separated
// centers and checks that kmeans::find_k recovers that cluster count.
template <typename T>
class KmeansFindKTest : public ::testing::TestWithParam<KmeansFindKInputs<T>> {
 protected:
  KmeansFindKTest()
    : stream(resource::get_cuda_stream(handle)), best_k(raft::make_host_scalar<int>(0))
  {
  }

  void basicTest()
  {
    testparams = ::testing::TestWithParam<KmeansFindKInputs<T>>::GetParam();

    const int rows     = testparams.n_row;
    const int cols     = testparams.n_col;
    const int clusters = testparams.n_clusters;

    auto dataset     = raft::make_device_matrix<T, int>(handle, rows, cols);
    auto blob_labels = raft::make_device_vector<int, int>(handle, rows);

    // Synthesize `clusters` Gaussian blobs with a fixed seed so the run is
    // reproducible; the generated labels are not needed by find_k.
    raft::random::make_blobs<T, int>(dataset.data_handle(),
                                     blob_labels.data_handle(),
                                     rows,
                                     cols,
                                     clusters,
                                     stream,
                                     true,
                                     nullptr,
                                     nullptr,
                                     T(.001),
                                     false,
                                     (T)-10.0f,
                                     (T)10.0f,
                                     (uint64_t)1234);

    auto inertia = raft::make_host_scalar<T>(0);
    auto n_iter  = raft::make_host_scalar<int>(0);

    auto data_view = raft::make_device_matrix_view<const T, int>(
      dataset.data_handle(), dataset.extent(0), dataset.extent(1));

    // Search k in [1, clusters]; the result lands in best_k.
    raft::cluster::kmeans::find_k<int, T>(
      handle, data_view, best_k.view(), inertia.view(), n_iter.view(), clusters);

    resource::sync_stream(handle, stream);
  }

  void SetUp() override { basicTest(); }

 protected:
  raft::resources handle;
  cudaStream_t stream;
  KmeansFindKInputs<T> testparams;
  raft::host_scalar<int> best_k;  // cluster count chosen by find_k
};
// Each entry: {n_row, n_col, n_clusters, tol, weighted}.
const std::vector<KmeansFindKInputs<float>> inputsf2 = {{1000, 32, 8, 0.001f, true},
                                                        {1000, 32, 8, 0.001f, false},
                                                        {1000, 100, 20, 0.001f, true},
                                                        {1000, 100, 20, 0.001f, false},
                                                        {10000, 32, 10, 0.001f, true},
                                                        {10000, 32, 10, 0.001f, false},
                                                        {10000, 100, 50, 0.001f, true},
                                                        {10000, 100, 50, 0.001f, false},
                                                        {10000, 500, 100, 0.001f, true},
                                                        {10000, 500, 100, 0.001f, false}};

// Same grid shape as inputsf2, for double precision (smaller first case).
const std::vector<KmeansFindKInputs<double>> inputsd2 = {{1000, 32, 5, 0.0001, true},
                                                         {1000, 32, 5, 0.0001, false},
                                                         {1000, 100, 20, 0.0001, true},
                                                         {1000, 100, 20, 0.0001, false},
                                                         {10000, 32, 10, 0.0001, true},
                                                         {10000, 32, 10, 0.0001, false},
                                                         {10000, 100, 50, 0.0001, true},
                                                         {10000, 100, 50, 0.0001, false},
                                                         {10000, 500, 100, 0.0001, true},
                                                         {10000, 500, 100, 0.0001, false}};

typedef KmeansFindKTest<float> KmeansFindKTestF;
TEST_P(KmeansFindKTestF, Result)
{
  // Print the mismatching pair before the assertion to aid debugging.
  if (best_k.view()[0] != testparams.n_clusters) {
    std::cout << best_k.view()[0] << " " << testparams.n_clusters << std::endl;
  }

  ASSERT_TRUE(best_k.view()[0] == testparams.n_clusters);
}

typedef KmeansFindKTest<double> KmeansFindKTestD;
TEST_P(KmeansFindKTestD, Result)
{
  // Print the mismatching pair before the assertion to aid debugging.
  if (best_k.view()[0] != testparams.n_clusters) {
    std::cout << best_k.view()[0] << " " << testparams.n_clusters << std::endl;
  }

  ASSERT_TRUE(best_k.view()[0] == testparams.n_clusters);
}

INSTANTIATE_TEST_CASE_P(KmeansFindKTests, KmeansFindKTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(KmeansFindKTests, KmeansFindKTestD, ::testing::ValuesIn(inputsd2));
} // namespace raft
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.