repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/linalg/norm.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/itertools.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::bench::linalg {
// Benchmark parameters: input matrix shape (rows x cols).
template <typename IdxT>
struct norm_input {
  IdxT rows, cols;
};
// Pretty-printer used for the benchmark label: encodes the shape as "rows#cols".
template <typename IdxT>
inline auto operator<<(std::ostream& os, const norm_input<IdxT>& p) -> std::ostream&
{
  return os << p.rows << "#" << p.cols;
}
// Benchmark fixture: row-wise L2 norm (with final sqrt) of a rows x cols
// row-major matrix; one output value per row.
template <typename T, typename IdxT>
struct rowNorm : public fixture {
  rowNorm(const norm_input<IdxT>& p) : params(p), in(p.rows * p.cols, stream), dots(p.rows, stream)
  {
    // Fill the input with uniform random values in [-10, 10]; fixed seed for reproducibility.
    raft::random::RngState rng{1234};
    raft::random::uniform(handle, rng, in.data(), p.rows * p.cols, (T)-10.0, (T)10.0);
  }
  void run_benchmark(::benchmark::State& state) override
  {
    // Label each case with "rows#cols" (see operator<< above).
    std::ostringstream label_stream;
    label_stream << params;
    state.SetLabel(label_stream.str());
    loop_on_state(state, [this]() {
      // Views are non-owning and cheap to recreate each iteration; the timed
      // region thus matches what a typical caller of the mdspan API would do.
      auto input_view = raft::make_device_matrix_view<const T, IdxT, raft::row_major>(
        in.data(), params.rows, params.cols);
      auto output_view =
        raft::make_device_vector_view<T, IdxT, raft::row_major>(dots.data(), params.rows);
      raft::linalg::norm(handle,
                         input_view,
                         output_view,
                         raft::linalg::L2Norm,
                         raft::linalg::Apply::ALONG_ROWS,
                         raft::sqrt_op());
    });
  }

 private:
  norm_input<IdxT> params;
  rmm::device_uvector<T> in, dots;
};  // struct rowNorm
// Cartesian product of row counts and column counts, for 32- and 64-bit indices.
const std::vector<norm_input<int>> norm_inputs_i32 =
  raft::util::itertools::product<norm_input<int>>({10, 100, 1000, 10000, 100000},
                                                  {16, 32, 64, 128, 256, 512, 1024});
const std::vector<norm_input<int64_t>> norm_inputs_i64 =
  raft::util::itertools::product<norm_input<int64_t>>({10, 100, 1000, 10000, 100000},
                                                      {16, 32, 64, 128, 256, 512, 1024});
// Register float/double x int/int64_t variants.
RAFT_BENCH_REGISTER((rowNorm<float, int>), "", norm_inputs_i32);
RAFT_BENCH_REGISTER((rowNorm<double, int>), "", norm_inputs_i32);
RAFT_BENCH_REGISTER((rowNorm<float, int64_t>), "", norm_inputs_i64);
RAFT_BENCH_REGISTER((rowNorm<double, int64_t>), "", norm_inputs_i64);
} // namespace raft::bench::linalg
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/linalg/reduce.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/linalg/reduce.cuh>
#include <rmm/device_uvector.hpp>
namespace raft::bench::linalg {
// Benchmark parameters: matrix shape plus reduction direction.
// NOTE(review): `along_rows` here appears unused — the fixture takes the
// direction as a separate constructor argument (see kAlongRows below).
struct input_size {
  int rows, cols;
  bool along_rows;
};
// Benchmark fixture for raft::linalg::reduce over a rows x cols row-major matrix.
// `along_rows == true` produces one output per row; `along_rows == false`
// produces one output per column.
template <typename T>
struct reduce : public fixture {
  reduce(bool along_rows, const input_size& p)
    : along_rows(along_rows),
      params(p),
      in(p.rows * p.cols, stream),
      // Size the output for either reduction direction: the column-wise
      // reduction writes `cols` values, so sizing by `rows` alone would
      // under-allocate for wide inputs (e.g. {1024, 8192}, along_rows=false).
      out(p.rows > p.cols ? p.rows : p.cols, stream)
  {
  }
  void run_benchmark(::benchmark::State& state) override
  {
    loop_on_state(state, [this]() {
      // Args: (out, in, D = cols, N = rows, init = 0, rowMajor = true, alongRows).
      raft::linalg::reduce(
        out.data(), in.data(), params.cols, params.rows, T(0.f), true, along_rows, stream);
    });
  }

 private:
  bool along_rows;
  // Renamed from `input_size` (same name as its type) to avoid shadowing.
  input_size params;
  rmm::device_uvector<T> in, out;
};  // struct reduce
// Tall, wide, and square shapes to exercise both reduction directions.
const std::vector<input_size> kInputSizes{{8 * 1024, 1024},
                                          {1024, 8 * 1024},
                                          {8 * 1024, 8 * 1024},
                                          {32 * 1024, 1024},
                                          {1024, 32 * 1024},
                                          {32 * 1024, 32 * 1024}};
// Reduction direction is passed as the fixture's first constructor argument.
const std::vector<bool> kAlongRows{false, true};
RAFT_BENCH_REGISTER(reduce<float>, "", kAlongRows, kInputSizes);
RAFT_BENCH_REGISTER(reduce<double>, "", kAlongRows, kInputSizes);
} // namespace raft::bench::linalg
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/linalg/reduce_rows_by_key.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/linalg/reduce_rows_by_key.cuh>
#include <raft/random/rng.cuh>
#include <rmm/device_uvector.hpp>
namespace raft::bench::linalg {
// Benchmark parameters: input shape (rows x cols) and number of distinct keys.
struct rrbk_params {
  int64_t rows, cols;
  int64_t keys;
};
// Benchmark fixture: sum the rows of a rows x cols matrix into `keys` output
// rows, grouping input rows by a random per-row key.
template <typename T, typename KeyT>
struct reduce_rows_by_key : public fixture {
  reduce_rows_by_key(const rrbk_params& p)
    : params(p),
      in(p.rows * p.cols, stream),
      out(p.keys * p.cols, stream),
      keys(p.rows, stream),
      workspace(p.rows, stream)
  {
    // One random key per input row, drawn from [0, p.keys)
    // (assumes uniformInt's upper bound is exclusive — TODO confirm).
    raft::random::RngState rng{42};
    raft::random::uniformInt(handle, rng, keys.data(), p.rows, (KeyT)0, (KeyT)p.keys);
  }
  void run_benchmark(::benchmark::State& state) override
  {
    loop_on_state(state, [this]() {
      // Second argument is presumably the leading dimension (row stride);
      // final `false` flag semantics: verify against the reduce_rows_by_key API.
      raft::linalg::reduce_rows_by_key(in.data(),
                                       params.cols,
                                       keys.data(),
                                       workspace.data(),
                                       params.rows,
                                       params.cols,
                                       params.keys,
                                       out.data(),
                                       stream,
                                       false);
    });
  }

 protected:
  rrbk_params params;
  rmm::device_uvector<T> in, out;
  rmm::device_uvector<KeyT> keys;
  rmm::device_uvector<char> workspace;
};  // struct reduce_rows_by_key
// Row counts 10^4..10^7 crossed with key cardinalities 64..4096 (cols fixed at 128).
const std::vector<rrbk_params> kInputSizes{
  {10000, 128, 64},
  {100000, 128, 64},
  {1000000, 128, 64},
  {10000000, 128, 64},
  {10000, 128, 256},
  {100000, 128, 256},
  {1000000, 128, 256},
  {10000000, 128, 256},
  {10000, 128, 1024},
  {100000, 128, 1024},
  {1000000, 128, 1024},
  {10000000, 128, 1024},
  {10000, 128, 4096},
  {100000, 128, 4096},
  {1000000, 128, 4096},
  {10000000, 128, 4096},
};
RAFT_BENCH_REGISTER((reduce_rows_by_key<float, uint32_t>), "", kInputSizes);
RAFT_BENCH_REGISTER((reduce_rows_by_key<double, uint32_t>), "", kInputSizes);
} // namespace raft::bench::linalg
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/linalg/matrix_vector_op.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/linalg/matrix_vector_op.cuh>
#include <rmm/device_uvector.hpp>
namespace raft::bench::linalg {
// Benchmark parameters: matrix shape, layout, broadcast direction, and
// deliberate pointer misalignment offsets for the input/output buffers.
template <typename IdxT>
struct mat_vec_op_inputs {
  IdxT rows, cols;
  bool rowMajor, bcastAlongRows;
  IdxT inAlignOffset, outAlignOffset;
};  // struct mat_vec_op_inputs
// Pretty-printer for the benchmark label: "#"-separated fields in member order.
template <typename IdxT>
inline auto operator<<(std::ostream& os, const mat_vec_op_inputs<IdxT>& p) -> std::ostream&
{
  return os << p.rows << "#" << p.cols << "#" << p.rowMajor << "#" << p.bcastAlongRows << "#"
            << p.inAlignOffset << "#" << p.outAlignOffset;
}
// Benchmark fixture: apply OpT element-wise between a matrix and one or two
// broadcast vectors. The align offsets shift the base pointers by a few
// elements so the misaligned-access code path is also measured.
template <typename OpT, typename T, typename IdxT>
struct mat_vec_op : public fixture {
  mat_vec_op(const mat_vec_op_inputs<IdxT>& p)
    : params(p),
      out(p.rows * p.cols + params.outAlignOffset, stream),
      in(p.rows * p.cols + params.inAlignOffset, stream),
      // Broadcast vector length matches the broadcast direction.
      vec1(p.bcastAlongRows ? p.cols : p.rows, stream),
      vec2(p.bcastAlongRows ? p.cols : p.rows, stream)
  {
  }
  void run_benchmark(::benchmark::State& state) override
  {
    std::ostringstream label_stream;
    label_stream << params;
    state.SetLabel(label_stream.str());
    loop_on_state(state, [this]() {
      // Compile-time dispatch on whether OpT consumes one or two vectors.
      if constexpr (OpT::useTwoVectors) {
        raft::linalg::matrixVectorOp(out.data() + params.outAlignOffset,
                                     in.data() + params.inAlignOffset,
                                     vec1.data(),
                                     vec2.data(),
                                     params.cols,
                                     params.rows,
                                     params.rowMajor,
                                     params.bcastAlongRows,
                                     OpT{},
                                     stream);
      } else {
        raft::linalg::matrixVectorOp(out.data() + params.outAlignOffset,
                                     in.data() + params.inAlignOffset,
                                     vec1.data(),
                                     params.cols,
                                     params.rows,
                                     params.rowMajor,
                                     params.bcastAlongRows,
                                     OpT{},
                                     stream);
      }
    });
  }

 private:
  mat_vec_op_inputs<IdxT> params;
  rmm::device_uvector<T> out, in, vec1, vec2;
};  // struct MatVecOp
// Builds the benchmark case list: a round-dimension scalability grid followed
// by odd shapes with deliberate pointer misalignment.
template <typename IdxT>
std::vector<mat_vec_op_inputs<IdxT>> get_mv_inputs()
{
  std::vector<mat_vec_op_inputs<IdxT>> cases;
  // Scalability benchmark with round dimensions
  std::vector<IdxT> n_rows = {1000, 100000, 1000000};
  std::vector<IdxT> n_cols = {8, 64, 256, 1024};
  for (bool row_major : {true, false}) {
    for (bool along_rows : {true, false}) {
      for (IdxT r : n_rows) {
        for (IdxT c : n_cols) {
          cases.push_back({r, c, row_major, along_rows, 0, 0});
        }
      }
    }
  }
  // Odd dimensions, misalignment
  std::vector<std::tuple<IdxT, IdxT>> odd_shapes = {
    {44739207, 7},
    {44739207, 15},
    {44739207, 16},
    {44739207, 17},
    {2611236, 256},
    {2611236, 257},
    {2611236, 263},
  };
  for (bool row_major : {true, false}) {
    for (bool along_rows : {true, false}) {
      for (const auto& [r, c] : odd_shapes) {
        for (IdxT in_offset : {0, 1}) {
          for (IdxT out_offset : {0, 1}) {
            cases.push_back({r, c, row_major, along_rows, in_offset, out_offset});
          }
        }
      }
    }
  }
  return cases;
}
const std::vector<mat_vec_op_inputs<int>> mv_input_i32 = get_mv_inputs<int>();
const std::vector<mat_vec_op_inputs<int64_t>> mv_input_i64 = get_mv_inputs<int64_t>();
// Op consuming one broadcast vector: out = a + b.
template <typename T>
struct Add1Vec {
  static constexpr bool useTwoVectors = false;
  HDI T operator()(T a, T b) const { return a + b; };
};
// Op consuming two broadcast vectors: out = a + b + c.
template <typename T>
struct Add2Vec {
  static constexpr bool useTwoVectors = true;
  HDI T operator()(T a, T b, T c) const { return a + b + c; };
};
// Register one-vector and two-vector variants for float/double x int/int64_t.
RAFT_BENCH_REGISTER((mat_vec_op<Add1Vec<float>, float, int>), "", mv_input_i32);
RAFT_BENCH_REGISTER((mat_vec_op<Add1Vec<double>, double, int>), "", mv_input_i32);
RAFT_BENCH_REGISTER((mat_vec_op<Add2Vec<float>, float, int>), "", mv_input_i32);
RAFT_BENCH_REGISTER((mat_vec_op<Add2Vec<double>, double, int>), "", mv_input_i32);
RAFT_BENCH_REGISTER((mat_vec_op<Add1Vec<float>, float, int64_t>), "", mv_input_i64);
RAFT_BENCH_REGISTER((mat_vec_op<Add1Vec<double>, double, int64_t>), "", mv_input_i64);
RAFT_BENCH_REGISTER((mat_vec_op<Add2Vec<float>, float, int64_t>), "", mv_input_i64);
RAFT_BENCH_REGISTER((mat_vec_op<Add2Vec<double>, double, int64_t>), "", mv_input_i64);
} // namespace raft::bench::linalg
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/linalg/map_then_reduce.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/linalg/map_then_reduce.cuh>
#include <rmm/device_uvector.hpp>
namespace raft::bench::linalg {
// Benchmark parameter: number of input elements.
struct map_then_reduce_inputs {
  int len;
};
// Identity map: with this op, mapThenSumReduce computes a plain sum.
template <typename Type>
struct Identity {
  HDI Type operator()(Type a) { return a; }
};
// Benchmark fixture: mapThenSumReduce (identity map -> sum) over `len` elements.
// NOTE(review): `in` is never filled — the kernel reads whatever is in device
// memory. Fine for timing a sum, but the values are indeterminate.
template <typename T>
struct map_then_reduce : public fixture {
  map_then_reduce(const map_then_reduce_inputs& p) : params(p), in(p.len, stream), out(1, stream) {}
  void run_benchmark(::benchmark::State& state) override
  {
    loop_on_state(state, [this]() {
      raft::linalg::mapThenSumReduce(out.data(), params.len, Identity<T>(), stream, in.data());
    });
  }

 private:
  map_then_reduce_inputs params;
  rmm::device_uvector<T> out, in;
};  // struct MapThenReduce
// Powers of two plus +1/+2 variants to hit the non-divisible-tail code paths.
const std::vector<map_then_reduce_inputs> map_then_reduce_input_vecs{
  {1024 * 1024},
  {32 * 1024 * 1024},
  {1024 * 1024 * 1024},
  {1024 * 1024 + 2},
  {32 * 1024 * 1024 + 2},
  {1024 * 1024 * 1024 + 2},
  {1024 * 1024 + 1},
  {32 * 1024 * 1024 + 1},
  {1024 * 1024 * 1024 + 1},
};
RAFT_BENCH_REGISTER(map_then_reduce<float>, "", map_then_reduce_input_vecs);
RAFT_BENCH_REGISTER(map_then_reduce<double>, "", map_then_reduce_input_vecs);
} // namespace raft::bench::linalg
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/matrix/select_k.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft_internal/matrix/select_k.cuh>
#include <common/benchmark.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/nvtx.hpp>
#include <raft/random/rng.cuh>
#include <raft/sparse/detail/utils.h>
#include <raft/util/cudart_utils.hpp>
#include <raft/matrix/detail/select_radix.cuh>
#include <raft/matrix/detail/select_warpsort.cuh>
#include <raft/matrix/select_k.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <cstdint>
#include <cstring>
#include <type_traits>
namespace raft::matrix {
using namespace raft::bench; // NOLINT
// Map op for raft::linalg::map_offset: where `mask` is set, substitute
// `replacement` for the input value — except within the first `spared_inputs`
// positions of each row (row = `line_length` consecutive elements), which are
// always kept so the selection output is never fully degenerate.
template <typename KeyT>
struct replace_with_mask {
  KeyT replacement;
  int64_t line_length;
  int64_t spared_inputs;
  constexpr auto inline operator()(int64_t offset, KeyT x, uint8_t mask) -> KeyT
  {
    if (!mask) { return x; }
    const auto pos_in_line = offset % line_length;
    // spare a few elements at the beginning of each input row
    return pos_in_line < spared_inputs ? x : replacement;
  }
};
// Benchmark fixture for top-k selection: picks k extreme values (and their
// indices) out of `len` candidates per row, over `batch_size` rows, using the
// algorithm selected by the `Algo` template parameter.
template <typename KeyT, typename IdxT, select::Algo Algo>
struct selection : public fixture {
  explicit selection(const select::params& p)
    : fixture(p.use_memory_pool),
      params_(p),
      in_dists_(p.batch_size * p.len, stream),
      in_ids_(p.batch_size * p.len, stream),
      out_dists_(p.batch_size * p.k, stream),
      out_ids_(p.batch_size * p.k, stream)
  {
    // Fill the optional index input with sequential ids per row.
    raft::sparse::iota_fill(in_ids_.data(), IdxT(p.batch_size), IdxT(p.len), stream);
    raft::random::RngState state{42};
    KeyT min_value = -1.0;
    KeyT max_value = 1.0;
    if (p.use_same_leading_bits) {
      // Draw keys from a tiny interval just above 1.0 so every value shares
      // its high-order bits (see the bit patterns below) — this stresses
      // algorithms that discriminate on leading bits.
      if constexpr (std::is_same_v<KeyT, float>) {
        uint32_t min_bits = 0x3F800000;  // 1.0
        uint32_t max_bits = 0x3F8000FF;  // 1.00003
        memcpy(&min_value, &min_bits, sizeof(KeyT));
        memcpy(&max_value, &max_bits, sizeof(KeyT));
      } else if constexpr (std::is_same_v<KeyT, double>) {
        uint64_t min_bits = 0x3FF0000000000000;  // 1.0
        uint64_t max_bits = 0x3FF0000FFFFFFFFF;  // 1.000015
        memcpy(&min_value, &min_bits, sizeof(KeyT));
        memcpy(&max_value, &max_bits, sizeof(KeyT));
      }
    }
    raft::random::uniform(handle, state, in_dists_.data(), in_dists_.size(), min_value, max_value);
    if (p.frac_infinities > 0.0) {
      // Replace a Bernoulli-chosen fraction of the inputs with the furthest
      // possible value (upper/lower bound of KeyT), sparing the first k/2
      // elements of each row so the selection always has some real candidates.
      rmm::device_uvector<uint8_t> mask_buf(p.batch_size * p.len, stream);
      auto mask = make_device_vector_view<uint8_t, size_t>(mask_buf.data(), mask_buf.size());
      raft::random::bernoulli(handle, state, mask, p.frac_infinities);
      KeyT bound = p.select_min ? raft::upper_bound<KeyT>() : raft::lower_bound<KeyT>();
      auto mask_in =
        make_device_vector_view<const uint8_t, size_t>(mask_buf.data(), mask_buf.size());
      auto dists_in = make_device_vector_view<const KeyT>(in_dists_.data(), in_dists_.size());
      auto dists_out = make_device_vector_view<KeyT>(in_dists_.data(), in_dists_.size());
      raft::linalg::map_offset(handle,
                               dists_out,
                               replace_with_mask<KeyT>{bound, int64_t(p.len), int64_t(p.k / 2)},
                               dists_in,
                               mask_in);
    }
  }
  void run_benchmark(::benchmark::State& state) override  // NOLINT
  {
    try {
      // Label: "batch#len#k" plus optional markers for the special input modes.
      std::ostringstream label_stream;
      label_stream << params_.batch_size << "#" << params_.len << "#" << params_.k;
      if (params_.use_same_leading_bits) { label_stream << "#same-leading-bits"; }
      if (params_.frac_infinities > 0) { label_stream << "#infs-" << params_.frac_infinities; }
      state.SetLabel(label_stream.str());
      common::nvtx::range case_scope("%s - %s", state.name().c_str(), label_stream.str().c_str());
      int iter = 0;
      loop_on_state(state, [&iter, this]() {
        // One NVTX range per iteration so laps can be correlated in a profiler.
        common::nvtx::range lap_scope("lap-", iter++);
        select::select_k_impl<KeyT, IdxT>(handle,
                                          Algo,
                                          in_dists_.data(),
                                          params_.use_index_input ? in_ids_.data() : NULL,
                                          params_.batch_size,
                                          params_.len,
                                          params_.k,
                                          out_dists_.data(),
                                          out_ids_.data(),
                                          params_.select_min);
      });
    } catch (raft::exception& e) {
      // Skip (rather than abort the whole run) on cases an algorithm rejects.
      state.SkipWithError(e.what());
    }
  }

 private:
  const select::params params_;
  rmm::device_uvector<KeyT> in_dists_, out_dists_;
  rmm::device_uvector<IdxT> in_ids_, out_ids_;
};
// Benchmark cases. Fields appear to follow select::params order:
// {batch_size, len, k, select_min[, use_index_input, use_same_leading_bits,
//  <7th flag>, frac_infinities]} — NOTE(review): confirm the trailing field
// order against raft_internal/matrix/select_k.cuh.
const std::vector<select::params> kInputs{
  {20000, 500, 1, true},
  {20000, 500, 2, true},
  {20000, 500, 4, true},
  {20000, 500, 8, true},
  {20000, 500, 16, true},
  {20000, 500, 32, true},
  {20000, 500, 64, true},
  {20000, 500, 128, true},
  {20000, 500, 256, true},
  {1000, 10000, 1, true},
  {1000, 10000, 2, true},
  {1000, 10000, 4, true},
  {1000, 10000, 8, true},
  {1000, 10000, 16, true},
  {1000, 10000, 32, true},
  {1000, 10000, 64, true},
  {1000, 10000, 128, true},
  {1000, 10000, 256, true},
  {100, 100000, 1, true},
  {100, 100000, 2, true},
  {100, 100000, 4, true},
  {100, 100000, 8, true},
  {100, 100000, 16, true},
  {100, 100000, 32, true},
  {100, 100000, 64, true},
  {100, 100000, 128, true},
  {100, 100000, 256, true},
  {10, 1000000, 1, true},
  {10, 1000000, 2, true},
  {10, 1000000, 4, true},
  {10, 1000000, 8, true},
  {10, 1000000, 16, true},
  {10, 1000000, 32, true},
  {10, 1000000, 64, true},
  {10, 1000000, 128, true},
  {10, 1000000, 256, true},
  // same-leading-bits variants (hard case for bit-wise discrimination)
  {10, 1000000, 1, true, false, true},
  {10, 1000000, 2, true, false, true},
  {10, 1000000, 4, true, false, true},
  {10, 1000000, 8, true, false, true},
  {10, 1000000, 16, true, false, true},
  {10, 1000000, 32, true, false, true},
  {10, 1000000, 64, true, false, true},
  {10, 1000000, 128, true, false, true},
  {10, 1000000, 256, true, false, true},
  // variants with a fraction of the inputs replaced by the bound value
  {10, 1000000, 1, true, false, false, true, 0.1},
  {10, 1000000, 16, true, false, false, true, 0.1},
  {10, 1000000, 64, true, false, false, true, 0.1},
  {10, 1000000, 128, true, false, false, true, 0.1},
  {10, 1000000, 256, true, false, false, true, 0.1},
  {10, 1000000, 1, true, false, false, true, 0.9},
  {10, 1000000, 16, true, false, false, true, 0.9},
  {10, 1000000, 64, true, false, false, true, 0.9},
  {10, 1000000, 128, true, false, false, true, 0.9},
  {10, 1000000, 256, true, false, false, true, 0.9},
  {1000, 10000, 1, true, false, false, true, 0.9},
  {1000, 10000, 16, true, false, false, true, 0.9},
  {1000, 10000, 64, true, false, false, true, 0.9},
  {1000, 10000, 128, true, false, false, true, 0.9},
  {1000, 10000, 256, true, false, false, true, 0.9},
  {10, 1000000, 1, true, false, false, true, 1.0},
  {10, 1000000, 16, true, false, false, true, 1.0},
  {10, 1000000, 64, true, false, false, true, 1.0},
  {10, 1000000, 128, true, false, false, true, 1.0},
  {10, 1000000, 256, true, false, false, true, 1.0},
  {1000, 10000, 1, true, false, false, true, 1.0},
  {1000, 10000, 16, true, false, false, true, 1.0},
  {1000, 10000, 64, true, false, false, true, 1.0},
  {1000, 10000, 128, true, false, false, true, 1.0},
  {1000, 10000, 256, true, false, false, true, 1.0},
  {1000, 10000, 256, true, false, false, true, 0.999},
};
// Registers one benchmark per (KeyT, IdxT, Algo) over all kInputs. The unique
// namespace (BENCHMARK_PRIVATE_NAME) keeps the SelectK alias from colliding
// between macro expansions.
#define SELECTION_REGISTER(KeyT, IdxT, A)                      \
  namespace BENCHMARK_PRIVATE_NAME(selection) {                \
  using SelectK = selection<KeyT, IdxT, select::Algo::A>;      \
  RAFT_BENCH_REGISTER(SelectK, #KeyT "/" #IdxT "/" #A, kInputs); \
  }
SELECTION_REGISTER(float, uint32_t, kPublicApi);              // NOLINT
SELECTION_REGISTER(float, uint32_t, kRadix8bits);             // NOLINT
SELECTION_REGISTER(float, uint32_t, kRadix11bits);            // NOLINT
SELECTION_REGISTER(float, uint32_t, kRadix11bitsExtraPass);   // NOLINT
SELECTION_REGISTER(float, uint32_t, kWarpAuto);               // NOLINT
SELECTION_REGISTER(float, uint32_t, kWarpImmediate);          // NOLINT
SELECTION_REGISTER(float, uint32_t, kWarpFiltered);           // NOLINT
SELECTION_REGISTER(float, uint32_t, kWarpDistributed);        // NOLINT
SELECTION_REGISTER(float, uint32_t, kWarpDistributedShm);     // NOLINT
SELECTION_REGISTER(double, uint32_t, kRadix8bits);            // NOLINT
SELECTION_REGISTER(double, uint32_t, kRadix11bits);           // NOLINT
SELECTION_REGISTER(double, uint32_t, kRadix11bitsExtraPass);  // NOLINT
SELECTION_REGISTER(double, uint32_t, kWarpAuto);              // NOLINT
SELECTION_REGISTER(double, int64_t, kRadix8bits);             // NOLINT
SELECTION_REGISTER(double, int64_t, kRadix11bits);            // NOLINT
SELECTION_REGISTER(double, int64_t, kRadix11bitsExtraPass);   // NOLINT
SELECTION_REGISTER(double, int64_t, kWarpImmediate);          // NOLINT
SELECTION_REGISTER(double, int64_t, kWarpFiltered);           // NOLINT
SELECTION_REGISTER(double, int64_t, kWarpDistributed);        // NOLINT
SELECTION_REGISTER(double, int64_t, kWarpDistributedShm);     // NOLINT
// For learning a heuristic of which selection algorithm to use, we
// have a couple of additional constraints when generating the dataset:
// 1. We want these benchmarks to be optionally enabled from the commandline -
// there are thousands of them, and the run-time is non-trivial. This should be opt-in only
// 2. We test out larger k values - that won't work for all algorithms. This requires filtering
// the input parameters per algorithm.
// This makes the code to generate this dataset different from the code above to
// register other benchmarks
// Registers a single (KeyT, IdxT, Algo, input) benchmark instance, naming it
// after all of its parameters so the dataset rows are self-describing.
#define SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, A, input)                               \
  {                                                                                       \
    using SelectK = selection<KeyT, IdxT, select::Algo::A>;                               \
    std::stringstream name;                                                               \
    name << "SelectKDataset/" << #KeyT "/" #IdxT "/" #A << "/" << input.batch_size << "/" \
         << input.len << "/" << input.k << "/" << input.use_index_input << "/"            \
         << input.use_memory_pool;                                                        \
    auto* b = ::benchmark::internal::RegisterBenchmarkInternal(                           \
      new raft::bench::internal::Fixture<SelectK, select::params>(name.str(), input));    \
    b->UseManualTime();                                                                   \
    b->Unit(benchmark::kMillisecond);                                                     \
  }
// Inputs whose key+index buffers would exceed this budget (16 GiB) are skipped.
const static size_t MAX_MEMORY = 16 * 1024 * 1024 * 1024ULL;
// registers the input for all algorithms; warp-sort and faiss variants are
// capacity-limited in k, hence the extra filters below
#define SELECTION_REGISTER_INPUT(KeyT, IdxT, input)                          \
  {                                                                          \
    size_t mem = input.batch_size * input.len * (sizeof(KeyT) + sizeof(IdxT)); \
    if (mem < MAX_MEMORY) {                                                  \
      SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kRadix8bits, input)          \
      SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kRadix11bits, input)         \
      SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kRadix11bitsExtraPass, input) \
      if (input.k <= raft::matrix::detail::select::warpsort::kMaxCapacity) { \
        SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kWarpImmediate, input)     \
        SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kWarpFiltered, input)      \
        SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kWarpDistributed, input)   \
        SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kWarpDistributedShm, input) \
      }                                                                      \
      if (input.k <= raft::neighbors::detail::kFaissMaxK<IdxT, KeyT>()) {    \
        SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kFaissBlockSelect, input)  \
      }                                                                      \
    }                                                                        \
  }
// Registers the opt-in "heuristic dataset" benchmarks: a uniform power-of-two
// grid over (batch, len, k) plus 1024 random log-uniform samples, each run with
// and without the memory pool.
void add_select_k_dataset_benchmarks()
{
  // define a uniform grid
  size_t grid_increment = 1;
  std::vector<int> k_values;
  for (size_t e = 0; e < 13; e += grid_increment) {
    k_values.push_back(1 << e);
  }
  // Add in values just past the limit for warp/faiss select
  k_values.push_back(257);
  k_values.push_back(2049);
  const static bool select_min = true;
  const static bool use_ids    = false;
  std::vector<select::params> cases;
  for (size_t row_exp = 0; row_exp < 13; row_exp += grid_increment) {
    for (size_t col_exp = 10; col_exp < 28; col_exp += grid_increment) {
      for (int k : k_values) {
        cases.push_back(
          select::params{size_t(1 << row_exp), size_t(1 << col_exp), k, select_min, use_ids});
      }
    }
  }
  // also add in some random values (2^U with U uniform, i.e. log-uniform sizes)
  std::default_random_engine engine(42);
  std::uniform_real_distribution<> row_dist(0, 13);
  std::uniform_real_distribution<> col_dist(10, 28);
  std::uniform_real_distribution<> k_dist(0, 13);
  for (size_t i = 0; i < 1024; ++i) {
    auto n_rows = static_cast<size_t>(pow(2, row_dist(engine)));
    auto n_cols = static_cast<size_t>(pow(2, col_dist(engine)));
    auto k_rand = static_cast<int>(pow(2, k_dist(engine)));
    cases.push_back(select::params{n_rows, n_cols, k_rand, select_min, use_ids});
  }
  for (auto& input : cases) {
    SELECTION_REGISTER_INPUT(double, int64_t, input);
    SELECTION_REGISTER_INPUT(double, uint32_t, input);
    SELECTION_REGISTER_INPUT(float, int64_t, input);
    SELECTION_REGISTER_INPUT(float, uint32_t, input);
  }
  // also try again without a memory pool to see if there are significant differences
  for (auto input : cases) {
    input.use_memory_pool = false;
    SELECTION_REGISTER_INPUT(double, int64_t, input);
    SELECTION_REGISTER_INPUT(double, uint32_t, input);
    SELECTION_REGISTER_INPUT(float, int64_t, input);
    SELECTION_REGISTER_INPUT(float, uint32_t, input);
  }
}
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/matrix/gather.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/gather.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/itertools.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::bench::matrix {
// Benchmark parameters: source matrix shape and number of rows to gather.
template <typename IdxT>
struct GatherParams {
  IdxT rows, cols, map_length;
};
// Pretty-printer for the benchmark label: "rows#cols#map_length".
template <typename IdxT>
inline auto operator<<(std::ostream& os, const GatherParams<IdxT>& p) -> std::ostream&
{
  return os << p.rows << "#" << p.cols << "#" << p.map_length;
}
// Benchmark fixture: gather `map_length` rows (selected by a random index map)
// out of a rows x cols matrix. With Conditional == true, uses gather_if with a
// random stencil and a greater-than predicate instead.
template <typename T, typename MapT, typename IdxT, bool Conditional = false>
struct Gather : public fixture {
  Gather(const GatherParams<IdxT>& p)
    : params(p), matrix(this->handle), map(this->handle), out(this->handle), stencil(this->handle)
  {
  }
  void allocate_data(const ::benchmark::State& state) override
  {
    matrix = raft::make_device_matrix<T, IdxT>(handle, params.rows, params.cols);
    map = raft::make_device_vector<MapT, IdxT>(handle, params.map_length);
    out = raft::make_device_matrix<T, IdxT>(handle, params.map_length, params.cols);
    // Stencil is only needed for the conditional variant; size 0 otherwise.
    stencil = raft::make_device_vector<T, IdxT>(handle, Conditional ? params.map_length : IdxT(0));
    raft::random::RngState rng{1234};
    raft::random::uniform(
      handle, rng, matrix.data_handle(), params.rows * params.cols, T(-1), T(1));
    // Map entries drawn from [0, rows) (assumes uniformInt's upper bound is
    // exclusive — TODO confirm), so each one indexes a valid source row.
    raft::random::uniformInt(
      handle, rng, map.data_handle(), params.map_length, (MapT)0, (MapT)params.rows);
    if constexpr (Conditional) {
      raft::random::uniform(handle, rng, stencil.data_handle(), params.map_length, T(-1), T(1));
    }
    // Make sure all fills have finished before timing starts.
    resource::sync_stream(handle, stream);
  }
  void run_benchmark(::benchmark::State& state) override
  {
    std::ostringstream label_stream;
    label_stream << params;
    state.SetLabel(label_stream.str());
    loop_on_state(state, [this]() {
      auto matrix_const_view = raft::make_const_mdspan(matrix.view());
      auto map_const_view = raft::make_const_mdspan(map.view());
      if constexpr (Conditional) {
        auto stencil_const_view = raft::make_const_mdspan(stencil.view());
        // Predicate built from plug_const_op(0, greater_op) — presumably
        // "stencil value > 0"; verify argument order against plug_const_op.
        auto pred_op = raft::plug_const_op(T(0.0), raft::greater_op());
        raft::matrix::gather_if(
          handle, matrix_const_view, out.view(), map_const_view, stencil_const_view, pred_op);
      } else {
        raft::matrix::gather(handle, matrix_const_view, map_const_view, out.view());
      }
    });
  }

 private:
  GatherParams<IdxT> params;
  raft::device_matrix<T, IdxT> matrix, out;
  raft::device_vector<T, IdxT> stencil;
  raft::device_vector<MapT, IdxT> map;
};  // struct Gather
// Convenience alias for the conditional (stencil-predicated) variant.
template <typename T, typename MapT, typename IdxT>
using GatherIf = Gather<T, MapT, IdxT, true>;
// 1M source rows, varying widths and map lengths.
const std::vector<GatherParams<int64_t>> gather_inputs_i64 =
  raft::util::itertools::product<GatherParams<int64_t>>(
    {1000000}, {10, 20, 50, 100, 200, 500}, {1000, 10000, 100000, 1000000});
RAFT_BENCH_REGISTER((Gather<float, uint32_t, int64_t>), "", gather_inputs_i64);
RAFT_BENCH_REGISTER((Gather<double, uint32_t, int64_t>), "", gather_inputs_i64);
RAFT_BENCH_REGISTER((GatherIf<float, uint32_t, int64_t>), "", gather_inputs_i64);
RAFT_BENCH_REGISTER((GatherIf<double, uint32_t, int64_t>), "", gather_inputs_i64);
} // namespace raft::bench::matrix
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/matrix/argmin.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/argmin.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/itertools.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::bench::matrix {
// Benchmark parameters: input matrix shape (rows x cols).
template <typename IdxT>
struct ArgminParams {
  IdxT rows, cols;
};
// Benchmark fixture: row-wise argmin over a rows x cols matrix
// (one index output per row).
template <typename T, typename OutT, typename IdxT>
struct Argmin : public fixture {
  Argmin(const ArgminParams<IdxT>& p) : params(p), matrix(this->handle), indices(this->handle) {}
  void allocate_data(const ::benchmark::State& state) override
  {
    matrix = raft::make_device_matrix<T, IdxT>(handle, params.rows, params.cols);
    indices = raft::make_device_vector<OutT, IdxT>(handle, params.rows);
    // Uniform random input in [-1, 1]; fixed seed for reproducibility.
    raft::random::RngState rng{1234};
    raft::random::uniform(
      handle, rng, matrix.data_handle(), params.rows * params.cols, T(-1), T(1));
    // Make sure the fill has finished before timing starts.
    resource::sync_stream(handle, stream);
  }
  void run_benchmark(::benchmark::State& state) override
  {
    loop_on_state(state, [this]() {
      raft::matrix::argmin(handle, raft::make_const_mdspan(matrix.view()), indices.view());
    });
  }

 private:
  ArgminParams<IdxT> params;
  raft::device_matrix<T, IdxT> matrix;
  raft::device_vector<OutT, IdxT> indices;
};  // struct Argmin
// Benchmark cases: cartesian product of row counts and column counts,
// instantiated for both float and double inputs with uint32 output indices.
const std::vector<ArgminParams<int64_t>> argmin_inputs_i64 =
  raft::util::itertools::product<ArgminParams<int64_t>>({1000, 10000, 100000, 1000000, 10000000},
                                                        {64, 128, 256, 512, 1024});

RAFT_BENCH_REGISTER((Argmin<float, uint32_t, int64_t>), "", argmin_inputs_i64);
RAFT_BENCH_REGISTER((Argmin<double, uint32_t, int64_t>), "", argmin_inputs_i64);
} // namespace raft::bench::matrix
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/matrix/main.cpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmark/benchmark.h>
#include <cstring>
namespace raft::matrix {
void add_select_k_dataset_benchmarks();
}
int main(int argc, char** argv)
{
  // If we're passed a '--select_k_dataset' flag, add in extra benchmarks.
  for (int i = 1; i < argc; ++i) {
    if (strcmp(argv[i], "--select_k_dataset") == 0) {
      raft::matrix::add_select_k_dataset_benchmarks();

      // Pop the flag off argc/argv. After shifting, restore the
      // argv[argc] == nullptr invariant guaranteed by the C++ standard, so
      // downstream argument parsers can rely on the null terminator.
      for (int j = i; j < argc - 1; ++j)
        argv[j] = argv[j + 1];
      argc--;
      argv[argc] = nullptr;
      break;
    }
  }

  benchmark::Initialize(&argc, argv);
  if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
  benchmark::RunSpecifiedBenchmarks();
  return 0;
}
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/common/benchmark.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/detail/macros.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/interruptible.hpp>
#include <raft/random/make_blobs.cuh>
#include <raft/util/cudart_utils.hpp>
#include <benchmark/benchmark.h>
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
namespace raft::bench {
/**
 * RAII way to temporarily set the pooling memory allocator in rmm.
 * This may be useful for benchmarking functions that do some memory allocations.
 */
struct using_pool_memory_res {
 private:
  rmm::mr::device_memory_resource* orig_res_;  // resource to restore on destruction
  rmm::mr::cuda_memory_resource cuda_res_;     // upstream allocator backing the pool
  rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> pool_res_;

 public:
  /** Install a pool with explicit initial/maximum sizes as the current device resource. */
  using_pool_memory_res(size_t initial_size, size_t max_size)
    : orig_res_(rmm::mr::get_current_device_resource()),
      pool_res_(&cuda_res_, initial_size, max_size)
  {
    rmm::mr::set_current_device_resource(&pool_res_);
  }

  /** Install a pool with rmm's default sizing as the current device resource. */
  using_pool_memory_res() : orig_res_(rmm::mr::get_current_device_resource()), pool_res_(&cuda_res_)
  {
    rmm::mr::set_current_device_resource(&pool_res_);
  }

  // Non-copyable: a copy would restore the original resource twice and hold
  // pointers into another instance's pool.
  using_pool_memory_res(const using_pool_memory_res&)            = delete;
  using_pool_memory_res& operator=(const using_pool_memory_res&) = delete;

  ~using_pool_memory_res() { rmm::mr::set_current_device_resource(orig_res_); }
};
/**
 * RAII way of timing cuda calls. This has been shamelessly copied from the
 * cudf codebase via cuml codebase. So, credits for this class goes to cudf developers.
 */
struct cuda_event_timer {
 private:
  ::benchmark::State* state_;
  rmm::cuda_stream_view stream_;
  cudaEvent_t start_;
  cudaEvent_t stop_;

 public:
  /**
   * @param state the benchmark::State whose timer we are going to update.
   * @param stream CUDA stream we are measuring time on.
   */
  cuda_event_timer(::benchmark::State& state, rmm::cuda_stream_view stream)
    : state_(&state), stream_(stream)
  {
    RAFT_CUDA_TRY(cudaEventCreate(&start_));
    RAFT_CUDA_TRY(cudaEventCreate(&stop_));
    // Drain all prior work on the stream so the measured interval starts from
    // an idle stream, then mark the start point.
    raft::interruptible::synchronize(stream_);
    RAFT_CUDA_TRY(cudaEventRecord(start_, stream_));
  }
  cuda_event_timer() = delete;

  /**
   * @brief The dtor stops the timer and performs a synchronization. Time of
   * the benchmark::State object provided to the ctor will be set to the
   * value given by `cudaEventElapsedTime()`.
   */
  ~cuda_event_timer()
  {
    // Use the NO_THROW variants throughout: throwing from a destructor would
    // call std::terminate.
    RAFT_CUDA_TRY_NO_THROW(cudaEventRecord(stop_, stream_));
    raft::interruptible::synchronize(stop_);
    float milliseconds = 0.0f;
    RAFT_CUDA_TRY_NO_THROW(cudaEventElapsedTime(&milliseconds, start_, stop_));
    // google-benchmark's manual-timing mode expects seconds.
    state_->SetIterationTime(milliseconds / 1000.f);
    RAFT_CUDA_TRY_NO_THROW(cudaEventDestroy(start_));
    RAFT_CUDA_TRY_NO_THROW(cudaEventDestroy(stop_));
  }
};
/** Main fixture to be inherited and used by all other c++ benchmarks */
class fixture {
 private:
  rmm::device_buffer scratch_buf_;  // zero-filled between iterations to evict L2

 public:
  raft::device_resources handle;
  rmm::cuda_stream_view stream;

  fixture(bool use_pool_memory_resource = false) : stream{resource::get_cuda_stream(handle)}
  {
    // Cache memory pool between test runs, since it is expensive to create.
    // This speeds up the time required to run the select_k bench by over 3x.
    // This is part of the fixture class here so that the pool will get cleaned
    // up, rather than outliving the benchmarks that require it.
    static std::unique_ptr<using_pool_memory_res> memory_pool;
    if (use_pool_memory_resource) {
      if (!memory_pool) { memory_pool.reset(new using_pool_memory_res()); }
    } else if (memory_pool) {
      memory_pool.reset();
    }

    // Size the scratch buffer to 3x the device's L2 cache so a memset of it
    // evicts all cached lines. Widen to size_t before multiplying to avoid
    // any possibility of int overflow on large-cache devices.
    int l2_cache_size = 0;
    int device_id     = 0;
    RAFT_CUDA_TRY(cudaGetDevice(&device_id));
    RAFT_CUDA_TRY(cudaDeviceGetAttribute(&l2_cache_size, cudaDevAttrL2CacheSize, device_id));
    scratch_buf_ = rmm::device_buffer(static_cast<size_t>(l2_cache_size) * 3, stream);
  }

  // This class has virtual member functions and is held/used polymorphically;
  // a virtual destructor guarantees derived fixtures are destroyed correctly
  // if ever deleted through a base pointer.
  virtual ~fixture() = default;

  // every benchmark should be overriding this
  virtual void run_benchmark(::benchmark::State& state) = 0;
  virtual void generate_metrics(::benchmark::State& state) {}
  virtual void allocate_data(const ::benchmark::State& state) {}
  virtual void deallocate_data(const ::benchmark::State& state) {}
  virtual void allocate_temp_buffers(const ::benchmark::State& state) {}
  virtual void deallocate_temp_buffers(const ::benchmark::State& state) {}

 protected:
  /** The helper that writes zeroes to some buffer in GPU memory to flush the L2 cache. */
  void flush_L2_cache()
  {
    RAFT_CUDA_TRY(cudaMemsetAsync(scratch_buf_.data(), 0, scratch_buf_.size(), stream));
  }

  /**
   * The helper to be used inside `run_benchmark`, to loop over the state and record time using the
   * cuda_event_timer.
   */
  template <typename Lambda>
  void loop_on_state(::benchmark::State& state, Lambda benchmark_func, bool flush_L2 = true)
  {
    for (auto _ : state) {
      if (flush_L2) { flush_L2_cache(); }
      cuda_event_timer timer(state, stream);
      benchmark_func();
    }
  }
};
/** Indicates the dataset size. */
struct DatasetParams {
  size_t rows;     // number of samples
  size_t cols;     // number of features per sample
  bool row_major;  // memory layout of the generated dataset
};
/** Holds params needed to generate blobs dataset */
struct BlobsParams {
  int n_clusters;                         // number of gaussian blobs
  double cluster_std;                     // standard deviation within each blob
  bool shuffle;                           // whether to shuffle the generated samples
  double center_box_min, center_box_max;  // bounding box for the blob centers
  uint64_t seed;                          // RNG seed for reproducibility
};
/** Fixture for cluster benchmarks using make_blobs */
template <typename T, typename IndexT = int>
class BlobsFixture : public fixture {
 public:
  BlobsFixture(const DatasetParams dp, const BlobsParams bp)
    : data_params(dp), blobs_params(bp), X(this->handle)
  {
  }

  virtual void run_benchmark(::benchmark::State& state) = 0;

  // Generate the blobs dataset into X. Labels are generated into a temporary
  // vector and discarded: the fixtures here only need the coordinates.
  void allocate_data(const ::benchmark::State& state) override
  {
    auto labels_ref = raft::make_device_vector<IndexT, IndexT>(this->handle, data_params.rows);
    X = raft::make_device_matrix<T, IndexT>(this->handle, data_params.rows, data_params.cols);

    raft::random::make_blobs<T, IndexT>(X.data_handle(),
                                        labels_ref.data_handle(),
                                        (IndexT)data_params.rows,
                                        (IndexT)data_params.cols,
                                        (IndexT)blobs_params.n_clusters,
                                        stream,
                                        data_params.row_major,
                                        nullptr,
                                        nullptr,
                                        (T)blobs_params.cluster_std,
                                        blobs_params.shuffle,
                                        (T)blobs_params.center_box_min,
                                        (T)blobs_params.center_box_max,
                                        blobs_params.seed);
    // Ensure generation has completed before any timed region starts.
    resource::sync_stream(this->handle, stream);
  }

 protected:
  DatasetParams data_params;
  BlobsParams blobs_params;
  raft::device_matrix<T, IndexT> X;  // generated dataset [rows x cols]
};
namespace internal {
/**
 * Adapter between google-benchmark's Fixture and raft::bench's fixture:
 * constructs a fresh `Class` (and its data) for every benchmark case.
 */
template <typename Class, typename... Params>
class Fixture : public ::benchmark::Fixture {
  using State = ::benchmark::State;

 public:
  explicit Fixture(const std::string name, const Params&... params)
    : ::benchmark::Fixture(), params_(params...), name_(name)
  {
    SetName(name_.c_str());
  }
  Fixture() = delete;

  void SetUp(const State& state) override
  {
    // Construct the user fixture from the stored parameter tuple, then let it
    // allocate its inputs and temporary buffers (outside the timed region).
    fixture_ =
      std::apply([](const Params&... ps) { return std::make_unique<Class>(ps...); }, params_);
    fixture_->allocate_data(state);
    fixture_->allocate_temp_buffers(state);
  }

  void TearDown(const State& state) override
  {
    fixture_->deallocate_temp_buffers(state);
    fixture_->deallocate_data(state);
    fixture_.reset();
  }

  // google-benchmark exposes both const and non-const overloads; forward the
  // non-const ones to the const implementations above.
  void SetUp(State& st) override { SetUp(const_cast<const State&>(st)); }
  void TearDown(State& st) override { TearDown(const_cast<const State&>(st)); }

 private:
  std::unique_ptr<Class> fixture_;  // rebuilt per benchmark case in SetUp
  std::tuple<Params...> params_;    // constructor arguments for Class
  const std::string name_;

 protected:
  void BenchmarkCase(State& state) override
  {
    fixture_->run_benchmark(state);
    fixture_->generate_metrics(state);
  }
};  // class Fixture
/**
 * A helper struct to create a fixture for every combination of input vectors.
 * Use with care, this can blow up quickly!
 */
template <typename Class, typename... Params>
struct cartesian_registrar {
  template <typename... Fixed>
  static void run(const std::string case_name,
                  const std::vector<Params>&... params,
                  const Fixed&... fixed);
};

// Base case: all parameter vectors have been consumed; register a single
// benchmark with the accumulated fixed parameter values.
template <typename Class>
struct cartesian_registrar<Class> {
  template <typename... Fixed>
  static void run(const std::string case_name, const Fixed&... fixed)
  {
    auto* b = ::benchmark::internal::RegisterBenchmarkInternal(
      new Fixture<Class, Fixed...>(case_name, fixed...));
    // Fixtures time GPU work with cuda_event_timer, so use manual timing.
    b->UseManualTime();
    b->Unit(benchmark::kMillisecond);
  }
};

// Recursive case: peel off the first parameter vector; for each element,
// append its index to the benchmark name and its value to the fixed pack.
template <typename Class, typename Param, typename... Params>
struct cartesian_registrar<Class, Param, Params...> {
  template <typename... Fixed>
  static void run(const std::string case_name,
                  const std::vector<Param>& param,
                  const std::vector<Params>&... params,
                  const Fixed&... fixed)
  {
    int param_len = param.size();
    for (int i = 0; i < param_len; i++) {
      cartesian_registrar<Class, Params...>::run(
        case_name + "/" + std::to_string(i), params..., fixed..., param[i]);
    }
  }
};
template <typename Class>
struct registrar {
  /**
   * Register a fixture `Class` named `testClass` for every combination of input `params`.
   *
   * @param test_class
   *   A string representation of the `Class` name.
   * @param test_name
   *   Optional test name. Leave empty, if you don't need it.
   * @param params
   *   Zero or more vectors of parameters.
   *   The generated test cases are a cartesian product of these vectors.
   *   Use with care, this can blow up quickly!
   */
  template <typename... Params>
  registrar(const std::string& test_class,
            const std::string& test_name,
            const std::vector<Params>&... params)
  {
    // Resulting benchmark names look like "TestClass[/test_name]/i/j/..."
    // where i, j, ... index into the parameter vectors.
    std::stringstream name_stream;
    name_stream << test_class;
    if (!test_name.empty()) { name_stream << "/" << test_name; }
    cartesian_registrar<Class, Params...>::run(name_stream.str(), params...);
  }
};
}; // namespace internal
// Implementation detail of RAFT_BENCH_REGISTER: instantiates a static registrar
// whose constructor registers all parameter combinations with google-benchmark.
#define RAFT_BENCH_REGISTER_INTERNAL(TestClass, ...)                                   \
  static raft::bench::internal::registrar<TestClass> BENCHMARK_PRIVATE_NAME(registrar)( \
    RAFT_STRINGIFY(TestClass), __VA_ARGS__)
/**
* This is the entry point macro for all benchmarks. This needs to be called
* for the set of benchmarks to be registered so that the main harness inside
* google bench can find these benchmarks and run them.
*
* @param TestClass child class of `raft::bench::Fixture` which contains
* the logic to generate the dataset and run training on it
* for a given algo. Ideally, once such struct is needed for
* every algo to be benchmarked
* @param test_name a unique string to identify these tests at the end of run
* This is optional and if choose not to use this, pass an
* empty string
* @param params... zero or more lists of params upon which to benchmark.
*/
#define RAFT_BENCH_REGISTER(TestClass, ...) \
RAFT_BENCH_REGISTER_INTERNAL(RAFT_DEPAREN(TestClass), __VA_ARGS__)
} // namespace raft::bench
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/cluster/kmeans.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/cluster/kmeans.cuh>
#include <raft/cluster/kmeans_types.hpp>
namespace raft::bench::cluster {
/** Aggregated parameters for one k-means benchmark case. */
struct KMeansBenchParams {
  DatasetParams data;                  // dataset shape and layout
  BlobsParams blobs;                   // synthetic data generation settings
  raft::cluster::KMeansParams kmeans;  // algorithm configuration
};
// Renders the benchmark label as "rows#cols#n_clusters".
inline auto operator<<(std::ostream& os, const KMeansBenchParams& p) -> std::ostream&
{
  return os << p.data.rows << "#" << p.data.cols << "#" << p.kmeans.n_clusters;
}
/** Benchmark fixture timing raft::cluster::kmeans_fit_predict on a blobs dataset. */
template <typename T, typename IndexT = int>
struct KMeans : public BlobsFixture<T, IndexT> {
  KMeans(const KMeansBenchParams& p)
    : BlobsFixture<T, IndexT>(p.data, p.blobs),
      params(p),
      centroids(this->handle),
      labels(this->handle)
  {
  }

  void run_benchmark(::benchmark::State& state) override
  {
    // Attach a human-readable "rows#cols#n_clusters" label to the results.
    std::ostringstream label_stream;
    label_stream << params;
    state.SetLabel(label_stream.str());

    raft::device_matrix_view<const T, IndexT> X_view = this->X.view();
    // No per-sample weights; centroids are passed as an optional output view.
    std::optional<raft::device_vector_view<const T, IndexT>> opt_weights_view = std::nullopt;
    std::optional<raft::device_matrix_view<T, IndexT>> centroids_view =
      std::make_optional<raft::device_matrix_view<T, IndexT>>(centroids.view());
    raft::device_vector_view<IndexT, IndexT> labels_view = labels.view();
    raft::host_scalar_view<T> inertia_view = raft::make_host_scalar_view<T>(&inertia);
    raft::host_scalar_view<IndexT> n_iter_view = raft::make_host_scalar_view<IndexT>(&n_iter);

    this->loop_on_state(state, [&]() {
      raft::cluster::kmeans_fit_predict<T, IndexT>(this->handle,
                                                   params.kmeans,
                                                   X_view,
                                                   opt_weights_view,
                                                   centroids_view,
                                                   labels_view,
                                                   inertia_view,
                                                   n_iter_view);
    });
  }

  // Output buffers are allocated per benchmark case, outside the timed region.
  void allocate_temp_buffers(const ::benchmark::State& state) override
  {
    centroids =
      raft::make_device_matrix<T, IndexT>(this->handle, params.kmeans.n_clusters, params.data.cols);
    labels = raft::make_device_vector<IndexT, IndexT>(this->handle, params.data.rows);
  }

 private:
  KMeansBenchParams params;
  raft::device_matrix<T, IndexT> centroids;    // fitted cluster centers
  raft::device_vector<IndexT, IndexT> labels;  // per-sample cluster assignment
  T inertia;                                   // written by kmeans_fit_predict
  IndexT n_iter;                               // iterations actually run
};  // struct KMeans
/** Builds the list of k-means benchmark cases (shared settings x shape combinations). */
std::vector<KMeansBenchParams> getKMeansInputs()
{
  // Settings common to every generated case.
  KMeansBenchParams base;
  base.data.row_major       = true;
  base.blobs.cluster_std    = 1.0;
  base.blobs.shuffle        = false;
  base.blobs.center_box_min = -10.0;
  base.blobs.center_box_max = 10.0;
  base.blobs.seed           = 12345ULL;
  base.kmeans.init          = raft::cluster::KMeansParams::KMeansPlusPlus;
  base.kmeans.max_iter      = 300;
  base.kmeans.tol           = 1e-4;
  base.kmeans.verbosity     = RAFT_LEVEL_INFO;
  base.kmeans.metric        = raft::distance::DistanceType::L2Expanded;
  base.kmeans.inertia_check = true;

  // (rows, cols, n_clusters) combinations to benchmark.
  const std::vector<std::tuple<int, int, int>> row_cols_k = {
    {1000000, 20, 1000},
    {3000000, 50, 20},
    {10000000, 50, 5},
  };

  std::vector<KMeansBenchParams> out;
  out.reserve(row_cols_k.size());
  for (const auto& [rows, cols, k] : row_cols_k) {
    base.data.rows         = rows;
    base.data.cols         = cols;
    base.blobs.n_clusters  = k;
    base.kmeans.n_clusters = k;
    out.push_back(base);
  }
  return out;
}
// note(lsugy): commenting out int64_t because the templates are not compiled in the distance
// library, resulting in long compilation times.
RAFT_BENCH_REGISTER((KMeans<float, int>), "", getKMeansInputs());
RAFT_BENCH_REGISTER((KMeans<double, int>), "", getKMeansInputs());
// RAFT_BENCH_REGISTER((KMeans<float, int64_t>), "", getKMeansInputs());
// RAFT_BENCH_REGISTER((KMeans<double, int64_t>), "", getKMeansInputs());
} // namespace raft::bench::cluster
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/cluster/kmeans_balanced.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/cluster/kmeans_balanced.cuh>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rng.cuh>
namespace raft::bench::cluster {
/** Aggregated parameters for one balanced-k-means benchmark case. */
struct KMeansBalancedBenchParams {
  DatasetParams data;                                 // dataset shape and layout
  uint32_t n_lists;                                   // number of clusters (IVF lists)
  raft::cluster::kmeans_balanced_params kb_params;    // algorithm configuration
};
/** Benchmark fixture timing raft::cluster::kmeans_balanced::fit. */
template <typename T, typename IndexT = int>
struct KMeansBalanced : public fixture {
  KMeansBalanced(const KMeansBalancedBenchParams& p) : params(p), X(handle), centroids(handle) {}

  void run_benchmark(::benchmark::State& state) override
  {
    this->loop_on_state(state, [this]() {
      raft::device_matrix_view<const T, IndexT> X_view = this->X.view();
      raft::device_matrix_view<T, IndexT> centroids_view = this->centroids.view();
      raft::cluster::kmeans_balanced::fit(
        this->handle, this->params.kb_params, X_view, centroids_view);
    });
  }

  // Fill X with uniform random values spanning the full representable range
  // for integral T, or [-1, 1] for floating-point T.
  void allocate_data(const ::benchmark::State& state) override
  {
    X = raft::make_device_matrix<T, IndexT>(handle, params.data.rows, params.data.cols);

    raft::random::RngState rng{1234};
    constexpr T kRangeMax = std::is_integral_v<T> ? std::numeric_limits<T>::max() : T(1);
    constexpr T kRangeMin = std::is_integral_v<T> ? std::numeric_limits<T>::min() : T(-1);
    if constexpr (std::is_integral_v<T>) {
      raft::random::uniformInt(
        handle, rng, X.data_handle(), params.data.rows * params.data.cols, kRangeMin, kRangeMax);
    } else {
      raft::random::uniform(
        handle, rng, X.data_handle(), params.data.rows * params.data.cols, kRangeMin, kRangeMax);
    }
    // Ensure generation has completed before any timed region starts.
    resource::sync_stream(handle, stream);
  }

  // Centroids are always float (see member type), regardless of input type T.
  void allocate_temp_buffers(const ::benchmark::State& state) override
  {
    centroids =
      raft::make_device_matrix<float, IndexT>(this->handle, params.n_lists, params.data.cols);
  }

 private:
  KMeansBalancedBenchParams params;
  raft::device_matrix<T, IndexT> X;              // input dataset
  raft::device_matrix<float, IndexT> centroids;  // fitted cluster centers
};  // struct KMeansBalanced
/** Builds the list of balanced-k-means benchmark cases. */
std::vector<KMeansBalancedBenchParams> getKMeansBalancedInputs()
{
  // Settings common to every generated case.
  KMeansBalancedBenchParams base;
  base.data.row_major    = true;
  base.kb_params.n_iters = 20;
  base.kb_params.metric  = raft::distance::DistanceType::L2Expanded;

  // (rows, cols) dataset shapes to benchmark.
  const std::vector<std::pair<int, int>> row_cols = {
    {100000, 128}, {1000000, 128}, {10000000, 128},
    // The following dataset sizes are too large for most GPUs.
    // {100000000, 128},
  };
  const std::vector<int> list_counts = {1000, 10000, 100000};

  std::vector<KMeansBalancedBenchParams> out;
  out.reserve(row_cols.size() * list_counts.size());
  for (const auto& [rows, cols] : row_cols) {
    base.data.rows = rows;
    base.data.cols = cols;
    for (int n_lists : list_counts) {
      base.n_lists = n_lists;
      out.push_back(base);
    }
  }
  return out;
}
// Note: the datasets sizes are too large for 32-bit index types.
RAFT_BENCH_REGISTER((KMeansBalanced<float, int64_t>), "", getKMeansBalancedInputs());
} // namespace raft::bench::cluster
| 0 |
rapidsai_public_repos/raft/cpp/bench | rapidsai_public_repos/raft/cpp/bench/ann/CMakeLists.txt | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# ##################################################################################################
# * benchmark options ------------------------------------------------------------------------------
option(RAFT_ANN_BENCH_USE_FAISS_GPU_FLAT "Include faiss' brute-force knn algorithm in benchmark" ON)
option(RAFT_ANN_BENCH_USE_FAISS_GPU_IVF_FLAT "Include faiss' ivf flat algorithm in benchmark" ON)
option(RAFT_ANN_BENCH_USE_FAISS_GPU_IVF_PQ "Include faiss' ivf pq algorithm in benchmark" ON)
# NOTE(review): this option was previously declared twice with differing help strings; keep a
# single declaration.
option(RAFT_ANN_BENCH_USE_FAISS_CPU_FLAT "Include faiss' cpu brute-force algorithm in benchmark" ON)
option(RAFT_ANN_BENCH_USE_FAISS_CPU_IVF_FLAT "Include faiss' cpu ivf flat algorithm in benchmark"
       ON
)
option(RAFT_ANN_BENCH_USE_FAISS_CPU_IVF_PQ "Include faiss' cpu ivf pq algorithm in benchmark" ON)
option(RAFT_ANN_BENCH_USE_RAFT_IVF_FLAT "Include raft's ivf flat algorithm in benchmark" ON)
option(RAFT_ANN_BENCH_USE_RAFT_IVF_PQ "Include raft's ivf pq algorithm in benchmark" ON)
# NOTE(review): RAFT_ANN_BENCH_USE_RAFT_BRUTE_FORCE is referenced further down but was never
# declared as an option. Declare it (defaulting to OFF, matching the previous effective
# behavior of an undefined variable) so -D overrides are discoverable and cached.
option(RAFT_ANN_BENCH_USE_RAFT_BRUTE_FORCE "Include raft's brute-force knn algorithm in benchmark"
       OFF
)
option(RAFT_ANN_BENCH_USE_RAFT_CAGRA "Include raft's CAGRA in benchmark" ON)
option(RAFT_ANN_BENCH_USE_RAFT_CAGRA_HNSWLIB "Include raft's CAGRA with hnswlib search in benchmark"
       ON
)
option(RAFT_ANN_BENCH_USE_HNSWLIB "Include hnsw algorithm in benchmark" ON)
option(RAFT_ANN_BENCH_USE_GGNN "Include ggnn algorithm in benchmark" ON)
option(RAFT_ANN_BENCH_SINGLE_EXE
       "Make a single executable with benchmark as shared library modules" OFF
)
# ##################################################################################################
# * Process options ----------------------------------------------------------
find_package(Threads REQUIRED)
if(BUILD_CPU_ONLY)
# Include necessary logging dependencies
include(cmake/thirdparty/get_fmt.cmake)
include(cmake/thirdparty/get_spdlog.cmake)
set(RAFT_FAISS_ENABLE_GPU OFF)
set(RAFT_ANN_BENCH_USE_FAISS_GPU_FLAT OFF)
set(RAFT_ANN_BENCH_USE_FAISS_GPU_IVF_FLAT OFF)
set(RAFT_ANN_BENCH_USE_FAISS_GPU_IVF_PQ OFF)
set(RAFT_ANN_BENCH_USE_RAFT_IVF_FLAT OFF)
set(RAFT_ANN_BENCH_USE_RAFT_IVF_PQ OFF)
set(RAFT_ANN_BENCH_USE_RAFT_CAGRA OFF)
set(RAFT_ANN_BENCH_USE_RAFT_CAGRA_HNSWLIB OFF)
set(RAFT_ANN_BENCH_USE_GGNN OFF)
else()
# Disable faiss benchmarks on CUDA 12 since faiss is not yet CUDA 12-enabled.
# https://github.com/rapidsai/raft/issues/1627
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.0.0)
set(RAFT_FAISS_ENABLE_GPU OFF)
set(RAFT_ANN_BENCH_USE_FAISS_GPU_FLAT OFF)
set(RAFT_ANN_BENCH_USE_FAISS_GPU_IVF_FLAT OFF)
set(RAFT_ANN_BENCH_USE_FAISS_GPU_IVF_PQ OFF)
set(RAFT_ANN_BENCH_USE_FAISS_CPU_FLAT OFF)
set(RAFT_ANN_BENCH_USE_FAISS_CPU_IVF_PQ OFF)
set(RAFT_ANN_BENCH_USE_FAISS_CPU_IVF_FLAT OFF)
else()
set(RAFT_FAISS_ENABLE_GPU ON)
endif()
endif()
set(RAFT_ANN_BENCH_USE_FAISS OFF)
if(RAFT_ANN_BENCH_USE_FAISS_GPU_FLAT
OR RAFT_ANN_BENCH_USE_FAISS_GPU_IVF_PQ
OR RAFT_ANN_BENCH_USE_FAISS_GPU_IVF_FLAT
OR RAFT_ANN_BENCH_USE_FAISS_CPU_FLAT
OR RAFT_ANN_BENCH_USE_FAISS_CPU_IVF_PQ
OR RAFT_ANN_BENCH_USE_FAISS_CPU_IVF_FLAT
)
set(RAFT_ANN_BENCH_USE_FAISS ON)
set(RAFT_USE_FAISS_STATIC ON)
endif()
set(RAFT_ANN_BENCH_USE_RAFT OFF)
if(RAFT_ANN_BENCH_USE_RAFT_IVF_PQ
OR RAFT_ANN_BENCH_USE_RAFT_BRUTE_FORCE
OR RAFT_ANN_BENCH_USE_RAFT_IVF_FLAT
OR RAFT_ANN_BENCH_USE_RAFT_CAGRA
OR RAFT_ANN_BENCH_USE_RAFT_CAGRA_HNSWLIB
)
set(RAFT_ANN_BENCH_USE_RAFT ON)
endif()
# ##################################################################################################
# * Fetch requirements -------------------------------------------------------------
if(RAFT_ANN_BENCH_USE_HNSWLIB OR RAFT_ANN_BENCH_USE_RAFT_CAGRA_HNSWLIB)
include(cmake/thirdparty/get_hnswlib.cmake)
endif()
include(cmake/thirdparty/get_nlohmann_json.cmake)
if(RAFT_ANN_BENCH_USE_GGNN)
include(cmake/thirdparty/get_ggnn.cmake)
endif()
if(RAFT_ANN_BENCH_USE_FAISS)
# We need to ensure that faiss has all the conda information. So we currently use the very ugly
# hammer of `link_libraries` to ensure that all targets in this directory and the faiss directory
# will have the conda includes/link dirs
link_libraries($<TARGET_NAME_IF_EXISTS:conda_env>)
include(cmake/thirdparty/get_faiss.cmake)
endif()
# ##################################################################################################
# * Configure tests function-------------------------------------------------------------
function(ConfigureAnnBench)

  # Declare the (empty) flag-argument list explicitly rather than relying on an
  # undefined `options` variable expanding to nothing.
  set(options)
  set(oneValueArgs NAME)
  set(multiValueArgs PATH LINKS CXXFLAGS INCLUDES)

  if(NOT BUILD_CPU_ONLY)
    set(GPU_BUILD ON)
  endif()

  cmake_parse_arguments(
    ConfigureAnnBench "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}
  )

  set(BENCH_NAME ${ConfigureAnnBench_NAME}_ANN_BENCH)

  if(RAFT_ANN_BENCH_SINGLE_EXE)
    # Build each algorithm as a shared module loaded by the single ANN_BENCH executable.
    add_library(${BENCH_NAME} SHARED ${ConfigureAnnBench_PATH})
    string(TOLOWER ${BENCH_NAME} BENCH_LIB_NAME)
    set_target_properties(${BENCH_NAME} PROPERTIES OUTPUT_NAME ${BENCH_LIB_NAME})
    add_dependencies(${BENCH_NAME} ANN_BENCH)
  else()
    # Standalone executable per algorithm, with its own main() and gbench link.
    add_executable(${BENCH_NAME} ${ConfigureAnnBench_PATH})
    target_compile_definitions(${BENCH_NAME} PRIVATE ANN_BENCH_BUILD_MAIN)
    target_link_libraries(${BENCH_NAME} PRIVATE benchmark::benchmark)
  endif()

  target_link_libraries(
    ${BENCH_NAME}
    PRIVATE raft::raft
            nlohmann_json::nlohmann_json
            ${ConfigureAnnBench_LINKS}
            Threads::Threads
            $<$<BOOL:${GPU_BUILD}>:${RAFT_CTK_MATH_DEPENDENCIES}>
            $<TARGET_NAME_IF_EXISTS:OpenMP::OpenMP_CXX>
            $<TARGET_NAME_IF_EXISTS:conda_env>
            -static-libgcc
            -static-libstdc++
            $<$<BOOL:${BUILD_CPU_ONLY}>:fmt::fmt-header-only>
            $<$<BOOL:${BUILD_CPU_ONLY}>:spdlog::spdlog_header_only>
  )

  set_target_properties(
    ${BENCH_NAME}
    PROPERTIES # set target compile options
               CXX_STANDARD 17
               CXX_STANDARD_REQUIRED ON
               CUDA_STANDARD 17
               CUDA_STANDARD_REQUIRED ON
               POSITION_INDEPENDENT_CODE ON
               INTERFACE_POSITION_INDEPENDENT_CODE ON
               BUILD_RPATH "\$ORIGIN"
               INSTALL_RPATH "\$ORIGIN"
  )

  # Prepend the project-wide C++ flags to the per-benchmark ones. The previous
  # `set(${ConfigureAnnBench_CXXFLAGS} ...)` dereferenced the variable in the
  # first argument, setting a variable *named after the flags' value* and
  # silently dropping RAFT_CXX_FLAGS from the compile options below.
  set(ConfigureAnnBench_CXXFLAGS ${RAFT_CXX_FLAGS} ${ConfigureAnnBench_CXXFLAGS})
  target_compile_options(
    ${BENCH_NAME} PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${ConfigureAnnBench_CXXFLAGS}>"
                          "$<$<COMPILE_LANGUAGE:CUDA>:${RAFT_CUDA_FLAGS}>"
  )

  if(RAFT_ANN_BENCH_USE_${ConfigureAnnBench_NAME})
    target_compile_definitions(
      ${BENCH_NAME}
      PUBLIC
        RAFT_ANN_BENCH_USE_${ConfigureAnnBench_NAME}=RAFT_ANN_BENCH_USE_${ConfigureAnnBench_NAME}
    )
  endif()

  target_include_directories(
    ${BENCH_NAME}
    PUBLIC "$<BUILD_INTERFACE:${RAFT_SOURCE_DIR}/include>"
    PRIVATE ${ConfigureAnnBench_INCLUDES}
  )

  install(
    TARGETS ${BENCH_NAME}
    COMPONENT ann_bench
    DESTINATION bin/ann
  )
endfunction()
# ##################################################################################################
# * Configure tests-------------------------------------------------------------
if(RAFT_ANN_BENCH_USE_HNSWLIB)
ConfigureAnnBench(
NAME HNSWLIB PATH bench/ann/src/hnswlib/hnswlib_benchmark.cpp INCLUDES
${CMAKE_CURRENT_BINARY_DIR}/_deps/hnswlib-src/hnswlib CXXFLAGS "${HNSW_CXX_FLAGS}"
)
endif()
if(RAFT_ANN_BENCH_USE_RAFT_IVF_PQ)
ConfigureAnnBench(
NAME
RAFT_IVF_PQ
PATH
bench/ann/src/raft/raft_benchmark.cu
$<$<BOOL:${RAFT_ANN_BENCH_USE_RAFT_IVF_PQ}>:bench/ann/src/raft/raft_ivf_pq.cu>
LINKS
raft::compiled
)
endif()
if(RAFT_ANN_BENCH_USE_RAFT_IVF_FLAT)
ConfigureAnnBench(
NAME
RAFT_IVF_FLAT
PATH
bench/ann/src/raft/raft_benchmark.cu
$<$<BOOL:${RAFT_ANN_BENCH_USE_RAFT_IVF_FLAT}>:bench/ann/src/raft/raft_ivf_flat.cu>
LINKS
raft::compiled
)
endif()
if(RAFT_ANN_BENCH_USE_RAFT_BRUTE_FORCE)
ConfigureAnnBench(
NAME RAFT_BRUTE_FORCE PATH bench/ann/src/raft/raft_benchmark.cu LINKS raft::compiled
)
endif()
if(RAFT_ANN_BENCH_USE_RAFT_CAGRA)
ConfigureAnnBench(
NAME
RAFT_CAGRA
PATH
bench/ann/src/raft/raft_benchmark.cu
$<$<BOOL:${RAFT_ANN_BENCH_USE_RAFT_CAGRA}>:bench/ann/src/raft/raft_cagra.cu>
LINKS
raft::compiled
)
endif()
if(RAFT_ANN_BENCH_USE_RAFT_CAGRA_HNSWLIB)
ConfigureAnnBench(
NAME
RAFT_CAGRA_HNSWLIB
PATH
bench/ann/src/raft/raft_cagra_hnswlib.cu
INCLUDES
${CMAKE_CURRENT_BINARY_DIR}/_deps/hnswlib-src/hnswlib
LINKS
raft::compiled
CXXFLAGS
"${HNSW_CXX_FLAGS}"
)
endif()
set(RAFT_FAISS_TARGETS faiss::faiss)
if(TARGET faiss::faiss_avx2)
set(RAFT_FAISS_TARGETS faiss::faiss_avx2)
endif()
message("RAFT_FAISS_TARGETS: ${RAFT_FAISS_TARGETS}")
message("CUDAToolkit_LIBRARY_DIR: ${CUDAToolkit_LIBRARY_DIR}")
if(RAFT_ANN_BENCH_USE_FAISS_CPU_FLAT)
ConfigureAnnBench(
NAME FAISS_CPU_FLAT PATH bench/ann/src/faiss/faiss_cpu_benchmark.cpp LINKS
${RAFT_FAISS_TARGETS}
)
endif()
if(RAFT_ANN_BENCH_USE_FAISS_CPU_IVF_FLAT)
ConfigureAnnBench(
NAME FAISS_CPU_IVF_FLAT PATH bench/ann/src/faiss/faiss_cpu_benchmark.cpp LINKS
${RAFT_FAISS_TARGETS}
)
endif()
if(RAFT_ANN_BENCH_USE_FAISS_CPU_IVF_PQ)
ConfigureAnnBench(
NAME FAISS_CPU_IVF_PQ PATH bench/ann/src/faiss/faiss_cpu_benchmark.cpp LINKS
${RAFT_FAISS_TARGETS}
)
endif()
if(RAFT_ANN_BENCH_USE_FAISS_GPU_IVF_FLAT)
ConfigureAnnBench(
NAME FAISS_GPU_IVF_FLAT PATH bench/ann/src/faiss/faiss_gpu_benchmark.cu LINKS
${RAFT_FAISS_TARGETS}
)
endif()
if(RAFT_ANN_BENCH_USE_FAISS_GPU_IVF_PQ)
ConfigureAnnBench(
NAME FAISS_GPU_IVF_PQ PATH bench/ann/src/faiss/faiss_gpu_benchmark.cu LINKS
${RAFT_FAISS_TARGETS}
)
endif()
if(RAFT_ANN_BENCH_USE_FAISS_GPU_FLAT)
ConfigureAnnBench(
NAME FAISS_GPU_FLAT PATH bench/ann/src/faiss/faiss_gpu_benchmark.cu LINKS ${RAFT_FAISS_TARGETS}
)
endif()
if(RAFT_ANN_BENCH_USE_GGNN)
include(cmake/thirdparty/get_glog.cmake)
ConfigureAnnBench(
NAME GGNN PATH bench/ann/src/ggnn/ggnn_benchmark.cu INCLUDES
${CMAKE_CURRENT_BINARY_DIR}/_deps/ggnn-src/include LINKS glog::glog
)
endif()
# ##################################################################################################
# * Dynamically-loading ANN_BENCH executable -------------------------------------------------------
if(RAFT_ANN_BENCH_SINGLE_EXE)
add_executable(ANN_BENCH bench/ann/src/common/benchmark.cpp)
# Build and link static version of the GBench to keep ANN_BENCH self-contained.
get_target_property(TMP_PROP benchmark::benchmark SOURCES)
add_library(benchmark_static STATIC ${TMP_PROP})
get_target_property(TMP_PROP benchmark::benchmark INCLUDE_DIRECTORIES)
target_include_directories(benchmark_static PUBLIC ${TMP_PROP})
get_target_property(TMP_PROP benchmark::benchmark LINK_LIBRARIES)
target_link_libraries(benchmark_static PUBLIC ${TMP_PROP})
target_include_directories(ANN_BENCH PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
target_link_libraries(
ANN_BENCH PRIVATE nlohmann_json::nlohmann_json benchmark_static dl -static-libgcc
-static-libstdc++ CUDA::nvtx3
)
set_target_properties(
ANN_BENCH
PROPERTIES # set target compile options
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON
INTERFACE_POSITION_INDEPENDENT_CODE ON
BUILD_RPATH "\$ORIGIN"
INSTALL_RPATH "\$ORIGIN"
)
# Disable NVTX when the nvtx3 headers are missing
set(_CMAKE_REQUIRED_INCLUDES_ORIG ${CMAKE_REQUIRED_INCLUDES})
get_target_property(CMAKE_REQUIRED_INCLUDES ANN_BENCH INCLUDE_DIRECTORIES)
CHECK_INCLUDE_FILE_CXX(nvtx3/nvToolsExt.h NVTX3_HEADERS_FOUND)
set(CMAKE_REQUIRED_INCLUDES ${_CMAKE_REQUIRED_INCLUDES_ORIG})
target_compile_definitions(
ANN_BENCH
PRIVATE
$<$<BOOL:${CUDAToolkit_FOUND}>:ANN_BENCH_LINK_CUDART="libcudart.so.${CUDAToolkit_VERSION_MAJOR}.${CUDAToolkit_VERSION_MINOR}.${CUDAToolkit_VERSION_PATCH}
">
$<$<BOOL:${NVTX3_HEADERS_FOUND}>:ANN_BENCH_NVTX3_HEADERS_FOUND>
)
target_link_options(ANN_BENCH PRIVATE -export-dynamic)
install(
TARGETS ANN_BENCH
COMPONENT ann_bench
DESTINATION bin/ann
EXCLUDE_FROM_ALL
)
endif()
| 0 |
rapidsai_public_repos/raft/cpp/bench | rapidsai_public_repos/raft/cpp/bench/ann/README.md | # RAFT CUDA ANN Benchmarks
Please see the [ANN Benchmarks](https://docs.rapids.ai/api/raft/stable/cuda_ann_benchmarks.html) section of the RAFT documentation for instructions on building and using the ANN benchmarks. | 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/raft/raft_ivf_flat_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <fstream>
#include <iostream>
#include <memory>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/neighbors/ivf_flat.cuh>
#include <raft/neighbors/ivf_flat_types.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <stdexcept>
#include <string>
#include <type_traits>
#include "../common/ann_types.hpp"
#include "raft_ann_bench_utils.h"
#include <raft/util/cudart_utils.hpp>
namespace raft::bench::ann {
// Benchmark adapter exposing raft's IVF-Flat GPU index through the common ANN<T>
// interface. T is the dataset element type; IdxT is the index' internal row-id type.
template <typename T, typename IdxT>
class RaftIvfFlatGpu : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;

  // Search-time knobs; wraps raft's native ivf_flat search parameters.
  struct SearchParam : public AnnSearchParam {
    raft::neighbors::ivf_flat::search_params ivf_flat_params;
  };

  using BuildParam = raft::neighbors::ivf_flat::index_params;

  RaftIvfFlatGpu(Metric metric, int dim, const BuildParam& param)
    : ANN<T>(metric, dim), index_params_(param), dimension_(dim)
  {
    index_params_.metric = parse_metric_type(metric);
    // Grow cluster lists on demand instead of over-allocating them up front.
    index_params_.conservative_memory_allocation = true;
    RAFT_CUDA_TRY(cudaGetDevice(&device_));
  }

  ~RaftIvfFlatGpu() noexcept {}

  void build(const T* dataset, size_t nrow, cudaStream_t stream) final;

  void set_search_param(const AnnSearchParam& param) override;

  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const override;

  // to enable dataset access from GPU memory
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::Device;
    property.query_memory_type   = MemoryType::Device;
    return property;
  }
  void save(const std::string& file) const override;
  void load(const std::string&) override;

 private:
  raft::device_resources handle_;  // owns the CUDA stream on which raft work runs
  BuildParam index_params_;
  raft::neighbors::ivf_flat::search_params search_params_;
  std::optional<raft::neighbors::ivf_flat::index<T, IdxT>> index_;  // empty until build()/load()
  int device_;
  int dimension_;
};
// Build the IVF-Flat index from a raw pointer of `nrow` rows. The stream argument is
// ignored: raft submits work on the stream owned by handle_.
template <typename T, typename IdxT>
void RaftIvfFlatGpu<T, IdxT>::build(const T* dataset, size_t nrow, cudaStream_t)
{
  auto built =
    raft::neighbors::ivf_flat::build(handle_, index_params_, dataset, IdxT(nrow), dimension_);
  index_.emplace(std::move(built));
}
// Adopt new search parameters. Throws std::bad_cast if `param` is not our SearchParam.
template <typename T, typename IdxT>
void RaftIvfFlatGpu<T, IdxT>::set_search_param(const AnnSearchParam& param)
{
  const auto& sp = dynamic_cast<const SearchParam&>(param);
  search_params_ = sp.ivf_flat_params;
  // Probing more lists than exist makes no sense; debug-only sanity check.
  assert(search_params_.n_probes <= index_params_.n_lists);
}
// Serialize the built index to `file`.
// NOTE(review): dereferences index_ without a check — assumes build() or load() ran first.
template <typename T, typename IdxT>
void RaftIvfFlatGpu<T, IdxT>::save(const std::string& file) const
{
  raft::neighbors::ivf_flat::serialize(handle_, file, *index_);
  return;
}
// Restore a previously serialized index from `file`, replacing any current index.
template <typename T, typename IdxT>
void RaftIvfFlatGpu<T, IdxT>::load(const std::string& file)
{
  index_.emplace(raft::neighbors::ivf_flat::deserialize<T, IdxT>(handle_, file));
}
// Run a batched k-NN search and block until results are ready. The caller's stream is
// ignored; we synchronize handle_'s own stream before returning.
template <typename T, typename IdxT>
void RaftIvfFlatGpu<T, IdxT>::search(
  const T* queries, int batch_size, int k, size_t* neighbors, float* distances, cudaStream_t) const
{
  // The caller supplies a size_t id buffer while raft writes IdxT ids; reinterpreting
  // the buffer is only legal when the two types are the same width.
  static_assert(sizeof(size_t) == sizeof(IdxT), "IdxT is incompatible with size_t");
  auto* out_ids = reinterpret_cast<IdxT*>(neighbors);
  raft::neighbors::ivf_flat::search(
    handle_, search_params_, *index_, queries, batch_size, k, out_ids, distances);
  resource::sync_stream(handle_);
}
} // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/raft/raft_cagra_hnswlib.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../common/ann_types.hpp"
#include "raft_ann_bench_param_parser.h"
#include "raft_cagra_hnswlib_wrapper.h"
#include <rmm/mr/device/pool_memory_resource.hpp>
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace raft::bench::ann {
// Read hnswlib search parameters from the JSON config: "ef" is required,
// "numThreads" is optional.
template <typename T, typename IdxT>
void parse_search_param(const nlohmann::json& conf,
                        typename raft::bench::ann::RaftCagraHnswlib<T, IdxT>::SearchParam& param)
{
  param.ef = conf.at("ef");
  if (auto it = conf.find("numThreads"); it != conf.end()) { param.num_threads = *it; }
}
// Factory for the CAGRA+hnswlib algorithm. Only float and uint8 element types are
// wired up; any other type/algo combination falls through to the error at the bottom.
template <typename T>
std::unique_ptr<raft::bench::ann::ANN<T>> create_algo(const std::string& algo,
                                                      const std::string& distance,
                                                      int dim,
                                                      const nlohmann::json& conf,
                                                      const std::vector<int>& dev_list)
{
  (void)dev_list;  // multi-GPU is not supported by this algorithm

  const raft::bench::ann::Metric metric = parse_metric(distance);

  if constexpr (std::is_same_v<T, float> || std::is_same_v<T, std::uint8_t>) {
    if (algo == "raft_cagra_hnswlib") {
      typename raft::bench::ann::RaftCagraHnswlib<T, uint32_t>::BuildParam build_param;
      parse_build_param<T, uint32_t>(conf, build_param);
      return std::make_unique<raft::bench::ann::RaftCagraHnswlib<T, uint32_t>>(
        metric, dim, build_param);
    }
  }
  throw std::runtime_error("invalid algo: '" + algo + "'");
}
// Factory for search parameters; only "raft_cagra_hnswlib" is recognized here.
template <typename T>
std::unique_ptr<typename raft::bench::ann::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
  if (algo != "raft_cagra_hnswlib") {
    throw std::runtime_error("invalid algo: '" + algo + "'");
  }
  using Param = typename raft::bench::ann::RaftCagraHnswlib<T, uint32_t>::SearchParam;
  auto param  = std::make_unique<Param>();
  parse_search_param<T, uint32_t>(conf, *param);
  return param;
}
} // namespace raft::bench::ann
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);
#ifdef ANN_BENCH_BUILD_MAIN
#include "../common/benchmark.hpp"
// Entry point: route all device allocations through a coalescing best-fit pool to
// avoid per-allocation cudaMalloc overhead while the benchmark runs.
//
// Fix: the original also captured get_current_device_resource() into a local `mr`
// that was never used, triggering an unused-variable warning; removed.
int main(int argc, char** argv)
{
  rmm::mr::cuda_memory_resource cuda_mr;
  // Construct a resource that uses a coalescing best-fit pool allocator
  rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> pool_mr{&cuda_mr};
  // Updates the current device resource pointer to `pool_mr`
  rmm::mr::set_current_device_resource(&pool_mr);
  return raft::bench::ann::run_main(argc, argv);
}
#endif
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/raft/raft_cagra.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "raft_cagra_wrapper.h"
namespace raft::bench::ann {
// Explicit instantiations for the element types the benchmark exercises; the class
// template itself is defined in raft_cagra_wrapper.h.
template class RaftCagra<uint8_t, uint32_t>;
template class RaftCagra<int8_t, uint32_t>;
template class RaftCagra<float, uint32_t>;
}  // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/raft/raft_cagra_hnswlib_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../hnswlib/hnswlib_wrapper.h"
#include "raft_cagra_wrapper.h"
#include <memory>
namespace raft::bench::ann {
// Two-stage adapter: builds a CAGRA graph on the GPU, then serves searches on the CPU
// with hnswlib (the CAGRA graph is saved in hnswlib's on-disk format by save()).
template <typename T, typename IdxT>
class RaftCagraHnswlib : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;
  using BuildParam  = typename RaftCagra<T, IdxT>::BuildParam;
  using SearchParam = typename HnswLib<T>::SearchParam;

  // NOTE(review): `concurrent_searches` is accepted but never stored or used — confirm intent.
  // NOTE(review): handle_ appears last in the init list but is declared first, so it is
  // actually initialized first (-Wreorder); harmless here since members are independent.
  RaftCagraHnswlib(Metric metric, int dim, const BuildParam& param, int concurrent_searches = 1)
    : ANN<T>(metric, dim),
      metric_(metric),
      index_params_(param),
      dimension_(dim),
      handle_(cudaStreamPerThread)
  {
  }

  ~RaftCagraHnswlib() noexcept {}

  void build(const T* dataset, size_t nrow, cudaStream_t stream) final;

  void set_search_param(const AnnSearchParam& param) override;

  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const override;

  // to enable dataset access from GPU memory
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::HostMmap;
    property.query_memory_type   = MemoryType::Host;
    return property;
  }
  void save(const std::string& file) const override;
  void load(const std::string&) override;

 private:
  raft::device_resources handle_;
  Metric metric_;
  BuildParam index_params_;
  int dimension_;
  std::unique_ptr<RaftCagra<T, IdxT>> cagra_build_;  // created lazily in build()
  std::unique_ptr<HnswLib<T>> hnswlib_search_;       // created in load()
  Objective metric_objective_;  // NOTE(review): never assigned or read in this class — confirm
};
// Lazily create the GPU-side CAGRA builder on first use, then delegate to it.
template <typename T, typename IdxT>
void RaftCagraHnswlib<T, IdxT>::build(const T* dataset, size_t nrow, cudaStream_t stream)
{
  if (!cagra_build_) {
    cagra_build_ = std::make_unique<RaftCagra<T, IdxT>>(metric_, dimension_, index_params_);
  }
  cagra_build_->build(dataset, nrow, stream);
}
// Forward search parameters to the hnswlib wrapper, which performs all queries.
// NOTE(review): dereferences hnswlib_search_ unchecked — assumes load() was called first;
// confirm the benchmark harness guarantees that call order.
template <typename T, typename IdxT>
void RaftCagraHnswlib<T, IdxT>::set_search_param(const AnnSearchParam& param_)
{
  hnswlib_search_->set_search_param(param_);
}
// Export the CAGRA graph to `file` in hnswlib's serialization format, so load() can
// hand it straight to hnswlib.
template <typename T, typename IdxT>
void RaftCagraHnswlib<T, IdxT>::save(const std::string& file) const
{
  cagra_build_->save_to_hnswlib(file);
}
// Load a previously exported graph into hnswlib and restrict search to the base layer
// (CAGRA graphs are flat, unlike native multi-layer HNSW graphs).
template <typename T, typename IdxT>
void RaftCagraHnswlib<T, IdxT>::load(const std::string& file)
{
  if (!hnswlib_search_) {
    // The build parameters are irrelevant: the graph comes from disk, not from hnswlib.
    typename HnswLib<T>::BuildParam dummy;
    dummy.M               = 50;
    dummy.ef_construction = 100;
    hnswlib_search_       = std::make_unique<HnswLib<T>>(metric_, dimension_, dummy);
  }
  hnswlib_search_->load(file);
  hnswlib_search_->set_base_layer_only();
}
// Queries are answered entirely on the CPU by hnswlib; the CUDA stream is unused.
// NOTE(review): assumes load() has created hnswlib_search_ before the first search.
template <typename T, typename IdxT>
void RaftCagraHnswlib<T, IdxT>::search(
  const T* queries, int batch_size, int k, size_t* neighbors, float* distances, cudaStream_t) const
{
  hnswlib_search_->search(queries, batch_size, k, neighbors, distances);
}
} // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/raft/raft_ivf_pq.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "raft_ivf_pq_wrapper.h"
namespace raft::bench::ann {
// Explicit instantiations for the element types the benchmark exercises; the class
// template itself is defined in raft_ivf_pq_wrapper.h.
template class RaftIvfPQ<float, int64_t>;
template class RaftIvfPQ<uint8_t, int64_t>;
template class RaftIvfPQ<int8_t, int64_t>;
}  // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/raft/raft_ann_bench_param_parser.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
#undef WARP_SIZE
#ifdef RAFT_ANN_BENCH_USE_RAFT_BFKNN
#include "raft_wrapper.h"
#endif
#ifdef RAFT_ANN_BENCH_USE_RAFT_IVF_FLAT
#include "raft_ivf_flat_wrapper.h"
extern template class raft::bench::ann::RaftIvfFlatGpu<float, int64_t>;
extern template class raft::bench::ann::RaftIvfFlatGpu<uint8_t, int64_t>;
extern template class raft::bench::ann::RaftIvfFlatGpu<int8_t, int64_t>;
#endif
#if defined(RAFT_ANN_BENCH_USE_RAFT_IVF_PQ) || defined(RAFT_ANN_BENCH_USE_RAFT_CAGRA) || \
defined(RAFT_ANN_BENCH_USE_RAFT_CAGRA_HNSWLIB)
#include "raft_ivf_pq_wrapper.h"
#endif
#ifdef RAFT_ANN_BENCH_USE_RAFT_IVF_PQ
extern template class raft::bench::ann::RaftIvfPQ<float, int64_t>;
extern template class raft::bench::ann::RaftIvfPQ<uint8_t, int64_t>;
extern template class raft::bench::ann::RaftIvfPQ<int8_t, int64_t>;
#endif
#if defined(RAFT_ANN_BENCH_USE_RAFT_CAGRA) || defined(RAFT_ANN_BENCH_USE_RAFT_CAGRA_HNSWLIB)
#include "raft_cagra_wrapper.h"
#endif
#ifdef RAFT_ANN_BENCH_USE_RAFT_CAGRA
extern template class raft::bench::ann::RaftCagra<float, uint32_t>;
extern template class raft::bench::ann::RaftCagra<uint8_t, uint32_t>;
extern template class raft::bench::ann::RaftCagra<int8_t, uint32_t>;
#endif
#ifdef RAFT_ANN_BENCH_USE_RAFT_IVF_FLAT
// Populate IVF-Flat build parameters from the benchmark JSON configuration.
// "nlist" is mandatory; the other keys override raft defaults when present.
template <typename T, typename IdxT>
void parse_build_param(const nlohmann::json& conf,
                       typename raft::bench::ann::RaftIvfFlatGpu<T, IdxT>::BuildParam& param)
{
  param.n_lists = conf.at("nlist");
  if (auto it = conf.find("niter"); it != conf.end()) { param.kmeans_n_iters = *it; }
  if (auto it = conf.find("ratio"); it != conf.end()) {
    // "ratio" is the inverse of the k-means trainset fraction (ratio=2 -> train on 1/2).
    param.kmeans_trainset_fraction = 1.0 / static_cast<double>(*it);
  }
}
// Populate IVF-Flat search parameters; "nprobe" is mandatory.
template <typename T, typename IdxT>
void parse_search_param(const nlohmann::json& conf,
                        typename raft::bench::ann::RaftIvfFlatGpu<T, IdxT>::SearchParam& param)
{
  const auto& nprobe             = conf.at("nprobe");
  param.ivf_flat_params.n_probes = nprobe;
}
#endif
#if defined(RAFT_ANN_BENCH_USE_RAFT_IVF_PQ) || defined(RAFT_ANN_BENCH_USE_RAFT_CAGRA) || \
defined(RAFT_ANN_BENCH_USE_RAFT_CAGRA_HNSWLIB)
// Populate IVF-PQ build parameters from the benchmark JSON configuration; every key
// is optional and falls back to raft defaults.
template <typename T, typename IdxT>
void parse_build_param(const nlohmann::json& conf,
                       typename raft::bench::ann::RaftIvfPQ<T, IdxT>::BuildParam& param)
{
  if (auto it = conf.find("nlist"); it != conf.end()) { param.n_lists = *it; }
  if (auto it = conf.find("niter"); it != conf.end()) { param.kmeans_n_iters = *it; }
  if (auto it = conf.find("ratio"); it != conf.end()) {
    // "ratio" is the inverse of the k-means trainset fraction.
    param.kmeans_trainset_fraction = 1.0 / static_cast<double>(*it);
  }
  if (auto it = conf.find("pq_bits"); it != conf.end()) { param.pq_bits = *it; }
  if (auto it = conf.find("pq_dim"); it != conf.end()) { param.pq_dim = *it; }
  if (auto it = conf.find("codebook_kind"); it != conf.end()) {
    const std::string kind = *it;
    using raft::neighbors::ivf_pq::codebook_gen;
    if (kind == "cluster") {
      param.codebook_kind = codebook_gen::PER_CLUSTER;
    } else if (kind == "subspace") {
      param.codebook_kind = codebook_gen::PER_SUBSPACE;
    } else {
      throw std::runtime_error("codebook_kind: '" + kind +
                               "', should be either 'cluster' or 'subspace'");
    }
  }
}
// Populate IVF-PQ search parameters from the benchmark JSON configuration.
// Dtype keys accept string names and default to half precision when absent.
template <typename T, typename IdxT>
void parse_search_param(const nlohmann::json& conf,
                        typename raft::bench::ann::RaftIvfPQ<T, IdxT>::SearchParam& param)
{
  if (conf.contains("nprobe")) { param.pq_param.n_probes = conf.at("nprobe"); }
  // Accumulator precision for distance computation: "float" or "half".
  if (conf.contains("internalDistanceDtype")) {
    std::string type = conf.at("internalDistanceDtype");
    if (type == "float") {
      param.pq_param.internal_distance_dtype = CUDA_R_32F;
    } else if (type == "half") {
      param.pq_param.internal_distance_dtype = CUDA_R_16F;
    } else {
      throw std::runtime_error("internalDistanceDtype: '" + type +
                               "', should be either 'float' or 'half'");
    }
  } else {
    // set half as default type
    param.pq_param.internal_distance_dtype = CUDA_R_16F;
  }

  // Element type of the shared-memory lookup table: "float", "half", or "fp8".
  if (conf.contains("smemLutDtype")) {
    std::string type = conf.at("smemLutDtype");
    if (type == "float") {
      param.pq_param.lut_dtype = CUDA_R_32F;
    } else if (type == "half") {
      param.pq_param.lut_dtype = CUDA_R_16F;
    } else if (type == "fp8") {
      param.pq_param.lut_dtype = CUDA_R_8U;
    } else {
      throw std::runtime_error("smemLutDtype: '" + type +
                               "', should be either 'float', 'half' or 'fp8'");
    }
  } else {
    // set half as default
    param.pq_param.lut_dtype = CUDA_R_16F;
  }
  // Over-query factor for the optional refinement (re-ranking) step; must be >= 1.
  if (conf.contains("refine_ratio")) {
    param.refine_ratio = conf.at("refine_ratio");
    if (param.refine_ratio < 1.0f) { throw std::runtime_error("refine_ratio should be >= 1.0"); }
  }
}
#endif
#if defined(RAFT_ANN_BENCH_USE_RAFT_CAGRA) || defined(RAFT_ANN_BENCH_USE_RAFT_CAGRA_HNSWLIB)
// Populate NN-descent build parameters from the benchmark JSON configuration.
template <typename T, typename IdxT>
void parse_build_param(const nlohmann::json& conf,
                       raft::neighbors::experimental::nn_descent::index_params& param)
{
  if (auto it = conf.find("graph_degree"); it != conf.end()) { param.graph_degree = *it; }
  if (auto it = conf.find("intermediate_graph_degree"); it != conf.end()) {
    param.intermediate_graph_degree = *it;
  }
  // "niter" is accepted as shorthand; an explicit "max_iterations" wins because it is
  // applied after it.
  if (auto it = conf.find("niter"); it != conf.end()) { param.max_iterations = *it; }
  if (auto it = conf.find("max_iterations"); it != conf.end()) { param.max_iterations = *it; }
  if (auto it = conf.find("termination_threshold"); it != conf.end()) {
    param.termination_threshold = *it;
  }
}
// Extract every key of `conf` that starts with `prefix` into a new json object,
// stripping the prefix from the resulting keys unless remove_prefix is false.
//
// Fix: `inline` is required here. This is a non-template function defined in a header
// that is included from multiple translation units (raft_benchmark.cu and
// raft_cagra_hnswlib.cu); without `inline`, each TU emits an external definition and
// linking them together violates the ODR (multiple-definition errors).
inline nlohmann::json collect_conf_with_prefix(const nlohmann::json& conf,
                                               const std::string& prefix,
                                               bool remove_prefix = true)
{
  nlohmann::json out;
  for (auto& i : conf.items()) {
    if (i.key().compare(0, prefix.size(), prefix) == 0) {
      auto new_key = remove_prefix ? i.key().substr(prefix.size()) : i.key();
      out[new_key] = i.value();
    }
  }
  return out;
}
// Populate CAGRA build parameters, including the nested parameters of the graph-build
// backend (IVF-PQ or NN-descent), which are namespaced in the JSON config with the
// prefixes "ivf_pq_build_", "ivf_pq_search_", and "nn_descent_".
template <typename T, typename IdxT>
void parse_build_param(const nlohmann::json& conf,
                       typename raft::bench::ann::RaftCagra<T, IdxT>::BuildParam& param)
{
  if (conf.contains("graph_degree")) {
    param.cagra_params.graph_degree = conf.at("graph_degree");
    // Default the intermediate degree to 2x the final degree; may be overridden below.
    param.cagra_params.intermediate_graph_degree = param.cagra_params.graph_degree * 2;
  }
  if (conf.contains("intermediate_graph_degree")) {
    param.cagra_params.intermediate_graph_degree = conf.at("intermediate_graph_degree");
  }
  // NOTE(review): an unrecognized graph_build_algo string is silently ignored here
  // (raft's default is kept) — confirm whether it should throw like other parsers.
  if (conf.contains("graph_build_algo")) {
    if (conf.at("graph_build_algo") == "IVF_PQ") {
      param.cagra_params.build_algo = raft::neighbors::cagra::graph_build_algo::IVF_PQ;
    } else if (conf.at("graph_build_algo") == "NN_DESCENT") {
      param.cagra_params.build_algo = raft::neighbors::cagra::graph_build_algo::NN_DESCENT;
    }
  }
  // Nested IVF-PQ build/search parameters (only set when the config provides any).
  nlohmann::json ivf_pq_build_conf = collect_conf_with_prefix(conf, "ivf_pq_build_");
  if (!ivf_pq_build_conf.empty()) {
    raft::neighbors::ivf_pq::index_params bparam;
    parse_build_param<T, IdxT>(ivf_pq_build_conf, bparam);
    param.ivf_pq_build_params = bparam;
  }
  nlohmann::json ivf_pq_search_conf = collect_conf_with_prefix(conf, "ivf_pq_search_");
  if (!ivf_pq_search_conf.empty()) {
    typename raft::bench::ann::RaftIvfPQ<T, IdxT>::SearchParam sparam;
    parse_search_param<T, IdxT>(ivf_pq_search_conf, sparam);
    param.ivf_pq_search_params = sparam.pq_param;
    param.ivf_pq_refine_rate   = sparam.refine_ratio;
  }
  // Nested NN-descent parameters. Its graph degree is forced to match CAGRA's
  // intermediate degree, since that graph feeds directly into CAGRA's optimization.
  nlohmann::json nn_descent_conf = collect_conf_with_prefix(conf, "nn_descent_");
  if (!nn_descent_conf.empty()) {
    raft::neighbors::experimental::nn_descent::index_params nn_param;
    nn_param.intermediate_graph_degree = 1.5 * param.cagra_params.intermediate_graph_degree;
    parse_build_param<T, IdxT>(nn_descent_conf, nn_param);
    if (nn_param.graph_degree != param.cagra_params.intermediate_graph_degree) {
      nn_param.graph_degree = param.cagra_params.intermediate_graph_degree;
    }
    param.nn_descent_params = nn_param;
  }
}
// Map a memory-type string from the config to the benchmark's AllocatorType.
// Throws for any value other than "device", "host_pinned", or "host_huge_page".
//
// Fixes: (1) added `inline` — this non-template function lives in a header included
// from multiple translation units, so an external definition in each TU would violate
// the ODR at link time; (2) the error message was missing its closing ']'.
inline raft::bench::ann::AllocatorType parse_allocator(std::string mem_type)
{
  if (mem_type == "device") {
    return raft::bench::ann::AllocatorType::Device;
  } else if (mem_type == "host_pinned") {
    return raft::bench::ann::AllocatorType::HostPinned;
  } else if (mem_type == "host_huge_page") {
    return raft::bench::ann::AllocatorType::HostHugePage;
  }
  THROW(
    "Invalid value for memory type %s, must be one of [\"device\", \"host_pinned\", "
    "\"host_huge_page\"]",
    mem_type.c_str());
}
// Populate CAGRA search parameters from the benchmark JSON configuration.
template <typename T, typename IdxT>
void parse_search_param(const nlohmann::json& conf,
                        typename raft::bench::ann::RaftCagra<T, IdxT>::SearchParam& param)
{
  if (conf.contains("itopk")) { param.p.itopk_size = conf.at("itopk"); }
  if (conf.contains("search_width")) { param.p.search_width = conf.at("search_width"); }
  if (conf.contains("max_iterations")) { param.p.max_iterations = conf.at("max_iterations"); }
  // Kernel strategy selection; unlike graph_build_algo above, an unknown value throws.
  if (conf.contains("algo")) {
    if (conf.at("algo") == "single_cta") {
      param.p.algo = raft::neighbors::experimental::cagra::search_algo::SINGLE_CTA;
    } else if (conf.at("algo") == "multi_cta") {
      param.p.algo = raft::neighbors::experimental::cagra::search_algo::MULTI_CTA;
    } else if (conf.at("algo") == "multi_kernel") {
      param.p.algo = raft::neighbors::experimental::cagra::search_algo::MULTI_KERNEL;
    } else if (conf.at("algo") == "auto") {
      param.p.algo = raft::neighbors::experimental::cagra::search_algo::AUTO;
    } else {
      std::string tmp = conf.at("algo");
      THROW("Invalid value for algo: %s", tmp.c_str());
    }
  }
  // Where the graph and the internal dataset copy live: device / host_pinned / host_huge_page.
  if (conf.contains("graph_memory_type")) {
    param.graph_mem = parse_allocator(conf.at("graph_memory_type"));
  }
  if (conf.contains("internal_dataset_memory_type")) {
    param.dataset_mem = parse_allocator(conf.at("internal_dataset_memory_type"));
  }
}
#endif
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/raft/raft_ivf_flat.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "raft_ivf_flat_wrapper.h"
namespace raft::bench::ann {
// Explicit instantiations for the element types the benchmark exercises; the class
// template itself is defined in raft_ivf_flat_wrapper.h.
template class RaftIvfFlatGpu<float, int64_t>;
template class RaftIvfFlatGpu<uint8_t, int64_t>;
template class RaftIvfFlatGpu<int8_t, int64_t>;
}  // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/raft/raft_ann_bench_utils.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <fstream>
#include <iostream>
#include <memory>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/logger.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <sstream>
#include <stdexcept>
#include <string>
#include <type_traits>
namespace raft::bench::ann {
// Translate the benchmark's Metric enum into raft's DistanceType.
// Throws for any metric other than inner product or Euclidean.
inline raft::distance::DistanceType parse_metric_type(raft::bench::ann::Metric metric)
{
  switch (metric) {
    case raft::bench::ann::Metric::kInnerProduct:
      return raft::distance::DistanceType::InnerProduct;
    case raft::bench::ann::Metric::kEuclidean:
      // Even for L2 expanded RAFT IVF Flat uses unexpanded formula
      return raft::distance::DistanceType::L2Expanded;
    default:
      throw std::runtime_error("raft supports only metric type of inner product and L2");
  }
}
} // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/raft/raft_benchmark.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../common/ann_types.hpp"
#include "raft_ann_bench_param_parser.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include <raft/core/logger.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace raft::bench::ann {
// Factory: instantiate the requested raft algorithm for element type T.
// Each candidate is compiled in only when its RAFT_ANN_BENCH_USE_* macro is defined.
// NOTE(review): this function matches "raft_bfknn" while create_search_param below
// matches "raft_brute_force" — the two names disagree, so the brute-force path cannot
// satisfy both factories; confirm the intended algorithm name.
template <typename T>
std::unique_ptr<raft::bench::ann::ANN<T>> create_algo(const std::string& algo,
                                                      const std::string& distance,
                                                      int dim,
                                                      const nlohmann::json& conf,
                                                      const std::vector<int>& dev_list)
{
  // stop compiler warning; not all algorithms support multi-GPU so it may not be used
  (void)dev_list;

  raft::bench::ann::Metric metric = parse_metric(distance);
  std::unique_ptr<raft::bench::ann::ANN<T>> ann;

  // Brute-force is only wired up for float.
  if constexpr (std::is_same_v<T, float>) {
#ifdef RAFT_ANN_BENCH_USE_RAFT_BFKNN
    if (algo == "raft_bfknn") { ann = std::make_unique<raft::bench::ann::RaftGpu<T>>(metric, dim); }
#endif
  }

  if constexpr (std::is_same_v<T, uint8_t>) {}

#ifdef RAFT_ANN_BENCH_USE_RAFT_IVF_FLAT
  if (algo == "raft_ivf_flat") {
    typename raft::bench::ann::RaftIvfFlatGpu<T, int64_t>::BuildParam param;
    parse_build_param<T, int64_t>(conf, param);
    ann = std::make_unique<raft::bench::ann::RaftIvfFlatGpu<T, int64_t>>(metric, dim, param);
  }
#endif
#ifdef RAFT_ANN_BENCH_USE_RAFT_IVF_PQ
  if (algo == "raft_ivf_pq") {
    typename raft::bench::ann::RaftIvfPQ<T, int64_t>::BuildParam param;
    parse_build_param<T, int64_t>(conf, param);
    ann = std::make_unique<raft::bench::ann::RaftIvfPQ<T, int64_t>>(metric, dim, param);
  }
#endif
#ifdef RAFT_ANN_BENCH_USE_RAFT_CAGRA
  if (algo == "raft_cagra") {
    typename raft::bench::ann::RaftCagra<T, uint32_t>::BuildParam param;
    parse_build_param<T, uint32_t>(conf, param);
    ann = std::make_unique<raft::bench::ann::RaftCagra<T, uint32_t>>(metric, dim, param);
  }
#endif

  if (!ann) { throw std::runtime_error("invalid algo: '" + algo + "'"); }

  return ann;
}
// Factory: build the search-parameter object matching the requested algorithm.
//
// Fix: the brute-force branch matched only "raft_brute_force" while create_algo()
// above matches "raft_bfknn", so neither name could pass through both factories.
// Accept both spellings here (backward-compatible with existing configs).
template <typename T>
std::unique_ptr<typename raft::bench::ann::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
#ifdef RAFT_ANN_BENCH_USE_RAFT_BFKNN
  if (algo == "raft_brute_force" || algo == "raft_bfknn") {
    // Brute force has no tunable search parameters.
    auto param = std::make_unique<typename raft::bench::ann::ANN<T>::AnnSearchParam>();
    return param;
  }
#endif
#ifdef RAFT_ANN_BENCH_USE_RAFT_IVF_FLAT
  if (algo == "raft_ivf_flat") {
    auto param =
      std::make_unique<typename raft::bench::ann::RaftIvfFlatGpu<T, int64_t>::SearchParam>();
    parse_search_param<T, int64_t>(conf, *param);
    return param;
  }
#endif
#ifdef RAFT_ANN_BENCH_USE_RAFT_IVF_PQ
  if (algo == "raft_ivf_pq") {
    auto param = std::make_unique<typename raft::bench::ann::RaftIvfPQ<T, int64_t>::SearchParam>();
    parse_search_param<T, int64_t>(conf, *param);
    return param;
  }
#endif
#ifdef RAFT_ANN_BENCH_USE_RAFT_CAGRA
  if (algo == "raft_cagra") {
    auto param = std::make_unique<typename raft::bench::ann::RaftCagra<T, uint32_t>::SearchParam>();
    parse_search_param<T, uint32_t>(conf, *param);
    return param;
  }
#endif
  // else
  throw std::runtime_error("invalid algo: '" + algo + "'");
}
}; // namespace raft::bench::ann
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);
#ifdef ANN_BENCH_BUILD_MAIN
#include "../common/benchmark.hpp"
// Entry point: route all device allocations through a coalescing best-fit pool to
// avoid per-allocation cudaMalloc overhead while the benchmark runs.
//
// Fix: the original also captured get_current_device_resource() into a local `mr`
// that was never used, triggering an unused-variable warning; removed.
int main(int argc, char** argv)
{
  rmm::mr::cuda_memory_resource cuda_mr;
  // Construct a resource that uses a coalescing best-fit pool allocator
  rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> pool_mr{&cuda_mr};
  // Updates the current device resource pointer to `pool_mr`
  rmm::mr::set_current_device_resource(&pool_mr);
  return raft::bench::ann::run_main(argc, argv);
}
#endif
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/raft/raft_ivf_pq_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/neighbors/ivf_pq_types.hpp>
#include <raft/util/cudart_utils.hpp>
#include <raft_runtime/neighbors/ivf_pq.hpp>
#include <raft_runtime/neighbors/refine.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <type_traits>
#include "../common/ann_types.hpp"
#include "raft_ann_bench_utils.h"
#include <raft/util/cudart_utils.hpp>
namespace raft::bench::ann {
// Benchmark adapter for raft's IVF-PQ index. Optionally re-ranks (refines) the top
// candidates against the original dataset when refine_ratio > 1.
template <typename T, typename IdxT>
class RaftIvfPQ : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;
  using ANN<T>::dim_;

  struct SearchParam : public AnnSearchParam {
    raft::neighbors::ivf_pq::search_params pq_param;
    // Over-query factor: fetch k*refine_ratio candidates, then refine down to k.
    float refine_ratio = 1.0f;
    // Refinement needs access to the raw dataset (see set_search_dataset).
    auto needs_dataset() const -> bool override { return refine_ratio > 1.0f; }
  };

  using BuildParam = raft::neighbors::ivf_pq::index_params;

  RaftIvfPQ(Metric metric, int dim, const BuildParam& param)
    : ANN<T>(metric, dim), index_params_(param), dimension_(dim)
  {
    index_params_.metric = parse_metric_type(metric);
    RAFT_CUDA_TRY(cudaGetDevice(&device_));
    // Timing is disabled: the event is only used for cross-stream ordering.
    RAFT_CUDA_TRY(cudaEventCreate(&sync_, cudaEventDisableTiming));
  }

  ~RaftIvfPQ() noexcept { RAFT_CUDA_TRY_NO_THROW(cudaEventDestroy(sync_)); }

  void build(const T* dataset, size_t nrow, cudaStream_t stream) final;

  void set_search_param(const AnnSearchParam& param) override;
  void set_search_dataset(const T* dataset, size_t nrow) override;

  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const override;

  // to enable dataset access from GPU memory
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::Host;
    property.query_memory_type   = MemoryType::Device;
    return property;
  }
  void save(const std::string& file) const override;
  void load(const std::string&) override;

 private:
  raft::device_resources handle_;
  cudaEvent_t sync_{nullptr};  // used by stream_wait() to order external streams after handle_'s
  BuildParam index_params_;
  raft::neighbors::ivf_pq::search_params search_params_;
  std::optional<raft::neighbors::ivf_pq::index<IdxT>> index_;  // empty until build()/load()
  int device_;
  int dimension_;
  float refine_ratio_ = 1.0;
  raft::device_matrix_view<const T, IdxT> dataset_;  // only valid after set_search_dataset()

  // Make `stream` wait until all work submitted so far on handle_'s stream has finished.
  void stream_wait(cudaStream_t stream) const
  {
    RAFT_CUDA_TRY(cudaEventRecord(sync_, resource::get_cuda_stream(handle_)));
    RAFT_CUDA_TRY(cudaStreamWaitEvent(stream, sync_));
  }
};
template <typename T, typename IdxT>
void RaftIvfPQ<T, IdxT>::save(const std::string& file) const
{
  // Persist the trained IVF-PQ index to `file` via the runtime API.
  const auto& index = *index_;
  raft::runtime::neighbors::ivf_pq::serialize(handle_, file, index);
}
template <typename T, typename IdxT>
void RaftIvfPQ<T, IdxT>::load(const std::string& file)
{
  // Deserialize into a freshly constructed index, then adopt it.
  raft::neighbors::ivf_pq::index<IdxT> loaded(handle_, index_params_, dimension_);
  raft::runtime::neighbors::ivf_pq::deserialize(handle_, file, &loaded);
  index_.emplace(std::move(loaded));
}
template <typename T, typename IdxT>
void RaftIvfPQ<T, IdxT>::build(const T* dataset, size_t nrow, cudaStream_t stream)
{
  // Wrap the raw device pointer in a matrix view and train the index on it.
  const auto rows = static_cast<IdxT>(nrow);
  auto data_view  = raft::make_device_matrix_view<const T, IdxT>(dataset, rows, dim_);
  index_.emplace(raft::runtime::neighbors::ivf_pq::build(handle_, index_params_, data_view));
  // Order the caller's stream after the build work queued on the handle's stream.
  stream_wait(stream);
}
template <typename T, typename IdxT>
void RaftIvfPQ<T, IdxT>::set_search_param(const AnnSearchParam& param)
{
  // Throws std::bad_cast if the caller passed the wrong parameter subclass.
  const auto& sp = dynamic_cast<const SearchParam&>(param);
  search_params_ = sp.pq_param;
  refine_ratio_  = sp.refine_ratio;
  assert(search_params_.n_probes <= index_params_.n_lists);
}
template <typename T, typename IdxT>
void RaftIvfPQ<T, IdxT>::set_search_dataset(const T* dataset, size_t nrow)
{
  // Keep a non-owning view of the dataset for the optional refinement step.
  const auto rows = static_cast<IdxT>(nrow);
  dataset_ = raft::make_device_matrix_view<const T, IdxT>(dataset, rows, index_->dim());
}
// Run a batched k-NN query. When refine_ratio_ > 1, IVF-PQ over-fetches
// k0 = refine_ratio_*k candidates which are then re-ranked ("refined") against
// the original dataset, either on the device or on the host depending on where
// the dataset resides.
template <typename T, typename IdxT>
void RaftIvfPQ<T, IdxT>::search(const T* queries,
                                int batch_size,
                                int k,
                                size_t* neighbors,
                                float* distances,
                                cudaStream_t stream) const
{
  if (refine_ratio_ > 1.0f) {
    uint32_t k0 = static_cast<uint32_t>(refine_ratio_ * k);
    auto queries_v =
      raft::make_device_matrix_view<const T, IdxT>(queries, batch_size, index_->dim());
    auto distances_tmp = raft::make_device_matrix<float, IdxT>(handle_, batch_size, k0);
    auto candidates    = raft::make_device_matrix<IdxT, IdxT>(handle_, batch_size, k0);
    raft::runtime::neighbors::ivf_pq::search(
      handle_, search_params_, *index_, queries_v, candidates.view(), distances_tmp.view());

    if (raft::get_device_for_address(dataset_.data_handle()) >= 0) {
      // Dataset is resident on the GPU: refine entirely on the device.
      auto queries_v =
        raft::make_device_matrix_view<const T, IdxT>(queries, batch_size, index_->dim());
      auto neighbors_v = raft::make_device_matrix_view<IdxT, IdxT>((IdxT*)neighbors, batch_size, k);
      auto distances_v = raft::make_device_matrix_view<float, IdxT>(distances, batch_size, k);
      raft::runtime::neighbors::refine(handle_,
                                       dataset_,
                                       queries_v,
                                       candidates.view(),
                                       neighbors_v,
                                       distances_v,
                                       index_->metric());
      stream_wait(stream);  // RAFT stream -> bench stream
    } else {
      // Dataset lives on the host: stage queries/candidates there and refine on CPU.
      auto queries_host    = raft::make_host_matrix<T, IdxT>(batch_size, index_->dim());
      auto candidates_host = raft::make_host_matrix<IdxT, IdxT>(batch_size, k0);
      auto neighbors_host  = raft::make_host_matrix<IdxT, IdxT>(batch_size, k);
      auto distances_host  = raft::make_host_matrix<float, IdxT>(batch_size, k);
      raft::copy(queries_host.data_handle(), queries, queries_host.size(), stream);
      raft::copy(candidates_host.data_handle(),
                 candidates.data_handle(),
                 candidates_host.size(),
                 resource::get_cuda_stream(handle_));
      auto dataset_v = raft::make_host_matrix_view<const T, IdxT>(
        dataset_.data_handle(), dataset_.extent(0), dataset_.extent(1));
      // Wait for the candidate copy on the handle's stream AND the query copy on
      // `stream`. BUG FIX: re-recording an event overwrites its previous record,
      // so the original record-record-synchronize sequence only waited on the
      // second stream. Record and synchronize each stream in turn instead.
      RAFT_CUDA_TRY(cudaEventRecord(sync_, resource::get_cuda_stream(handle_)));
      RAFT_CUDA_TRY(cudaEventSynchronize(sync_));
      RAFT_CUDA_TRY(cudaEventRecord(sync_, stream));
      RAFT_CUDA_TRY(cudaEventSynchronize(sync_));
      raft::runtime::neighbors::refine(handle_,
                                       dataset_v,
                                       queries_host.view(),
                                       candidates_host.view(),
                                       neighbors_host.view(),
                                       distances_host.view(),
                                       index_->metric());
      // NOTE(review): the cast assumes sizeof(IdxT) == sizeof(size_t); holds for
      // the 64-bit IdxT instantiations this benchmark uses — confirm.
      raft::copy(neighbors, (size_t*)neighbors_host.data_handle(), neighbors_host.size(), stream);
      raft::copy(distances, distances_host.data_handle(), distances_host.size(), stream);
    }
  } else {
    // No refinement: search directly into the caller-provided buffers.
    auto queries_v =
      raft::make_device_matrix_view<const T, IdxT>(queries, batch_size, index_->dim());
    auto neighbors_v = raft::make_device_matrix_view<IdxT, IdxT>((IdxT*)neighbors, batch_size, k);
    auto distances_v = raft::make_device_matrix_view<float, IdxT>(distances, batch_size, k);
    raft::runtime::neighbors::ivf_pq::search(
      handle_, search_params_, *index_, queries_v, neighbors_v, distances_v);
    stream_wait(stream);  // RAFT stream -> bench stream
  }
}
} // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/raft/raft_cagra_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <fstream>
#include <iostream>
#include <memory>
#include <optional>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/operators.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/neighbors/cagra.cuh>
#include <raft/neighbors/cagra_serialize.cuh>
#include <raft/neighbors/cagra_types.hpp>
#include <raft/neighbors/detail/cagra/cagra_build.cuh>
#include <raft/neighbors/ivf_pq_types.hpp>
#include <raft/neighbors/nn_descent_types.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <stdexcept>
#include <string>
#include <type_traits>
#include "../common/ann_types.hpp"
#include "raft_ann_bench_utils.h"
#include <raft/util/cudart_utils.hpp>
#include "../common/cuda_huge_page_resource.hpp"
#include "../common/cuda_pinned_resource.hpp"
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
namespace raft::bench::ann {
enum class AllocatorType { HostPinned, HostHugePage, Device };
// Benchmark wrapper around RAFT's CAGRA graph-based ANN index. The graph and the
// (padded) dataset can be migrated between device / pinned-host / huge-page-host
// memory between search runs via SearchParam.
template <typename T, typename IdxT>
class RaftCagra : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;

  struct SearchParam : public AnnSearchParam {
    raft::neighbors::experimental::cagra::search_params p;
    AllocatorType graph_mem   = AllocatorType::Device;  // where the kNN graph lives
    AllocatorType dataset_mem = AllocatorType::Device;  // where the dataset lives
    // CAGRA search always needs access to the dataset.
    auto needs_dataset() const -> bool override { return true; }
  };

  struct BuildParam {
    raft::neighbors::cagra::index_params cagra_params;
    // Optional knobs for the intermediate build algorithms (nullopt = library defaults).
    std::optional<raft::neighbors::experimental::nn_descent::index_params> nn_descent_params =
      std::nullopt;
    std::optional<float> ivf_pq_refine_rate = std::nullopt;
    std::optional<raft::neighbors::ivf_pq::index_params> ivf_pq_build_params = std::nullopt;
    std::optional<raft::neighbors::ivf_pq::search_params> ivf_pq_search_params = std::nullopt;
  };

  // NOTE(review): `concurrent_searches` is currently unused.
  RaftCagra(Metric metric, int dim, const BuildParam& param, int concurrent_searches = 1)
    : ANN<T>(metric, dim),
      index_params_(param),
      dimension_(dim),
      handle_(cudaStreamPerThread),
      need_dataset_update_(true),
      dataset_(make_device_matrix<T, int64_t>(handle_, 0, 0)),
      graph_(make_device_matrix<IdxT, int64_t>(handle_, 0, 0)),
      input_dataset_v_(nullptr, 0, 0),
      graph_mem_(AllocatorType::Device),
      dataset_mem_(AllocatorType::Device)
  {
    index_params_.cagra_params.metric = parse_metric_type(metric);
    // BUG FIX: ivf_pq_build_params defaults to std::nullopt and operator-> on a
    // disengaged std::optional is undefined behavior. Only propagate the metric
    // when IVF-PQ build parameters were actually supplied.
    if (index_params_.ivf_pq_build_params) {
      index_params_.ivf_pq_build_params->metric = parse_metric_type(metric);
    }
    RAFT_CUDA_TRY(cudaGetDevice(&device_));
  }

  ~RaftCagra() noexcept {}

  void build(const T* dataset, size_t nrow, cudaStream_t stream) final;

  void set_search_param(const AnnSearchParam& param) override;

  void set_search_dataset(const T* dataset, size_t nrow) override;

  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const override;

  // to enable dataset access from GPU memory
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::HostMmap;
    property.query_memory_type   = MemoryType::Device;
    return property;
  }
  void save(const std::string& file) const override;
  void load(const std::string&) override;
  void save_to_hnswlib(const std::string& file) const;

 private:
  // Map an AllocatorType onto the matching RMM memory resource.
  inline rmm::mr::device_memory_resource* get_mr(AllocatorType mem_type)
  {
    switch (mem_type) {
      case (AllocatorType::HostPinned): return &mr_pinned_;
      case (AllocatorType::HostHugePage): return &mr_huge_page_;
      default: return rmm::mr::get_current_device_resource();
    }
  }
  raft ::mr::cuda_pinned_resource mr_pinned_;
  raft ::mr::cuda_huge_page_resource mr_huge_page_;
  raft::device_resources handle_;
  AllocatorType graph_mem_;
  AllocatorType dataset_mem_;
  BuildParam index_params_;
  bool need_dataset_update_;
  raft::neighbors::cagra::search_params search_params_;
  std::optional<raft::neighbors::cagra::index<T, IdxT>> index_;
  int device_;
  int dimension_;
  raft::device_matrix<IdxT, int64_t, row_major> graph_;  // owns the (possibly migrated) graph
  raft::device_matrix<T, int64_t, row_major> dataset_;   // owns the padded dataset copy
  raft::device_matrix_view<const T, int64_t, row_major> input_dataset_v_;  // caller's dataset
};
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::build(const T* dataset, size_t nrow, cudaStream_t)
{
  // CAGRA builds from a host-side view of the dataset; forward all optional
  // intermediate-build parameters to the detail API.
  auto host_view =
    raft::make_host_matrix_view<const T, int64_t>(dataset, IdxT(nrow), dimension_);
  index_.emplace(raft::neighbors::cagra::detail::build(handle_,
                                                       index_params_.cagra_params,
                                                       host_view,
                                                       index_params_.nn_descent_params,
                                                       index_params_.ivf_pq_refine_rate,
                                                       index_params_.ivf_pq_build_params,
                                                       index_params_.ivf_pq_search_params));
}
// Human-readable name of an AllocatorType (used in log messages).
inline std::string allocator_to_string(AllocatorType mem_type)
{
  switch (mem_type) {
    case AllocatorType::Device: return "device";
    case AllocatorType::HostPinned: return "host_pinned";
    case AllocatorType::HostHugePage: return "host_huge_page";
    default: return "<invalid allocator type>";
  }
}
// Apply search parameters and, if requested, migrate the graph and/or dataset
// to a different memory space (device / pinned host / huge-page host).
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::set_search_param(const AnnSearchParam& param)
{
  auto search_param = dynamic_cast<const SearchParam&>(param);
  search_params_ = search_param.p;
  if (search_param.graph_mem != graph_mem_) {
    // Move graph to correct memory space
    graph_mem_ = search_param.graph_mem;
    RAFT_LOG_INFO("moving graph to new memory space: %s", allocator_to_string(graph_mem_).c_str());
    // We create a new graph and copy to it from existing graph
    auto mr = get_mr(graph_mem_);
    auto new_graph = make_device_mdarray<IdxT, int64_t>(
      handle_, mr, make_extents<int64_t>(index_->graph().extent(0), index_->graph_degree()));
    raft::copy(new_graph.data_handle(),
               index_->graph().data_handle(),
               index_->graph().size(),
               resource::get_cuda_stream(handle_));
    index_->update_graph(handle_, make_const_mdspan(new_graph.view()));
    // update_graph() only stores a view in the index. We need to keep the graph object alive.
    graph_ = std::move(new_graph);
  }
  if (search_param.dataset_mem != dataset_mem_ || need_dataset_update_) {
    dataset_mem_ = search_param.dataset_mem;
    // First free up existing memory
    dataset_ = make_device_matrix<T, int64_t>(handle_, 0, 0);
    index_->update_dataset(handle_, make_const_mdspan(dataset_.view()));
    // Allocate space using the correct memory resource.
    RAFT_LOG_INFO("moving dataset to new memory space: %s",
                  allocator_to_string(dataset_mem_).c_str());
    auto mr = get_mr(dataset_mem_);
    // Copies input_dataset_v_ into dataset_ (rows padded as required by CAGRA).
    raft::neighbors::cagra::detail::copy_with_padding(handle_, dataset_, input_dataset_v_, mr);
    index_->update_dataset(handle_, make_const_mdspan(dataset_.view()));
    // Ideally, instead of dataset_.view(), we should pass a strided matrix view to update.
    // See Issue https://github.com/rapidsai/raft/issues/1972 for details.
    // auto dataset_view = make_device_strided_matrix_view<const T, int64_t>(
    //   dataset_.data_handle(), dataset_.extent(0), this->dim_, dataset_.extent(1));
    // index_->update_dataset(handle_, dataset_view);
    need_dataset_update_ = false;
  }
}
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::set_search_dataset(const T* dataset, size_t nrow)
{
  // Re-used algo objects may already hold this dataset: only refresh the view
  // (and schedule a dataset migration) when the pointer or row count changed.
  const bool same_ptr  = input_dataset_v_.data_handle() == dataset;
  const bool same_rows = static_cast<size_t>(input_dataset_v_.extent(0)) == nrow;
  if (!(same_ptr && same_rows)) {
    input_dataset_v_     = make_device_matrix_view<const T, int64_t>(dataset, nrow, this->dim_);
    need_dataset_update_ = true;
  }
}
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::save(const std::string& file) const
{
  // Serialize the CAGRA index to `file`.
  const auto& index = *index_;
  raft::neighbors::cagra::serialize<T, IdxT>(handle_, file, index);
}
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::save_to_hnswlib(const std::string& file) const
{
  // Export the index in hnswlib's on-disk format.
  const auto& index = *index_;
  raft::neighbors::cagra::serialize_to_hnswlib<T, IdxT>(handle_, file, index);
}
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::load(const std::string& file)
{
  // Deserialize a previously saved index and take ownership of it.
  index_.emplace(raft::neighbors::cagra::deserialize<T, IdxT>(handle_, file));
}
// Run a batched CAGRA search. CAGRA emits IdxT neighbor ids while the benchmark
// API expects size_t; when the types differ, search into a scratch buffer and
// cast afterwards.
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::search(
  const T* queries, int batch_size, int k, size_t* neighbors, float* distances, cudaStream_t) const
{
  IdxT* neighbors_IdxT;
  rmm::device_uvector<IdxT> neighbors_storage(0, resource::get_cuda_stream(handle_));
  if constexpr (std::is_same<IdxT, size_t>::value) {
    neighbors_IdxT = neighbors;
  } else {
    neighbors_storage.resize(batch_size * k, resource::get_cuda_stream(handle_));
    neighbors_IdxT = neighbors_storage.data();
  }
  auto queries_view =
    raft::make_device_matrix_view<const T, int64_t>(queries, batch_size, dimension_);
  auto neighbors_view = raft::make_device_matrix_view<IdxT, int64_t>(neighbors_IdxT, batch_size, k);
  auto distances_view = raft::make_device_matrix_view<float, int64_t>(distances, batch_size, k);
  raft::neighbors::cagra::search(
    handle_, search_params_, *index_, queries_view, neighbors_view, distances_view);
  // Consistency fix: use `if constexpr` like the branch above, so the cast kernel
  // is only instantiated when IdxT actually differs from size_t.
  if constexpr (!std::is_same<IdxT, size_t>::value) {
    raft::linalg::unaryOp(neighbors,
                          neighbors_IdxT,
                          batch_size * k,
                          raft::cast_op<size_t>(),
                          raft::resource::get_cuda_stream(handle_));
  }
  // Block until the per-thread stream has produced the results.
  handle_.sync_stream();
}
} // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/raft/raft_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <cstdio>
#include <fstream>
#include <memory>
#include <raft/distance/detail/distance.cuh>
#include <raft/distance/distance_types.hpp>
#include <raft/spatial/knn/detail/fused_l2_knn.cuh>
#include <stdexcept>
#include <string>
#include <type_traits>
#include "../common/ann_types.hpp"
namespace raft_temp {
// Map the benchmark's Metric enum onto RAFT's DistanceType; only inner product
// and L2 are supported by this wrapper.
inline raft::distance::DistanceType parse_metric_type(raft::bench::ann::Metric metric)
{
  switch (metric) {
    case raft::bench::ann::Metric::kInnerProduct:
      return raft::distance::DistanceType::InnerProduct;
    case raft::bench::ann::Metric::kEuclidean: return raft::distance::DistanceType::L2Expanded;
    default: throw std::runtime_error("raft supports only metric type of inner product and L2");
  }
}
} // namespace raft_temp
namespace raft::bench::ann {
// brute force fused L2 KNN - RAFT
template <typename T>
class RaftGpu : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;

  RaftGpu(Metric metric, int dim);

  // Brute force keeps no index structure; build is a no-op.
  void build(const T*, size_t, cudaStream_t) final;
  // Brute force has no tunable search parameters; this is a no-op.
  void set_search_param(const AnnSearchParam& param) override;
  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const final;

  // to enable dataset access from GPU memory
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::Device;
    property.query_memory_type = MemoryType::Device;
    return property;
  }
  void set_search_dataset(const T* dataset, size_t nrow) override;
  // Writes an empty marker file — there is no index state to persist.
  void save(const std::string& file) const override;
  void load(const std::string&) override { return; };

 protected:
  raft::distance::DistanceType metric_type_;
  int device_;
  const T* dataset_;  // non-owning; set via set_search_dataset()
  size_t nrow_;
};
template <typename T>
RaftGpu<T>::RaftGpu(Metric metric, int dim)
  : ANN<T>(metric, dim), metric_type_(raft_temp::parse_metric_type(metric))
{
  // The fused-L2 kNN kernel used in search() only supports float data, and this
  // wrapper is asserted to run with the L2Expanded metric.
  static_assert(std::is_same_v<T, float>, "raft support only float type");
  assert(metric_type_ == raft::distance::DistanceType::L2Expanded);
  RAFT_CUDA_TRY(cudaGetDevice(&device_));
}
template <typename T>
void RaftGpu<T>::build(const T*, size_t, cudaStream_t)
{
  // Brute-force search has no index structure, so there is nothing to build.
}
template <typename T>
void RaftGpu<T>::set_search_param(const AnnSearchParam&)
{
  // Brute-force search has no tunable parameters; intentionally a no-op.
}
template <typename T>
void RaftGpu<T>::set_search_dataset(const T* dataset, size_t nrow)
{
  // Keep a non-owning pointer; the benchmark harness owns the device memory.
  dataset_ = dataset;
  nrow_    = nrow;
}
template <typename T>
void RaftGpu<T>::save(const std::string& file) const
{
  // Brute force keeps no index state; just create an empty marker file so the
  // benchmark harness sees a successful "build" step.
  std::ofstream fp(file);
  if (!fp) {
    // Fixes in this revision: report the failure on stderr instead of stdout,
    // and drop the stray empty statement (';') the original contained.
    std::fprintf(stderr, "Error in creating file!!!\n");
  }
  // fp closes automatically via RAII.
}
// Exact (brute-force) kNN over the whole dataset using RAFT's fused L2 kernel.
// Results are sorted (4th bool arg) and written directly to the caller's buffers.
template <typename T>
void RaftGpu<T>::search(const T* queries,
                        int batch_size,
                        int k,
                        size_t* neighbors,
                        float* distances,
                        cudaStream_t stream) const
{
  // TODO: Integrate new `raft::brute_force::index` (from
  // https://github.com/rapidsai/raft/pull/1817)
  // NOTE(review): the reinterpret_cast assumes sizeof(size_t) == sizeof(int64_t),
  // which holds on the 64-bit platforms this benchmark targets — confirm.
  raft::spatial::knn::detail::fusedL2Knn(this->dim_,
                                         reinterpret_cast<int64_t*>(neighbors),
                                         distances,
                                         dataset_,
                                         queries,
                                         nrow_,
                                         static_cast<size_t>(batch_size),
                                         k,
                                         true,
                                         true,
                                         stream,
                                         metric_type_);
}
} // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/hnswlib/hnswlib_benchmark.cpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../common/ann_types.hpp"
#include <algorithm>
#include <cmath>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>
#include "hnswlib_wrapper.h"
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace raft::bench::ann {
// Read HnswLib build parameters out of a JSON configuration entry.
// "efConstruction" and "M" are required; "numThreads" is optional.
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename raft::bench::ann::HnswLib<T>::BuildParam& param)
{
  param.ef_construction = conf.at("efConstruction");
  param.M               = conf.at("M");
  if (auto it = conf.find("numThreads"); it != conf.end()) { param.num_threads = *it; }
}
// Read HnswLib search parameters out of a JSON configuration entry.
// "ef" is required; "numThreads" is optional.
template <typename T>
void parse_search_param(const nlohmann::json& conf,
                        typename raft::bench::ann::HnswLib<T>::SearchParam& param)
{
  param.ef = conf.at("ef");
  if (auto it = conf.find("numThreads"); it != conf.end()) { param.num_threads = *it; }
}
// Construct an Algo<T> instance from a JSON build configuration.
template <typename T, template <typename> class Algo>
std::unique_ptr<raft::bench::ann::ANN<T>> make_algo(raft::bench::ann::Metric metric,
                                                    int dim,
                                                    const nlohmann::json& conf)
{
  typename Algo<T>::BuildParam build_param;
  parse_build_param<T>(conf, build_param);
  return std::make_unique<Algo<T>>(metric, dim, build_param);
}
// Multi-GPU overload; hnswlib is CPU-only, so the device list is ignored.
template <typename T, template <typename> class Algo>
std::unique_ptr<raft::bench::ann::ANN<T>> make_algo(raft::bench::ann::Metric metric,
                                                    int dim,
                                                    const nlohmann::json& conf,
                                                    const std::vector<int>& dev_list)
{
  static_cast<void>(dev_list);
  typename Algo<T>::BuildParam build_param;
  parse_build_param<T>(conf, build_param);
  return std::make_unique<Algo<T>>(metric, dim, build_param);
}
// Factory: build the requested algorithm ("hnswlib" only) for element type T.
// Throws std::runtime_error for unknown algos or unsupported element types.
template <typename T>
std::unique_ptr<raft::bench::ann::ANN<T>> create_algo(const std::string& algo,
                                                      const std::string& distance,
                                                      int dim,
                                                      const nlohmann::json& conf,
                                                      const std::vector<int>& dev_list)
{
  // stop compiler warning; not all algorithms support multi-GPU so it may not be used
  static_cast<void>(dev_list);
  const raft::bench::ann::Metric metric = parse_metric(distance);
  std::unique_ptr<raft::bench::ann::ANN<T>> ann;
  // hnswlib is only instantiated for float and uint8 element types.
  if (algo == "hnswlib") {
    if constexpr (std::is_same_v<T, float> || std::is_same_v<T, uint8_t>) {
      ann = make_algo<T, raft::bench::ann::HnswLib>(metric, dim, conf);
    }
  }
  if (!ann) { throw std::runtime_error("invalid algo: '" + algo + "'"); }
  return ann;
}
// Factory: parse search parameters for the requested algorithm ("hnswlib" only).
template <typename T>
std::unique_ptr<typename raft::bench::ann::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
  if (algo != "hnswlib") { throw std::runtime_error("invalid algo: '" + algo + "'"); }
  auto param = std::make_unique<typename raft::bench::ann::HnswLib<T>::SearchParam>();
  parse_search_param<T>(conf, *param);
  return param;
}
}; // namespace raft::bench::ann
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);
#ifdef ANN_BENCH_BUILD_MAIN
#include "../common/benchmark.hpp"
int main(int argc, char** argv) { return raft::bench::ann::run_main(argc, argv); }
#endif
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/hnswlib/hnswlib_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cmath>
#include <condition_variable>
#include <cstdio>
#include <ctime>
#include <future>
#include <memory>
#include <mutex>
#include <numeric>
#include <stdexcept>
#include <thread>
#include <utility>
#include <vector>
#include "../common/ann_types.hpp"
#include "../common/thread_pool.hpp"
#include <hnswlib.h>
namespace raft::bench::ann {
// Maps the element type T to the distance type hnswlib computes internally:
// float data -> float distances, uint8 data -> int distances. The unspecialized
// template yields `void`, which makes unsupported element types fail to compile.
template <typename T>
struct hnsw_dist_t {
  using type = void;
};
template <>
struct hnsw_dist_t<float> {
  using type = float;
};
template <>
struct hnsw_dist_t<uint8_t> {
  using type = int;
};
// Benchmark wrapper around hnswlib's HierarchicalNSW graph index (CPU-only).
template <typename T>
class HnswLib : public ANN<T> {
 public:
  // https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
  struct BuildParam {
    int M;                // graph degree parameter (see hnswlib ALGO_PARAMS.md)
    int ef_construction;  // candidate-list size during construction
    int num_threads = omp_get_num_procs();
  };

  using typename ANN<T>::AnnSearchParam;
  struct SearchParam : public AnnSearchParam {
    int ef;               // candidate-list size during search
    int num_threads = 1;  // >1 enables the latency-mode thread pool
  };

  HnswLib(Metric metric, int dim, const BuildParam& param);

  void build(const T* dataset, size_t nrow, cudaStream_t stream = 0) override;

  void set_search_param(const AnnSearchParam& param) override;
  void search(const T* query,
              int batch_size,
              int k,
              size_t* indices,
              float* distances,
              cudaStream_t stream = 0) const override;

  void save(const std::string& path_to_index) const override;
  void load(const std::string& path_to_index) override;

  // hnswlib is a pure CPU implementation: dataset and queries stay on the host.
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::Host;
    property.query_memory_type = MemoryType::Host;
    return property;
  }

  void set_base_layer_only() { appr_alg_->base_layer_only = true; }

 private:
  // Single-query kNN; fills `indices`/`distances` in ascending distance order.
  void get_search_knn_results_(const T* query, int k, size_t* indices, float* distances) const;

  std::unique_ptr<hnswlib::HierarchicalNSW<typename hnsw_dist_t<T>::type>> appr_alg_;
  std::unique_ptr<hnswlib::SpaceInterface<typename hnsw_dist_t<T>::type>> space_;

  using ANN<T>::metric_;
  using ANN<T>::dim_;
  int ef_construction_;
  int m_;
  int num_threads_;
  std::unique_ptr<FixedThreadPool> thread_pool_;  // created lazily (build / latency search)
  Objective metric_objective_;
};
template <typename T>
HnswLib<T>::HnswLib(Metric metric, int dim, const BuildParam& param) : ANN<T>(metric, dim)
{
  assert(dim_ > 0);
  static_assert(std::is_same_v<T, float> || std::is_same_v<T, uint8_t>);
  // The integer (uint8) specialization only provides an L2 metric space.
  if constexpr (std::is_same_v<T, uint8_t>) {
    if (metric_ != Metric::kEuclidean) {
      throw std::runtime_error("hnswlib<uint8_t> only supports Euclidean distance");
    }
  }
  ef_construction_ = param.ef_construction;
  m_               = param.M;
  num_threads_     = param.num_threads;
}
// Build the HNSW graph: pick a metric space matching T/metric_, allocate the
// index, then insert all points in parallel through the fixed thread pool.
template <typename T>
void HnswLib<T>::build(const T* dataset, size_t nrow, cudaStream_t)
{
  if constexpr (std::is_same_v<T, float>) {
    if (metric_ == Metric::kInnerProduct) {
      space_ = std::make_unique<hnswlib::InnerProductSpace>(dim_);
    } else {
      space_ = std::make_unique<hnswlib::L2Space>(dim_);
    }
  } else if constexpr (std::is_same_v<T, uint8_t>) {
    space_ = std::make_unique<hnswlib::L2SpaceI>(dim_);
  }

  appr_alg_ = std::make_unique<hnswlib::HierarchicalNSW<typename hnsw_dist_t<T>::type>>(
    space_.get(), nrow, m_, ef_construction_);

  // NOTE(review): this relies on hnswlib::addPoint being safe to call
  // concurrently from multiple pool threads — confirm against the hnswlib docs.
  thread_pool_ = std::make_unique<FixedThreadPool>(num_threads_);
  const size_t items_per_thread = nrow / (num_threads_ + 1);
  thread_pool_->submit(
    [&](size_t i) {
      // Progress logging roughly every 10000 items of the first shard.
      if (i < items_per_thread && i % 10000 == 0) {
        char buf[20];
        std::time_t now = std::time(nullptr);
        std::strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", std::localtime(&now));
        printf("%s building %zu / %zu\n", buf, i, items_per_thread);
        fflush(stdout);
      }
      appr_alg_->addPoint(dataset + i * dim_, i);
    },
    nrow);
}
template <typename T>
void HnswLib<T>::set_search_param(const AnnSearchParam& param_)
{
  const auto& param = dynamic_cast<const SearchParam&>(param_);
  appr_alg_->ef_    = param.ef;
  metric_objective_ = param.metric_objective;
  num_threads_      = param.num_threads;

  // Lazily create the thread pool: only for latency mode with multiple query
  // threads, and only if one does not exist already.
  const bool want_pool = metric_objective_ == Objective::LATENCY && num_threads_ > 1;
  if (want_pool && !thread_pool_) {
    thread_pool_ = std::make_unique<FixedThreadPool>(num_threads_);
  }
}
template <typename T>
void HnswLib<T>::search(
  const T* query, int batch_size, int k, size_t* indices, float* distances, cudaStream_t) const
{
  // hnswlib only answers one query vector at a time.
  auto run_one = [&](int i) {
    get_search_knn_results_(query + i * dim_, k, indices + i * k, distances + i * k);
  };
  const bool parallel = metric_objective_ == Objective::LATENCY && num_threads_ > 1;
  if (parallel) {
    thread_pool_->submit(run_one, batch_size);
  } else {
    for (int i = 0; i < batch_size; ++i) {
      run_one(i);
    }
  }
}
template <typename T>
void HnswLib<T>::save(const std::string& path_to_index) const
{
  // hnswlib serializes the whole graph to a single file.
  appr_alg_->saveIndex(path_to_index);
}
// Recreate the metric space (mirroring the logic in build()), then let hnswlib
// deserialize the graph from disk.
template <typename T>
void HnswLib<T>::load(const std::string& path_to_index)
{
  if constexpr (std::is_same_v<T, float>) {
    if (metric_ == Metric::kInnerProduct) {
      space_ = std::make_unique<hnswlib::InnerProductSpace>(dim_);
    } else {
      space_ = std::make_unique<hnswlib::L2Space>(dim_);
    }
  } else if constexpr (std::is_same_v<T, uint8_t>) {
    space_ = std::make_unique<hnswlib::L2SpaceI>(dim_);
  }
  appr_alg_ = std::make_unique<hnswlib::HierarchicalNSW<typename hnsw_dist_t<T>::type>>(
    space_.get(), path_to_index);
}
// Run a single-query kNN search and unpack hnswlib's max-heap result into
// ascending-distance order (the heap top is the worst match, so fill from the back).
template <typename T>
void HnswLib<T>::get_search_knn_results_(const T* query,
                                         int k,
                                         size_t* indices,
                                         float* distances) const
{
  auto result = appr_alg_->searchKnn(query, k);
  // NOTE(review): debug-only check; in release builds a result with fewer than k
  // entries would pop an empty priority queue below — confirm searchKnn always
  // returns >= k results for the benchmark datasets.
  assert(result.size() >= static_cast<size_t>(k));
  for (int i = k - 1; i >= 0; --i) {
    indices[i] = result.top().second;
    distances[i] = result.top().first;
    result.pop();
  }
}
}; // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/ggnn/ggnn_benchmark.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>
#include "../common/ann_types.hpp"
#include "ggnn_wrapper.cuh"
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace raft::bench::ann {
// Fill a GGNN BuildParam from a JSON configuration entry. Only "k" is required;
// every other field keeps its BuildParam default when absent.
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename raft::bench::ann::Ggnn<T>::BuildParam& param)
{
  param.k = conf.at("k");
  if (auto it = conf.find("k_build"); it != conf.end()) { param.k_build = *it; }
  if (auto it = conf.find("segment_size"); it != conf.end()) { param.segment_size = *it; }
  if (auto it = conf.find("num_layers"); it != conf.end()) { param.num_layers = *it; }
  if (auto it = conf.find("tau"); it != conf.end()) { param.tau = *it; }
  if (auto it = conf.find("refine_iterations"); it != conf.end()) {
    param.refine_iterations = *it;
  }
}
// Fill a GGNN SearchParam from a JSON configuration entry. Only "tau" is
// required; every other field keeps its SearchParam default when absent.
template <typename T>
void parse_search_param(const nlohmann::json& conf,
                        typename raft::bench::ann::Ggnn<T>::SearchParam& param)
{
  param.tau = conf.at("tau");
  if (auto it = conf.find("block_dim"); it != conf.end()) { param.block_dim = *it; }
  if (auto it = conf.find("max_iterations"); it != conf.end()) { param.max_iterations = *it; }
  if (auto it = conf.find("cache_size"); it != conf.end()) { param.cache_size = *it; }
  if (auto it = conf.find("sorted_size"); it != conf.end()) { param.sorted_size = *it; }
}
// Construct an Algo<T> instance from a JSON build configuration.
template <typename T, template <typename> class Algo>
std::unique_ptr<raft::bench::ann::ANN<T>> make_algo(raft::bench::ann::Metric metric,
                                                    int dim,
                                                    const nlohmann::json& conf)
{
  typename Algo<T>::BuildParam build_param;
  parse_build_param<T>(conf, build_param);
  return std::make_unique<Algo<T>>(metric, dim, build_param);
}
// Multi-GPU overload; GGNN here ignores the device list.
template <typename T, template <typename> class Algo>
std::unique_ptr<raft::bench::ann::ANN<T>> make_algo(raft::bench::ann::Metric metric,
                                                    int dim,
                                                    const nlohmann::json& conf,
                                                    const std::vector<int>& dev_list)
{
  static_cast<void>(dev_list);
  typename Algo<T>::BuildParam build_param;
  parse_build_param<T>(conf, build_param);
  return std::make_unique<Algo<T>>(metric, dim, build_param);
}
// Factory: build the requested algorithm ("ggnn" only) for element type T.
// Throws std::runtime_error for unknown algos.
template <typename T>
std::unique_ptr<raft::bench::ann::ANN<T>> create_algo(const std::string& algo,
                                                      const std::string& distance,
                                                      int dim,
                                                      const nlohmann::json& conf,
                                                      const std::vector<int>& dev_list)
{
  // stop compiler warning; not all algorithms support multi-GPU so it may not be used
  static_cast<void>(dev_list);
  const raft::bench::ann::Metric metric = parse_metric(distance);
  std::unique_ptr<raft::bench::ann::ANN<T>> ann;
  // GGNN is instantiated for every registered element type.
  if (algo == "ggnn") { ann = make_algo<T, raft::bench::ann::Ggnn>(metric, dim, conf); }
  if (!ann) { throw std::runtime_error("invalid algo: '" + algo + "'"); }
  return ann;
}
// Factory: parse search parameters for the requested algorithm ("ggnn" only).
template <typename T>
std::unique_ptr<typename raft::bench::ann::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
  if (algo != "ggnn") { throw std::runtime_error("invalid algo: '" + algo + "'"); }
  auto param = std::make_unique<typename raft::bench::ann::Ggnn<T>::SearchParam>();
  parse_search_param<T>(conf, *param);
  return param;
}
} // namespace raft::bench::ann
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);
#ifdef ANN_BENCH_BUILD_MAIN
#include "../common/benchmark.hpp"
int main(int argc, char** argv) { return raft::bench::ann::run_main(argc, argv); }
#endif
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/ggnn/ggnn_wrapper.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../common/ann_types.hpp"
#include <ggnn/cuda_knn_ggnn_gpu_instance.cuh>
#include <raft/util/cudart_utils.hpp>
#include <memory>
#include <stdexcept>
namespace raft::bench::ann {
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
class GgnnImpl;
// Public adapter exposing GGNN through the benchmark's ANN<T> interface.
// GGNN fixes its graph parameters at compile time, so the constructor (defined
// below) selects a matching GgnnImpl instantiation at runtime and every method
// here simply forwards to that implementation through `impl_`.
template <typename T>
class Ggnn : public ANN<T> {
 public:
  // Graph-construction parameters; defaults mirror GGNN's sift1m example.
  struct BuildParam {
    int k_build{24};  // KBuild: out-degree of the built graph
    int segment_size{32};  // S
    int num_layers{4};  // L
    float tau{0.5};
    int refine_iterations{2};
    int k;  // GGNN requires to know k during building
  };
  using typename ANN<T>::AnnSearchParam;
  // Query-time parameters; the tuple (block_dim, max_iterations, cache_size,
  // sorted_size) must match one of the combinations listed in GgnnImpl::search.
  struct SearchParam : public AnnSearchParam {
    float tau;
    int block_dim{32};
    int max_iterations{400};
    int cache_size{512};
    int sorted_size{256};
    // GGNN keeps only the graph; the base dataset must be re-attached for search.
    auto needs_dataset() const -> bool override { return true; }
  };
  Ggnn(Metric metric, int dim, const BuildParam& param);
  // impl_ is a raw owning pointer allocated in the constructor.
  ~Ggnn() { delete impl_; }
  void build(const T* dataset, size_t nrow, cudaStream_t stream = 0) override
  {
    impl_->build(dataset, nrow, stream);
  }
  void set_search_param(const AnnSearchParam& param) override { impl_->set_search_param(param); }
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const override
  {
    impl_->search(queries, batch_size, k, neighbors, distances, stream);
  }
  void save(const std::string& file) const override { impl_->save(file); }
  void load(const std::string& file) override { impl_->load(file); }
  AlgoProperty get_preference() const override { return impl_->get_preference(); }
  void set_search_dataset(const T* dataset, size_t nrow) override
  {
    impl_->set_search_dataset(dataset, nrow);
  };
 private:
  // Owning pointer to the concrete GgnnImpl specialization.
  ANN<T>* impl_;
};
// Select the GgnnImpl specialization matching the requested metric, dim and
// build parameters. GGNN bakes (distance measure, D, KBuild, KQuery, S) into
// template arguments, so only the combinations instantiated below — taken
// from GGNN's own example programs — are available; any other combination
// throws.
template <typename T>
Ggnn<T>::Ggnn(Metric metric, int dim, const BuildParam& param) : ANN<T>(metric, dim)
{
  // ggnn/src/sift1m.cu
  if (metric == Metric::kEuclidean && dim == 128 && param.k_build == 24 && param.k == 10 &&
      param.segment_size == 32) {
    impl_ = new GgnnImpl<T, Euclidean, 128, 24, 10, 32>(metric, dim, param);
  }
  // ggnn/src/deep1b_multi_gpu.cu, and adapt it deep1B
  else if (metric == Metric::kEuclidean && dim == 96 && param.k_build == 24 && param.k == 10 &&
           param.segment_size == 32) {
    impl_ = new GgnnImpl<T, Euclidean, 96, 24, 10, 32>(metric, dim, param);
  } else if (metric == Metric::kInnerProduct && dim == 96 && param.k_build == 24 && param.k == 10 &&
             param.segment_size == 32) {
    impl_ = new GgnnImpl<T, Cosine, 96, 24, 10, 32>(metric, dim, param);
  } else if (metric == Metric::kInnerProduct && dim == 96 && param.k_build == 96 && param.k == 10 &&
             param.segment_size == 64) {
    impl_ = new GgnnImpl<T, Cosine, 96, 96, 10, 64>(metric, dim, param);
  }
  // ggnn/src/glove200.cu, adapt it to glove100
  else if (metric == Metric::kInnerProduct && dim == 100 && param.k_build == 96 && param.k == 10 &&
           param.segment_size == 64) {
    impl_ = new GgnnImpl<T, Cosine, 100, 96, 10, 64>(metric, dim, param);
  } else {
    throw std::runtime_error(
      "ggnn: not supported combination of metric, dim and build param; "
      "see Ggnn's constructor in ggnn_wrapper.cuh for available combinations");
  }
}
// Concrete GGNN wrapper. All GGNN graph parameters are template arguments:
//   measure - compile-time distance measure (Euclidean / Cosine)
//   D       - vector dimensionality
//   KBuild  - graph out-degree used at build time
//   KQuery  - number of neighbors returned per query (k is fixed!)
//   S       - segment size
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
class GgnnImpl : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;
  GgnnImpl(Metric metric, int dim, const typename Ggnn<T>::BuildParam& param);
  void build(const T* dataset, size_t nrow, cudaStream_t stream = 0) override;
  void set_search_param(const AnnSearchParam& param) override;
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const override;
  void save(const std::string& file) const override;
  void load(const std::string& file) override;
  // GGNN operates entirely on device memory for both dataset and queries.
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::Device;
    property.query_memory_type = MemoryType::Device;
    return property;
  }
  void set_search_dataset(const T* dataset, size_t nrow) override;
 private:
  using ANN<T>::metric_;
  using ANN<T>::dim_;
  // Fully-specified GGNN instance type; KF (forward edges) is fixed to KBuild/2
  // as in GGNN's example programs.
  using GGNNGPUInstance = GGNNGPUInstance<measure,
                                          int64_t /* KeyT */,
                                          float /* ValueT */,
                                          size_t /* GAddrT */,
                                          T /* BaseT */,
                                          size_t /* BAddrT */,
                                          D,
                                          KBuild,
                                          KBuild / 2 /* KF */,
                                          KQuery,
                                          S>;
  // Created lazily in build(); save()/load()/search() require a prior build().
  std::unique_ptr<GGNNGPUInstance> ggnn_;
  typename Ggnn<T>::BuildParam build_param_;
  typename Ggnn<T>::SearchParam search_param_;
};
// Validate that the runtime configuration agrees with this specialization's
// compile-time parameters: metric must match `measure`, and dim must equal D.
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
GgnnImpl<T, measure, D, KBuild, KQuery, S>::GgnnImpl(Metric metric,
                                                     int dim,
                                                     const typename Ggnn<T>::BuildParam& param)
  : ANN<T>(metric, dim), build_param_(param)
{
  switch (metric_) {
    case Metric::kInnerProduct:
      if (measure != Cosine) { throw std::runtime_error("mis-matched metric"); }
      break;
    case Metric::kEuclidean:
      if (measure != Euclidean) { throw std::runtime_error("mis-matched metric"); }
      break;
    default:
      throw std::runtime_error(
        "ggnn supports only metric type of InnerProduct, Cosine and Euclidean");
  }
  if (dim != D) { throw std::runtime_error("mis-matched dim"); }
}
// Allocate the GGNN GPU instance for `nrow` base vectors on the current
// device, build the search graph on `stream`, then run the configured number
// of refinement passes.
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
void GgnnImpl<T, measure, D, KBuild, KQuery, S>::build(const T* dataset,
                                                       size_t nrow,
                                                       cudaStream_t stream)
{
  int device;
  RAFT_CUDA_TRY(cudaGetDevice(&device));
  ggnn_ = std::make_unique<GGNNGPUInstance>(
    device, nrow, build_param_.num_layers, true, build_param_.tau);
  ggnn_->set_base_data(dataset);
  ggnn_->set_stream(stream);
  ggnn_->build(0);  // 0 presumably selects shard/GPU index 0 -- TODO confirm against GGNN API
  for (int i = 0; i < build_param_.refine_iterations; ++i) {
    ggnn_->refine();
  }
}
// Re-attach the base dataset pointer for search (needed because GGNN stores
// only the graph; see SearchParam::needs_dataset). Requires a prior build()
// or load(), since it dereferences ggnn_.
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
void GgnnImpl<T, measure, D, KBuild, KQuery, S>::set_search_dataset(const T* dataset, size_t nrow)
{
  ggnn_->set_base_data(dataset);
}
// Copy the search parameters; throws std::bad_cast if `param` is not a
// Ggnn<T>::SearchParam (dynamic_cast on a reference).
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
void GgnnImpl<T, measure, D, KBuild, KQuery, S>::set_search_param(const AnnSearchParam& param)
{
  search_param_ = dynamic_cast<const typename Ggnn<T>::SearchParam&>(param);
}
// Run batched k-NN queries. GGNN's queryLayer takes (block_dim,
// max_iterations, cache_size, sorted_size) as template arguments, so only the
// combinations listed below are dispatchable; any other setting throws.
// k must equal this instance's compile-time KQuery.
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
void GgnnImpl<T, measure, D, KBuild, KQuery, S>::search(const T* queries,
                                                        int batch_size,
                                                        int k,
                                                        size_t* neighbors,
                                                        float* distances,
                                                        cudaStream_t stream) const
{
  // neighbors (size_t*) is reinterpreted as GGNN's int64_t KeyT below, so the
  // two types must have identical size.
  static_assert(sizeof(size_t) == sizeof(int64_t), "sizes of size_t and GGNN's KeyT are different");
  if (k != KQuery) {
    throw std::runtime_error(
      "k = " + std::to_string(k) +
      ", but this GGNN instance only supports k = " + std::to_string(KQuery));
  }
  ggnn_->set_stream(stream);
  // tau is a device __constant__ in GGNN; upload it before the query kernels.
  RAFT_CUDA_TRY(cudaMemcpyToSymbol(c_tau_query, &search_param_.tau, sizeof(float)));
  const int block_dim = search_param_.block_dim;
  const int max_iterations = search_param_.max_iterations;
  const int cache_size = search_param_.cache_size;
  const int sorted_size = search_param_.sorted_size;
  // default value
  if (block_dim == 32 && max_iterations == 400 && cache_size == 512 && sorted_size == 256) {
    ggnn_->template queryLayer<32, 400, 512, 256, false>(
      queries, batch_size, reinterpret_cast<int64_t*>(neighbors), distances);
  }
  // ggnn/src/sift1m.cu
  else if (block_dim == 32 && max_iterations == 200 && cache_size == 256 && sorted_size == 64) {
    ggnn_->template queryLayer<32, 200, 256, 64, false>(
      queries, batch_size, reinterpret_cast<int64_t*>(neighbors), distances);
  }
  // ggnn/src/sift1m.cu
  else if (block_dim == 32 && max_iterations == 400 && cache_size == 448 && sorted_size == 64) {
    ggnn_->template queryLayer<32, 400, 448, 64, false>(
      queries, batch_size, reinterpret_cast<int64_t*>(neighbors), distances);
  }
  // ggnn/src/glove200.cu
  else if (block_dim == 128 && max_iterations == 2000 && cache_size == 2048 && sorted_size == 32) {
    ggnn_->template queryLayer<128, 2000, 2048, 32, false>(
      queries, batch_size, reinterpret_cast<int64_t*>(neighbors), distances);
  }
  // for glove100
  else if (block_dim == 64 && max_iterations == 400 && cache_size == 512 && sorted_size == 32) {
    ggnn_->template queryLayer<64, 400, 512, 32, false>(
      queries, batch_size, reinterpret_cast<int64_t*>(neighbors), distances);
  } else if (block_dim == 128 && max_iterations == 2000 && cache_size == 1024 &&
             sorted_size == 32) {
    ggnn_->template queryLayer<128, 2000, 1024, 32, false>(
      queries, batch_size, reinterpret_cast<int64_t*>(neighbors), distances);
  } else {
    throw std::runtime_error("ggnn: not supported search param");
  }
}
// Persist the graph: download GGNN's device shard 0 into the matching host
// buffer on the legacy default stream, wait for the copy to finish, then
// store the host buffer to `file`. Requires a prior build().
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
void GgnnImpl<T, measure, D, KBuild, KQuery, S>::save(const std::string& file) const
{
  auto& ggnn_host = ggnn_->ggnn_cpu_buffers.at(0);
  auto& ggnn_device = ggnn_->ggnn_shards.at(0);
  ggnn_->set_stream(0);
  ggnn_host.downloadAsync(ggnn_device);
  // downloadAsync is asynchronous on the shard's stream; block until done.
  RAFT_CUDA_TRY(cudaStreamSynchronize(ggnn_device.stream));
  ggnn_host.store(file);
}
// Inverse of save(): read the graph from `file` into the host buffer, upload
// it to device shard 0, and wait for the upload. Requires ggnn_ to already
// exist (i.e. a prior build() sized for the same dataset).
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
void GgnnImpl<T, measure, D, KBuild, KQuery, S>::load(const std::string& file)
{
  auto& ggnn_host = ggnn_->ggnn_cpu_buffers.at(0);
  auto& ggnn_device = ggnn_->ggnn_shards.at(0);
  ggnn_->set_stream(0);
  ggnn_host.load(file);
  ggnn_host.uploadAsync(ggnn_device);
  RAFT_CUDA_TRY(cudaStreamSynchronize(ggnn_device.stream));
}
} // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/faiss/faiss_gpu_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FAISS_WRAPPER_H_
#define FAISS_WRAPPER_H_
#include "../common/ann_types.hpp"
#include <raft/core/logger.hpp>
#include <raft/util/cudart_utils.hpp>
#include <faiss/IndexFlat.h>
#include <faiss/IndexIVFFlat.h>
#include <faiss/IndexIVFPQ.h>
#include <faiss/IndexRefine.h>
#include <faiss/IndexScalarQuantizer.h>
#include <faiss/gpu/GpuIndexFlat.h>
#include <faiss/gpu/GpuIndexIVFFlat.h>
#include <faiss/gpu/GpuIndexIVFPQ.h>
#include <faiss/gpu/GpuIndexIVFScalarQuantizer.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/impl/ScalarQuantizer.h>
#include <faiss/index_io.h>
#include <omp.h>
#include <raft/core/device_resources.hpp>
#include <raft/core/resource/stream_view.hpp>
#include <cassert>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
namespace {
// Map the benchmark's Metric enum onto the corresponding FAISS metric.
// FAISS supports only inner-product and L2 here; anything else throws.
faiss::MetricType parse_metric_type(raft::bench::ann::Metric metric)
{
  switch (metric) {
    case raft::bench::ann::Metric::kInnerProduct: return faiss::METRIC_INNER_PRODUCT;
    case raft::bench::ann::Metric::kEuclidean: return faiss::METRIC_L2;
    default: throw std::runtime_error("faiss supports only metric type of inner product and L2");
  }
}
// note BLAS library can still use multi-threading, and
// setting environment variable like OPENBLAS_NUM_THREADS can control it
// RAII guard that forces OpenMP into single-threaded mode for its lifetime
// and reinstates the saved max thread count on destruction.
class OmpSingleThreadScope {
 public:
  OmpSingleThreadScope()
  {
    max_threads_ = omp_get_max_threads();
    omp_set_num_threads(1);
  }
  // A scope guard must not be copied or moved: a copy would restore the saved
  // thread count a second time at an unexpected point.
  OmpSingleThreadScope(const OmpSingleThreadScope&) = delete;
  OmpSingleThreadScope& operator=(const OmpSingleThreadScope&) = delete;
  ~OmpSingleThreadScope()
  {
    // the best we can do: omp_set_num_threads cannot perfectly restore prior
    // nested state, but it brings back the saved max thread count.
    omp_set_num_threads(max_threads_);
  }
 private:
  int max_threads_;  // thread count saved at construction
};
} // namespace
namespace raft::bench::ann {
// Common base for the FAISS GPU index wrappers. Owns the FAISS GPU resources,
// the index itself, and a CUDA event used to chain FAISS' internal default
// stream to the benchmark's stream (see stream_wait).
template <typename T>
class FaissGpu : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;
  struct SearchParam : public AnnSearchParam {
    int nprobe;  // number of IVF cells probed per query
    float refine_ratio = 1.0;  // >1.0 enables re-ranking over refine_ratio*k candidates
    // the original dataset is only needed when refinement re-ranks candidates
    auto needs_dataset() const -> bool override { return refine_ratio > 1.0f; }
  };
  struct BuildParam {
    int nlist = 1;  // number of IVF cells
    int ratio = 2;  // training subsample is 1/ratio of the dataset
  };
  FaissGpu(Metric metric, int dim, const BuildParam& param)
    : ANN<T>(metric, dim),
      metric_type_(parse_metric_type(metric)),
      nlist_{param.nlist},
      training_sample_fraction_{1.0 / double(param.ratio)}
  {
    static_assert(std::is_same_v<T, float>, "faiss support only float type");
    RAFT_CUDA_TRY(cudaGetDevice(&device_));
    // timing disabled: the event is only used for stream ordering
    RAFT_CUDA_TRY(cudaEventCreate(&sync_, cudaEventDisableTiming));
    faiss_default_stream_ = gpu_resource_.getDefaultStream(device_);
    raft::resource::set_cuda_stream(handle_, faiss_default_stream_);
  }
  virtual ~FaissGpu() noexcept { RAFT_CUDA_TRY_NO_THROW(cudaEventDestroy(sync_)); }
  void build(const T* dataset, size_t nrow, cudaStream_t stream = 0) final;
  virtual void set_search_param(const FaissGpu<T>::AnnSearchParam& param) {}
  void set_search_dataset(const T* dataset, size_t nrow) override { dataset_ = dataset; }
  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const final;
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    // to enable building big dataset which is larger than GPU memory
    property.dataset_memory_type = MemoryType::Host;
    property.query_memory_type = MemoryType::Host;
    return property;
  }
 protected:
  // Serialize/deserialize via a CPU-side copy of the index; GpuIndex/CpuIndex
  // must be the matching FAISS GPU and CPU index types.
  template <typename GpuIndex, typename CpuIndex>
  void save_(const std::string& file) const;
  template <typename GpuIndex, typename CpuIndex>
  void load_(const std::string& file);
  // Make `stream` wait until all work FAISS enqueued on its own default
  // stream has completed (event record + stream-wait, no host sync).
  void stream_wait(cudaStream_t stream) const
  {
    RAFT_CUDA_TRY(cudaEventRecord(sync_, faiss_default_stream_));
    RAFT_CUDA_TRY(cudaStreamWaitEvent(stream, sync_));
  }
  mutable faiss::gpu::StandardGpuResources gpu_resource_;
  std::unique_ptr<faiss::gpu::GpuIndex> index_;
  std::unique_ptr<faiss::IndexRefineFlat> index_refine_{nullptr};
  faiss::MetricType metric_type_;
  int nlist_;
  int device_;
  cudaEvent_t sync_{nullptr};  // ordering event, created in the constructor
  cudaStream_t faiss_default_stream_{nullptr};  // FAISS' internal stream
  double training_sample_fraction_;
  std::unique_ptr<faiss::SearchParameters> search_params_;
  const T* dataset_;  // host pointer set via set_search_dataset (for refinement)
  raft::device_resources handle_;
  float refine_ratio_ = 1.0;
};
// Train (for IVF variants) and populate the index with `nrow` vectors.
// For IVF indexes, FAISS' clustering bounds are widened so the whole
// training subsample (nrow * training_sample_fraction_) is used instead of
// being capped by FAISS' defaults.
template <typename T>
void FaissGpu<T>::build(const T* dataset, size_t nrow, cudaStream_t stream)
{
  OmpSingleThreadScope omp_single_thread;
  auto index_ivf = dynamic_cast<faiss::gpu::GpuIndexIVF*>(index_.get());
  if (index_ivf != nullptr) {
    // set the min/max training size for clustering to use the whole provided training set.
    double trainset_size = training_sample_fraction_ * static_cast<double>(nrow);
    double points_per_centroid = trainset_size / static_cast<double>(nlist_);
    int max_ppc = std::ceil(points_per_centroid);
    int min_ppc = std::floor(points_per_centroid);
    if (min_ppc < index_ivf->cp.min_points_per_centroid) {
      RAFT_LOG_WARN(
        "The suggested training set size %zu (data size %zu, training sample ratio %f) yields %d "
        "points per cluster (n_lists = %d). This is smaller than the FAISS default "
        "min_points_per_centroid = %d.",
        static_cast<size_t>(trainset_size),
        nrow,
        training_sample_fraction_,
        min_ppc,
        nlist_,
        index_ivf->cp.min_points_per_centroid);
    }
    index_ivf->cp.max_points_per_centroid = max_ppc;
    index_ivf->cp.min_points_per_centroid = min_ppc;
  }
  index_->train(nrow, dataset);  // faiss::gpu::GpuIndexFlat::train() will do nothing
  assert(index_->is_trained);
  index_->add(nrow, dataset);
  // FAISS enqueues on its own default stream; chain the caller's stream to it.
  stream_wait(stream);
}
// Batched k-NN search. `neighbors` (size_t*) is reinterpreted as FAISS'
// idx_t, guarded by the static_assert below. Refinement (refine_ratio > 1)
// is currently disabled pending a FAISS API fix (see linked issue).
template <typename T>
void FaissGpu<T>::search(const T* queries,
                         int batch_size,
                         int k,
                         size_t* neighbors,
                         float* distances,
                         cudaStream_t stream) const
{
  static_assert(sizeof(size_t) == sizeof(faiss::idx_t),
                "sizes of size_t and faiss::idx_t are different");
  if (this->refine_ratio_ > 1.0) {
    // TODO: FAISS changed their search APIs to accept the search parameters as a struct object
    // but their refine API doesn't allow the struct to be passed in. Once this is fixed, we
    // need to re-enable refinement below
    // index_refine_->search(batch_size, queries, k, distances,
    // reinterpret_cast<faiss::idx_t*>(neighbors), this->search_params_.get()); Related FAISS issue:
    // https://github.com/facebookresearch/faiss/issues/3118
    throw std::runtime_error(
      "FAISS doesn't support refinement in their new APIs so this feature is disabled in the "
      "benchmarks for the time being.");
  } else {
    index_->search(batch_size,
                   queries,
                   k,
                   distances,
                   reinterpret_cast<faiss::idx_t*>(neighbors),
                   this->search_params_.get());
  }
  // Make the caller's stream wait on FAISS' internal default stream.
  stream_wait(stream);
}
// Serialize the GPU index: copy it into a freshly constructed CPU-side
// CpuIndex, then write that with FAISS' native writer.
// Fix: the result of the dynamic_cast was dereferenced unchecked; a mismatch
// between GpuIndex and the runtime type of index_ was undefined behavior.
template <typename T>
template <typename GpuIndex, typename CpuIndex>
void FaissGpu<T>::save_(const std::string& file) const
{
  OmpSingleThreadScope omp_single_thread;
  auto cpu_index = std::make_unique<CpuIndex>();
  auto* gpu_index = dynamic_cast<GpuIndex*>(index_.get());
  if (gpu_index == nullptr) {
    throw std::runtime_error("FaissGpu::save_: index type mismatch for file: " + file);
  }
  gpu_index->copyTo(cpu_index.get());
  faiss::write_index(cpu_index.get(), file.c_str());
}
// Deserialize a CPU index from `file` and copy it into the existing GPU index.
// Fixes: (1) the pointer returned by faiss::read_index was leaked if the
// downcast to CpuIndex failed (only a debug-mode assert guarded it);
// (2) the GpuIndex downcast was dereferenced unchecked; (3) the error message
// went to stdout instead of stderr.
template <typename T>
template <typename GpuIndex, typename CpuIndex>
void FaissGpu<T>::load_(const std::string& file)
{
  OmpSingleThreadScope omp_single_thread;
  // Own the freshly read index first so it is released even on type mismatch.
  std::unique_ptr<faiss::Index> raw_index(faiss::read_index(file.c_str()));
  auto* cpu_index = dynamic_cast<CpuIndex*>(raw_index.get());
  if (cpu_index == nullptr) {
    throw std::runtime_error("FaissGpu::load_: unexpected index type in file: " + file);
  }
  auto* gpu_index = dynamic_cast<GpuIndex*>(index_.get());
  if (gpu_index == nullptr) {
    throw std::runtime_error("FaissGpu::load_: index type mismatch for file: " + file);
  }
  try {
    gpu_index->copyFrom(cpu_index);
  } catch (const std::exception& e) {
    std::cerr << "Error loading index file: " << std::string(e.what()) << std::endl;
  }
}
// GPU IVF-Flat wrapper: coarse quantizer over nlist cells, raw vectors stored.
template <typename T>
class FaissGpuIVFFlat : public FaissGpu<T> {
 public:
  using typename FaissGpu<T>::BuildParam;
  FaissGpuIVFFlat(Metric metric, int dim, const BuildParam& param) : FaissGpu<T>(metric, dim, param)
  {
    faiss::gpu::GpuIndexIVFFlatConfig config;
    config.device = this->device_;
    this->index_ = std::make_unique<faiss::gpu::GpuIndexIVFFlat>(
      &(this->gpu_resource_), dim, param.nlist, this->metric_type_, config);
  }
  void set_search_param(const typename FaissGpu<T>::AnnSearchParam& param) override
  {
    auto search_param = dynamic_cast<const typename FaissGpu<T>::SearchParam&>(param);
    int nprobe = search_param.nprobe;
    // Fix: 'nlist_' is a member of the dependent base FaissGpu<T>; unqualified
    // lookup does not find it under two-phase lookup, so debug (assert-enabled)
    // builds failed to compile. It must be qualified with 'this->'.
    assert(nprobe <= this->nlist_);
    faiss::IVFSearchParameters faiss_search_params;
    faiss_search_params.nprobe = nprobe;
    this->search_params_ = std::make_unique<faiss::IVFSearchParameters>(faiss_search_params);
    this->refine_ratio_ = search_param.refine_ratio;
  }
  void save(const std::string& file) const override
  {
    this->template save_<faiss::gpu::GpuIndexIVFFlat, faiss::IndexIVFFlat>(file);
  }
  void load(const std::string& file) override
  {
    this->template load_<faiss::gpu::GpuIndexIVFFlat, faiss::IndexIVFFlat>(file);
  }
};
// GPU IVF-PQ wrapper: product-quantized residuals with M sub-quantizers.
template <typename T>
class FaissGpuIVFPQ : public FaissGpu<T> {
 public:
  struct BuildParam : public FaissGpu<T>::BuildParam {
    int M;  // number of PQ sub-quantizers
    bool useFloat16;  // fp16 lookup tables
    bool usePrecomputed;  // precomputed distance tables
  };
  FaissGpuIVFPQ(Metric metric, int dim, const BuildParam& param) : FaissGpu<T>(metric, dim, param)
  {
    faiss::gpu::GpuIndexIVFPQConfig config;
    config.useFloat16LookupTables = param.useFloat16;
    config.usePrecomputedTables = param.usePrecomputed;
    config.device = this->device_;
    this->index_ =
      std::make_unique<faiss::gpu::GpuIndexIVFPQ>(&(this->gpu_resource_),
                                                  dim,
                                                  param.nlist,
                                                  param.M,
                                                  8,  // FAISS only supports bitsPerCode=8
                                                  this->metric_type_,
                                                  config);
  }
  void set_search_param(const typename FaissGpu<T>::AnnSearchParam& param) override
  {
    auto search_param = dynamic_cast<const typename FaissGpu<T>::SearchParam&>(param);
    int nprobe = search_param.nprobe;
    // Fix: qualify the dependent-base member with 'this->'; the unqualified
    // 'nlist_' failed two-phase lookup in assert-enabled (debug) builds.
    assert(nprobe <= this->nlist_);
    this->refine_ratio_ = search_param.refine_ratio;
    faiss::IVFPQSearchParameters faiss_search_params;
    faiss_search_params.nprobe = nprobe;
    this->search_params_ = std::make_unique<faiss::IVFPQSearchParameters>(faiss_search_params);
    if (search_param.refine_ratio > 1.0) {
      // dataset_ must have been provided via set_search_dataset for refinement
      this->index_refine_ =
        std::make_unique<faiss::IndexRefineFlat>(this->index_.get(), this->dataset_);
      this->index_refine_.get()->k_factor = search_param.refine_ratio;
    }
  }
  void save(const std::string& file) const override
  {
    this->template save_<faiss::gpu::GpuIndexIVFPQ, faiss::IndexIVFPQ>(file);
  }
  void load(const std::string& file) override
  {
    this->template load_<faiss::gpu::GpuIndexIVFPQ, faiss::IndexIVFPQ>(file);
  }
};
// TODO: Enable this in cmake
// ref: https://github.com/rapidsai/raft/issues/1876
// GPU IVF + scalar quantizer wrapper (fp16 or int8 codes).
template <typename T>
class FaissGpuIVFSQ : public FaissGpu<T> {
 public:
  struct BuildParam : public FaissGpu<T>::BuildParam {
    std::string quantizer_type;  // "fp16" or "int8"
  };
  FaissGpuIVFSQ(Metric metric, int dim, const BuildParam& param) : FaissGpu<T>(metric, dim, param)
  {
    faiss::ScalarQuantizer::QuantizerType qtype;
    if (param.quantizer_type == "fp16") {
      qtype = faiss::ScalarQuantizer::QT_fp16;
    } else if (param.quantizer_type == "int8") {
      qtype = faiss::ScalarQuantizer::QT_8bit;
    } else {
      throw std::runtime_error("FaissGpuIVFSQ supports only fp16 and int8 but got " +
                               param.quantizer_type);
    }
    faiss::gpu::GpuIndexIVFScalarQuantizerConfig config;
    config.device = this->device_;
    this->index_ = std::make_unique<faiss::gpu::GpuIndexIVFScalarQuantizer>(
      &(this->gpu_resource_), dim, param.nlist, qtype, this->metric_type_, true, config);
  }
  void set_search_param(const typename FaissGpu<T>::AnnSearchParam& param) override
  {
    auto search_param = dynamic_cast<const typename FaissGpu<T>::SearchParam&>(param);
    int nprobe = search_param.nprobe;
    // Fix: qualify the dependent-base member with 'this->'; the unqualified
    // 'nlist_' failed two-phase lookup in assert-enabled (debug) builds.
    assert(nprobe <= this->nlist_);
    faiss::IVFSearchParameters faiss_search_params;
    faiss_search_params.nprobe = nprobe;
    this->search_params_ = std::make_unique<faiss::IVFSearchParameters>(faiss_search_params);
    this->refine_ratio_ = search_param.refine_ratio;
    if (search_param.refine_ratio > 1.0) {
      // dataset_ must have been provided via set_search_dataset for refinement
      this->index_refine_ =
        std::make_unique<faiss::IndexRefineFlat>(this->index_.get(), this->dataset_);
      this->index_refine_.get()->k_factor = search_param.refine_ratio;
    }
  }
  void save(const std::string& file) const override
  {
    this->template save_<faiss::gpu::GpuIndexIVFScalarQuantizer, faiss::IndexIVFScalarQuantizer>(
      file);
  }
  void load(const std::string& file) override
  {
    this->template load_<faiss::gpu::GpuIndexIVFScalarQuantizer, faiss::IndexIVFScalarQuantizer>(
      file);
  }
};
// GPU brute-force (flat) wrapper: exact search, no build parameters.
template <typename T>
class FaissGpuFlat : public FaissGpu<T> {
 public:
  FaissGpuFlat(Metric metric, int dim)
    : FaissGpu<T>(metric, dim, typename FaissGpu<T>::BuildParam{})
  {
    faiss::gpu::GpuIndexFlatConfig config;
    config.device = this->device_;
    this->index_ = std::make_unique<faiss::gpu::GpuIndexFlat>(
      &(this->gpu_resource_), dim, this->metric_type_, config);
  }
  void set_search_param(const typename FaissGpu<T>::AnnSearchParam& param) override
  {
    auto search_param = dynamic_cast<const typename FaissGpu<T>::SearchParam&>(param);
    int nprobe = search_param.nprobe;
    // Fix: qualify the dependent-base member with 'this->'; the unqualified
    // 'nlist_' failed two-phase lookup in assert-enabled (debug) builds.
    // (nprobe is irrelevant for a flat index; the assert only sanity-checks input.)
    assert(nprobe <= this->nlist_);
    (void)nprobe;  // silence unused-variable warning in release builds
    this->search_params_ = std::make_unique<faiss::SearchParameters>();
  }
  void save(const std::string& file) const override
  {
    this->template save_<faiss::gpu::GpuIndexFlat, faiss::IndexFlat>(file);
  }
  void load(const std::string& file) override
  {
    this->template load_<faiss::gpu::GpuIndexFlat, faiss::IndexFlat>(file);
  }
};
} // namespace raft::bench::ann
#endif | 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/faiss/faiss_cpu_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../common/ann_types.hpp"
#include "../common/thread_pool.hpp"
#include <raft/core/logger.hpp>
#include <faiss/IndexFlat.h>
#include <faiss/IndexIVFFlat.h>
#include <faiss/IndexIVFPQ.h>
#include <faiss/IndexRefine.h>
#include <faiss/IndexScalarQuantizer.h>
#include <faiss/index_io.h>
#include <cassert>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
namespace {
// Translate the benchmark Metric enum into FAISS' metric type.
// Only inner-product and L2 are representable; anything else throws.
faiss::MetricType parse_metric_type(raft::bench::ann::Metric metric)
{
  switch (metric) {
    case raft::bench::ann::Metric::kInnerProduct: return faiss::METRIC_INNER_PRODUCT;
    case raft::bench::ann::Metric::kEuclidean: return faiss::METRIC_L2;
    default: throw std::runtime_error("faiss supports only metric type of inner product and L2");
  }
}
} // namespace
namespace raft::bench::ann {
// Common base for the FAISS CPU index wrappers. Owns the index, an optional
// coarse quantizer, a refinement wrapper (created in build()), and a fixed
// thread pool used to parallelize single-query batches.
template <typename T>
class FaissCpu : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;
  struct SearchParam : public AnnSearchParam {
    int nprobe;  // number of IVF cells probed per query
    float refine_ratio = 1.0;  // >1.0 re-ranks refine_ratio*k candidates exactly
    int num_threads = omp_get_num_procs();  // search thread-pool size
  };
  struct BuildParam {
    int nlist = 1;  // number of IVF cells
    int ratio = 2;  // training subsample is 1/ratio of the dataset
  };
  FaissCpu(Metric metric, int dim, const BuildParam& param)
    : ANN<T>(metric, dim),
      metric_type_(parse_metric_type(metric)),
      nlist_{param.nlist},
      training_sample_fraction_{1.0 / double(param.ratio)}
  {
    static_assert(std::is_same_v<T, float>, "faiss support only float type")<;
  }
  virtual ~FaissCpu() noexcept {}
  void build(const T* dataset, size_t nrow, cudaStream_t stream = 0) final;
  void set_search_param(const AnnSearchParam& param) override;
  // Create the flat coarse quantizer matching the configured metric.
  void init_quantizer(int dim)
  {
    if (this->metric_type_ == faiss::MetricType::METRIC_L2) {
      this->quantizer_ = std::make_unique<faiss::IndexFlatL2>(dim);
    } else if (this->metric_type_ == faiss::MetricType::METRIC_INNER_PRODUCT) {
      this->quantizer_ = std::make_unique<faiss::IndexFlatIP>(dim);
    }
  }
  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const final;
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    // to enable building big dataset which is larger than memory
    property.dataset_memory_type = MemoryType::Host;
    property.query_memory_type = MemoryType::Host;
    return property;
  }
 protected:
  template <typename Index>
  void save_(const std::string& file) const;
  template <typename Index>
  void load_(const std::string& file);
  std::unique_ptr<faiss::Index> index_;
  std::unique_ptr<faiss::Index> quantizer_;  // coarse quantizer owned separately from index_
  std::unique_ptr<faiss::IndexRefineFlat> index_refine_;  // created in build()
  faiss::MetricType metric_type_;
  int nlist_;
  double training_sample_fraction_;
  // num_threads_ is only read after thread_pool_ is set (see set_search_param).
  int num_threads_;
  std::unique_ptr<FixedThreadPool> thread_pool_;
};
// Train (for IVF variants) and populate the index, then wrap it in an
// IndexRefineFlat so that refined search is available. For IVF indexes the
// clustering bounds are widened so the whole training subsample is used.
// The cudaStream_t parameter exists only for interface parity and is unused.
template <typename T>
void FaissCpu<T>::build(const T* dataset, size_t nrow, cudaStream_t stream)
{
  auto index_ivf = dynamic_cast<faiss::IndexIVF*>(index_.get());
  if (index_ivf != nullptr) {
    // set the min/max training size for clustering to use the whole provided training set.
    double trainset_size = training_sample_fraction_ * static_cast<double>(nrow);
    double points_per_centroid = trainset_size / static_cast<double>(nlist_);
    int max_ppc = std::ceil(points_per_centroid);
    int min_ppc = std::floor(points_per_centroid);
    if (min_ppc < index_ivf->cp.min_points_per_centroid) {
      RAFT_LOG_WARN(
        "The suggested training set size %zu (data size %zu, training sample ratio %f) yields %d "
        "points per cluster (n_lists = %d). This is smaller than the FAISS default "
        "min_points_per_centroid = %d.",
        static_cast<size_t>(trainset_size),
        nrow,
        training_sample_fraction_,
        min_ppc,
        nlist_,
        index_ivf->cp.min_points_per_centroid);
    }
    index_ivf->cp.max_points_per_centroid = max_ppc;
    index_ivf->cp.min_points_per_centroid = min_ppc;
  }
  index_->train(nrow, dataset);  // faiss::IndexFlat::train() will do nothing
  assert(index_->is_trained);
  index_->add(nrow, dataset);
  // NOTE(review): index_refine_ keeps the raw `dataset` pointer; the caller
  // must keep the dataset alive for the index's lifetime -- confirm upstream.
  index_refine_ = std::make_unique<faiss::IndexRefineFlat>(this->index_.get(), dataset);
}
// Apply IVF search parameters: nprobe on the index, k_factor on the
// refinement wrapper, and (re)build the thread pool when the requested
// thread count changes.
// NOTE(review): the dynamic_cast assumes index_ is an IVF index; the non-IVF
// subclass (FaissCpuFlat) overrides this method, so the cast is expected to
// succeed here -- confirm if new non-IVF subclasses are added.
template <typename T>
void FaissCpu<T>::set_search_param(const AnnSearchParam& param)
{
  auto search_param = dynamic_cast<const SearchParam&>(param);
  int nprobe = search_param.nprobe;
  assert(nprobe <= nlist_);
  dynamic_cast<faiss::IndexIVF*>(index_.get())->nprobe = nprobe;
  if (search_param.refine_ratio > 1.0) {
    this->index_refine_.get()->k_factor = search_param.refine_ratio;
  }
  // num_threads_ is uninitialized until the first call, but || short-circuits
  // on the null thread_pool_, so it is never read before being assigned.
  if (!thread_pool_ || num_threads_ != search_param.num_threads) {
    num_threads_ = search_param.num_threads;
    thread_pool_ = std::make_unique<FixedThreadPool>(num_threads_);
  }
}
// Batched k-NN search. The single search call is submitted to the thread pool
// (with a single task) so benchmark timing includes the pool dispatch; FAISS
// parallelizes internally for batch_size > 1. The cudaStream_t parameter is
// unused on the CPU path.
template <typename T>
void FaissCpu<T>::search(const T* queries,
                         int batch_size,
                         int k,
                         size_t* neighbors,
                         float* distances,
                         cudaStream_t stream) const
{
  // neighbors (size_t*) is reinterpreted as faiss::idx_t below.
  static_assert(sizeof(size_t) == sizeof(faiss::idx_t),
                "sizes of size_t and faiss::idx_t are different");
  thread_pool_->submit(
    [&](int i) {
      // Use thread pool for batch size = 1. FAISS multi-threads internally for batch size > 1.
      index_->search(batch_size, queries, k, distances, reinterpret_cast<faiss::idx_t*>(neighbors));
    },
    1);
}
// Serialize the index to `file` with FAISS' native writer. The Index template
// parameter is unused here; it mirrors load_'s signature for symmetry.
template <typename T>
template <typename Index>
void FaissCpu<T>::save_(const std::string& file) const
{
  faiss::write_index(index_.get(), file.c_str());
}
// Deserialize the index from `file`, checking it has the expected type.
// Fix: the original wrapped dynamic_cast<Index*>(read_index(...)) directly in
// a unique_ptr, so on a type mismatch the freshly read index was leaked and
// index_ was silently set to null; now the raw result is owned first and a
// mismatch throws instead.
template <typename T>
template <typename Index>
void FaissCpu<T>::load_(const std::string& file)
{
  std::unique_ptr<faiss::Index> raw_index(faiss::read_index(file.c_str()));
  auto* typed = dynamic_cast<Index*>(raw_index.get());
  if (typed == nullptr) {
    throw std::runtime_error("FaissCpu::load_: unexpected index type in file: " + file);
  }
  raw_index.release();  // ownership transfers to index_ below
  index_ = std::unique_ptr<Index>(typed);
}
// CPU IVF-Flat wrapper: flat coarse quantizer over nlist cells, raw vectors
// stored in the inverted lists.
template <typename T>
class FaissCpuIVFFlat : public FaissCpu<T> {
 public:
  using typename FaissCpu<T>::BuildParam;
  FaissCpuIVFFlat(Metric metric, int dim, const BuildParam& param) : FaissCpu<T>(metric, dim, param)
  {
    this->init_quantizer(dim);
    this->index_ = std::make_unique<faiss::IndexIVFFlat>(
      this->quantizer_.get(), dim, param.nlist, this->metric_type_);
  }
  void save(const std::string& file) const override
  {
    this->template save_<faiss::IndexIVFFlat>(file);
  }
  void load(const std::string& file) override { this->template load_<faiss::IndexIVFFlat>(file); }
};
// CPU IVF-PQ wrapper: product quantization with M sub-quantizers of
// bitsPerCode bits each.
template <typename T>
class FaissCpuIVFPQ : public FaissCpu<T> {
 public:
  struct BuildParam : public FaissCpu<T>::BuildParam {
    int M;  // number of PQ sub-quantizers
    int bitsPerCode;  // bits per PQ code
    bool usePrecomputed;  // declared for config parity; not applied here
  };
  FaissCpuIVFPQ(Metric metric, int dim, const BuildParam& param) : FaissCpu<T>(metric, dim, param)
  {
    this->init_quantizer(dim);
    this->index_ = std::make_unique<faiss::IndexIVFPQ>(
      this->quantizer_.get(), dim, param.nlist, param.M, param.bitsPerCode, this->metric_type_);
  }
  void save(const std::string& file) const override
  {
    this->template save_<faiss::IndexIVFPQ>(file);
  }
  void load(const std::string& file) override { this->template load_<faiss::IndexIVFPQ>(file); }
};
// TODO: Enable this in cmake
// ref: https://github.com/rapidsai/raft/issues/1876
// CPU IVF + scalar quantizer wrapper (fp16 or int8 codes).
template <typename T>
class FaissCpuIVFSQ : public FaissCpu<T> {
 public:
  struct BuildParam : public FaissCpu<T>::BuildParam {
    std::string quantizer_type;  // "fp16" or "int8"
  };
  FaissCpuIVFSQ(Metric metric, int dim, const BuildParam& param) : FaissCpu<T>(metric, dim, param)
  {
    faiss::ScalarQuantizer::QuantizerType qtype;
    if (param.quantizer_type == "fp16") {
      qtype = faiss::ScalarQuantizer::QT_fp16;
    } else if (param.quantizer_type == "int8") {
      qtype = faiss::ScalarQuantizer::QT_8bit;
    } else {
      throw std::runtime_error("FaissCpuIVFSQ supports only fp16 and int8 but got " +
                               param.quantizer_type);
    }
    this->init_quantizer(dim);
    // final 'true' enables encoding residuals relative to the coarse centroid
    // -- NOTE(review): presumed from FAISS' by_residual parameter; confirm.
    this->index_ = std::make_unique<faiss::IndexIVFScalarQuantizer>(
      this->quantizer_.get(), dim, param.nlist, qtype, this->metric_type_, true);
  }
  void save(const std::string& file) const override
  {
    this->template save_<faiss::IndexIVFScalarQuantizer>(file);
  }
  void load(const std::string& file) override
  {
    this->template load_<faiss::IndexIVFScalarQuantizer>(file);
  }
};
// Brute-force (exact) FAISS CPU index; no coarse quantizer and no build params.
template <typename T>
class FaissCpuFlat : public FaissCpu<T> {
 public:
  FaissCpuFlat(Metric metric, int dim)
    : FaissCpu<T>(metric, dim, typename FaissCpu<T>::BuildParam{})
  {
    this->index_ = std::make_unique<faiss::IndexFlat>(dim, this->metric_type_);
  }

  // class FaissCpu is more like a IVF class, so need special treating here
  // A flat index has no nprobe, so this override only (re)configures the
  // search thread pool; the IVF-specific parts of the base override are skipped.
  void set_search_param(const typename ANN<T>::AnnSearchParam& param) override
  {
    auto search_param = dynamic_cast<const typename FaissCpu<T>::SearchParam&>(param);
    if (!this->thread_pool_ || this->num_threads_ != search_param.num_threads) {
      this->num_threads_ = search_param.num_threads;
      this->thread_pool_ = std::make_unique<FixedThreadPool>(this->num_threads_);
    }
  };

  void save(const std::string& file) const override
  {
    this->template save_<faiss::IndexFlat>(file);
  }
  void load(const std::string& file) override { this->template load_<faiss::IndexFlat>(file); }
};
} // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/faiss/faiss_gpu_benchmark.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>
#include "../common/ann_types.hpp"
#undef WARP_SIZE
#include "faiss_gpu_wrapper.h"
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace raft::bench::ann {
// Fill the fields shared by all FAISS GPU IVF variants from the JSON config.
// "nlist" is mandatory; "ratio" (training subsample ratio) keeps the
// BuildParam default when absent.
template <typename T>
void parse_base_build_param(const nlohmann::json& conf,
                            typename raft::bench::ann::FaissGpu<T>::BuildParam& param)
{
  param.nlist = conf.at("nlist");
  if (conf.contains("ratio")) { param.ratio = conf.at("ratio"); }
}

// IVF-Flat has no extra build parameters beyond the common ones.
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename raft::bench::ann::FaissGpuIVFFlat<T>::BuildParam& param)
{
  parse_base_build_param<T>(conf, param);
}

// IVF-PQ: "M" is mandatory; "usePrecomputed" and "useFloat16" default to false.
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename raft::bench::ann::FaissGpuIVFPQ<T>::BuildParam& param)
{
  parse_base_build_param<T>(conf, param);
  param.M = conf.at("M");
  if (conf.contains("usePrecomputed")) {
    param.usePrecomputed = conf.at("usePrecomputed");
  } else {
    param.usePrecomputed = false;
  }
  if (conf.contains("useFloat16")) {
    param.useFloat16 = conf.at("useFloat16");
  } else {
    param.useFloat16 = false;
  }
}

// IVF-SQ: the scalar quantizer type string ("fp16"/"int8") is mandatory.
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename raft::bench::ann::FaissGpuIVFSQ<T>::BuildParam& param)
{
  parse_base_build_param<T>(conf, param);
  param.quantizer_type = conf.at("quantizer_type");
}

// Search parameters common to all GPU FAISS indices: "nprobe" is mandatory,
// "refine_ratio" optional.
template <typename T>
void parse_search_param(const nlohmann::json& conf,
                        typename raft::bench::ann::FaissGpu<T>::SearchParam& param)
{
  param.nprobe = conf.at("nprobe");
  if (conf.contains("refine_ratio")) { param.refine_ratio = conf.at("refine_ratio"); }
}
// Construct an algorithm wrapper: parse its BuildParam from JSON, then build it.
template <typename T, template <typename> class Algo>
std::unique_ptr<raft::bench::ann::ANN<T>> make_algo(raft::bench::ann::Metric metric,
                                                    int dim,
                                                    const nlohmann::json& conf)
{
  typename Algo<T>::BuildParam param;
  parse_build_param<T>(conf, param);
  return std::make_unique<Algo<T>>(metric, dim, param);
}

// Overload accepting a device list; the list is currently ignored
// (multi-GPU construction is not wired up here).
template <typename T, template <typename> class Algo>
std::unique_ptr<raft::bench::ann::ANN<T>> make_algo(raft::bench::ann::Metric metric,
                                                    int dim,
                                                    const nlohmann::json& conf,
                                                    const std::vector<int>& dev_list)
{
  typename Algo<T>::BuildParam param;
  parse_build_param<T>(conf, param);
  (void)dev_list;
  return std::make_unique<Algo<T>>(metric, dim, param);
}
// Factory: map an algorithm name string onto a concrete FAISS GPU wrapper.
// Only T=float is supported; any other T (or unknown name) throws.
template <typename T>
std::unique_ptr<raft::bench::ann::ANN<T>> create_algo(const std::string& algo,
                                                      const std::string& distance,
                                                      int dim,
                                                      const nlohmann::json& conf,
                                                      const std::vector<int>& dev_list)
{
  // stop compiler warning; not all algorithms support multi-GPU so it may not be used
  (void)dev_list;
  std::unique_ptr<raft::bench::ann::ANN<T>> ann;
  if constexpr (std::is_same_v<T, float>) {
    raft::bench::ann::Metric metric = parse_metric(distance);
    if (algo == "faiss_gpu_ivf_flat") {
      ann = make_algo<T, raft::bench::ann::FaissGpuIVFFlat>(metric, dim, conf, dev_list);
    } else if (algo == "faiss_gpu_ivf_pq") {
      ann = make_algo<T, raft::bench::ann::FaissGpuIVFPQ>(metric, dim, conf);
    } else if (algo == "faiss_gpu_ivf_sq") {
      ann = make_algo<T, raft::bench::ann::FaissGpuIVFSQ>(metric, dim, conf);
    } else if (algo == "faiss_gpu_flat") {
      ann = std::make_unique<raft::bench::ann::FaissGpuFlat<T>>(metric, dim);
    }
  }
  // Placeholder: uint8_t instantiations exist (REGISTER_ALGO_INSTANCE) but no
  // GPU FAISS algorithm is implemented for them, so this branch is empty and
  // such requests fall through to the error below.
  if constexpr (std::is_same_v<T, uint8_t>) {}
  if (!ann) { throw std::runtime_error("invalid algo: '" + algo + "'"); }
  return ann;
}
// Factory for search parameters: IVF variants get the FaissGpu SearchParam
// (nprobe/refine_ratio); flat search takes the base AnnSearchParam unchanged.
template <typename T>
std::unique_ptr<typename raft::bench::ann::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
  if (algo == "faiss_gpu_ivf_flat" || algo == "faiss_gpu_ivf_pq" || algo == "faiss_gpu_ivf_sq") {
    auto param = std::make_unique<typename raft::bench::ann::FaissGpu<T>::SearchParam>();
    parse_search_param<T>(conf, *param);
    return param;
  } else if (algo == "faiss_gpu_flat") {
    auto param = std::make_unique<typename raft::bench::ann::ANN<T>::AnnSearchParam>();
    return param;
  }
  // else
  throw std::runtime_error("invalid algo: '" + algo + "'");
}
} // namespace raft::bench::ann
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);
#ifdef ANN_BENCH_BUILD_MAIN
#include "../common/benchmark.hpp"
int main(int argc, char** argv) { return raft::bench::ann::run_main(argc, argv); }
#endif
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/faiss/faiss_cpu_benchmark.cpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>
#include "../common/ann_types.hpp"
#include "faiss_cpu_wrapper.h"
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace raft::bench::ann {
// Fill the fields shared by all FAISS CPU IVF variants from the JSON config.
// "nlist" is mandatory; "ratio" keeps the BuildParam default when absent.
template <typename T>
void parse_base_build_param(const nlohmann::json& conf,
                            typename raft::bench::ann::FaissCpu<T>::BuildParam& param)
{
  param.nlist = conf.at("nlist");
  if (conf.contains("ratio")) { param.ratio = conf.at("ratio"); }
}

// IVF-Flat has no extra build parameters beyond the common ones.
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename raft::bench::ann::FaissCpuIVFFlat<T>::BuildParam& param)
{
  parse_base_build_param<T>(conf, param);
}

// IVF-PQ: "M" is mandatory; "usePrecomputed" defaults to false and
// "bitsPerCode" to 8.
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename raft::bench::ann::FaissCpuIVFPQ<T>::BuildParam& param)
{
  parse_base_build_param<T>(conf, param);
  param.M = conf.at("M");
  if (conf.contains("usePrecomputed")) {
    param.usePrecomputed = conf.at("usePrecomputed");
  } else {
    param.usePrecomputed = false;
  }
  if (conf.contains("bitsPerCode")) {
    param.bitsPerCode = conf.at("bitsPerCode");
  } else {
    param.bitsPerCode = 8;
  }
}

// IVF-SQ: the scalar quantizer type string ("fp16"/"int8") is mandatory.
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename raft::bench::ann::FaissCpuIVFSQ<T>::BuildParam& param)
{
  parse_base_build_param<T>(conf, param);
  param.quantizer_type = conf.at("quantizer_type");
}

// Search parameters: "nprobe" mandatory; "refine_ratio" and "numThreads"
// (size of the host-side search thread pool) are optional.
template <typename T>
void parse_search_param(const nlohmann::json& conf,
                        typename raft::bench::ann::FaissCpu<T>::SearchParam& param)
{
  param.nprobe = conf.at("nprobe");
  if (conf.contains("refine_ratio")) { param.refine_ratio = conf.at("refine_ratio"); }
  if (conf.contains("numThreads")) { param.num_threads = conf.at("numThreads"); }
}
// Construct an algorithm wrapper: parse its BuildParam from JSON, then build it.
template <typename T, template <typename> class Algo>
std::unique_ptr<raft::bench::ann::ANN<T>> make_algo(raft::bench::ann::Metric metric,
                                                    int dim,
                                                    const nlohmann::json& conf)
{
  typename Algo<T>::BuildParam param;
  parse_build_param<T>(conf, param);
  return std::make_unique<Algo<T>>(metric, dim, param);
}

// Overload accepting a device list; the list is ignored for CPU algorithms.
template <typename T, template <typename> class Algo>
std::unique_ptr<raft::bench::ann::ANN<T>> make_algo(raft::bench::ann::Metric metric,
                                                    int dim,
                                                    const nlohmann::json& conf,
                                                    const std::vector<int>& dev_list)
{
  typename Algo<T>::BuildParam param;
  parse_build_param<T>(conf, param);
  (void)dev_list;
  return std::make_unique<Algo<T>>(metric, dim, param);
}
// Factory: map an algorithm name string onto a concrete FAISS CPU wrapper.
// Only T=float is supported; any other T (or unknown name) throws.
template <typename T>
std::unique_ptr<raft::bench::ann::ANN<T>> create_algo(const std::string& algo,
                                                      const std::string& distance,
                                                      int dim,
                                                      const nlohmann::json& conf,
                                                      const std::vector<int>& dev_list)
{
  // stop compiler warning; not all algorithms support multi-GPU so it may not be used
  (void)dev_list;
  std::unique_ptr<raft::bench::ann::ANN<T>> ann;
  if constexpr (std::is_same_v<T, float>) {
    raft::bench::ann::Metric metric = parse_metric(distance);
    if (algo == "faiss_cpu_ivf_flat") {
      ann = make_algo<T, raft::bench::ann::FaissCpuIVFFlat>(metric, dim, conf, dev_list);
    } else if (algo == "faiss_cpu_ivf_pq") {
      ann = make_algo<T, raft::bench::ann::FaissCpuIVFPQ>(metric, dim, conf);
    } else if (algo == "faiss_cpu_ivf_sq") {
      ann = make_algo<T, raft::bench::ann::FaissCpuIVFSQ>(metric, dim, conf);
    } else if (algo == "faiss_cpu_flat") {
      ann = std::make_unique<raft::bench::ann::FaissCpuFlat<T>>(metric, dim);
    }
  }
  // Placeholder: uint8_t instantiations exist (REGISTER_ALGO_INSTANCE) but no
  // CPU FAISS algorithm is implemented for them, so such requests fall through
  // to the error below.
  if constexpr (std::is_same_v<T, uint8_t>) {}
  if (!ann) { throw std::runtime_error("invalid algo: '" + algo + "'"); }
  return ann;
}
// Factory for search parameters: IVF variants get the FaissCpu SearchParam
// (nprobe/refine_ratio/num_threads); flat search takes the base AnnSearchParam.
template <typename T>
std::unique_ptr<typename raft::bench::ann::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
  if (algo == "faiss_cpu_ivf_flat" || algo == "faiss_cpu_ivf_pq" || algo == "faiss_cpu_ivf_sq") {
    auto param = std::make_unique<typename raft::bench::ann::FaissCpu<T>::SearchParam>();
    parse_search_param<T>(conf, *param);
    return param;
  } else if (algo == "faiss_cpu_flat") {
    auto param = std::make_unique<typename raft::bench::ann::ANN<T>::AnnSearchParam>();
    return param;
  }
  // else
  throw std::runtime_error("invalid algo: '" + algo + "'");
}
} // namespace raft::bench::ann
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);
#ifdef ANN_BENCH_BUILD_MAIN
#include "../common/benchmark.hpp"
int main(int argc, char** argv) { return raft::bench::ann::run_main(argc, argv); }
#endif
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/common/cuda_huge_page_resource.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <sys/mman.h>
#include <cstddef>
namespace raft::mr {
/**
* @brief `device_memory_resource` derived class that uses mmap to allocate memory.
* This class enables memory allocation using huge pages.
* It is assumed that the allocated memory is directly accessible on device. This currently only
* works on GH systems.
*
* TODO(tfeher): consider improving or removing this helper once we made progress with
* https://github.com/rapidsai/raft/issues/1819
*/
class cuda_huge_page_resource final : public rmm::mr::device_memory_resource {
 public:
  cuda_huge_page_resource()                                          = default;
  ~cuda_huge_page_resource() override                                = default;
  cuda_huge_page_resource(cuda_huge_page_resource const&)            = default;
  cuda_huge_page_resource(cuda_huge_page_resource&&)                 = default;
  cuda_huge_page_resource& operator=(cuda_huge_page_resource const&) = default;
  cuda_huge_page_resource& operator=(cuda_huge_page_resource&&)      = default;

  /**
   * @brief Query whether the resource supports use of non-null CUDA streams for
   * allocation/deallocation. `cuda_huge_page_resource` does not support streams.
   *
   * @returns bool false
   */
  [[nodiscard]] bool supports_streams() const noexcept override { return false; }

  /**
   * @brief Query whether the resource supports the get_mem_info API.
   *
   * @return true
   */
  [[nodiscard]] bool supports_get_mem_info() const noexcept override { return true; }

 private:
  /**
   * @brief Allocates memory of size at least `bytes` using an anonymous private
   * mmap(2) mapping, then advises the kernel to back it with transparent huge
   * pages (MADV_HUGEPAGE) and zero-fills it.
   *
   * The memset serves two purposes: it faults the pages in up front (so the
   * kernel can assemble huge pages) and guarantees zeroed memory.
   *
   * NOTE(review): `bytes` is not rounded up to a huge-page multiple here, and
   * madvise is best-effort — THP backing is not guaranteed. Confirm whether
   * callers are expected to request page-aligned sizes.
   *
   * @note Stream argument is ignored
   *
   * @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled
   *
   * @param bytes The size, in bytes, of the allocation
   * @return void* Pointer to the newly allocated memory
   */
  void* do_allocate(std::size_t bytes, rmm::cuda_stream_view) override
  {
    void* _addr{nullptr};
    _addr = mmap(NULL, bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (_addr == MAP_FAILED) { RAFT_FAIL("huge_page_resource::MAP FAILED"); }
    if (madvise(_addr, bytes, MADV_HUGEPAGE) == -1) {
      // Clean up the mapping before failing so we do not leak address space.
      munmap(_addr, bytes);
      RAFT_FAIL("huge_page_resource::madvise MADV_HUGEPAGE");
    }
    memset(_addr, 0, bytes);
    return _addr;
  }

  /**
   * @brief Deallocate memory pointed to by \p p.
   *
   * `size` must equal the size passed to the matching `do_allocate`, since
   * munmap(2) unmaps exactly the given length.
   *
   * @note Stream argument is ignored.
   *
   * @throws Nothing.
   *
   * @param p Pointer to be deallocated
   */
  void do_deallocate(void* ptr, std::size_t size, rmm::cuda_stream_view) override
  {
    if (munmap(ptr, size) == -1) { RAFT_FAIL("huge_page_resource::munmap"); }
  }

  /**
   * @brief Compare this resource to another.
   *
   * Two cuda_huge_page_resources always compare equal, because they can each
   * deallocate memory allocated by the other.
   *
   * @throws Nothing.
   *
   * @param other The other resource to compare to
   * @return true If the two resources are equivalent
   * @return false If the two resources are not equal
   */
  [[nodiscard]] bool do_is_equal(device_memory_resource const& other) const noexcept override
  {
    return dynamic_cast<cuda_huge_page_resource const*>(&other) != nullptr;
  }

  /**
   * @brief Get free and available memory for memory resource
   *
   * Reports *device* memory via cudaMemGetInfo, even though allocations here
   * come from host mmap — this mirrors the base-class contract.
   *
   * @throws `rmm::cuda_error` if unable to retrieve memory info.
   *
   * @return std::pair contaiing free_size and total_size of memory
   */
  [[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
    rmm::cuda_stream_view) const override
  {
    std::size_t free_size{};
    std::size_t total_size{};
    RMM_CUDA_TRY(cudaMemGetInfo(&free_size, &total_size));
    return std::make_pair(free_size, total_size);
  }
};
} // namespace raft::mr | 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/common/util.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ann_types.hpp"
#include "cuda_stub.hpp" // cuda-related utils
#ifdef ANN_BENCH_NVTX3_HEADERS_FOUND
#include <nvtx3/nvToolsExt.h>
#endif
#include <sys/stat.h>
#include <sys/types.h>
#include <chrono>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <sstream>
#include <string>
#include <vector>
#include <filesystem>
#include <functional>
namespace raft::bench::ann {
// Minimal owning buffer that lives either in CUDA device memory or in plain
// host memory, depending on `memory_type`. The memory is zero-initialized.
// NOTE(review): neither cudaMalloc nor malloc results are checked here; a
// failed allocation leads to a null/invalid `data` — confirm callers tolerate
// this or add error handling.
template <typename T>
struct buf {
  MemoryType memory_type;  // where `data` lives; Host and HostMmap are treated alike
  std::size_t size;        // element count (not bytes)
  T* data;
  buf(MemoryType memory_type, std::size_t size)
    : memory_type(memory_type), size(size), data(nullptr)
  {
    switch (memory_type) {
#ifndef BUILD_CPU_ONLY
      case MemoryType::Device: {
        cudaMalloc(reinterpret_cast<void**>(&data), size * sizeof(T));
        cudaMemset(data, 0, size * sizeof(T));
      } break;
#endif
      // Host and HostMmap both fall through to a plain heap allocation.
      default: {
        data = reinterpret_cast<T*>(malloc(size * sizeof(T)));
        std::memset(data, 0, size * sizeof(T));
      }
    }
  }
  ~buf() noexcept
  {
    if (data == nullptr) { return; }
    switch (memory_type) {
#ifndef BUILD_CPU_ONLY
      case MemoryType::Device: {
        cudaFree(data);
      } break;
#endif
      default: {
        free(data);
      }
    }
  }
  // Produce a buffer of the same size in `target_memory_type`.
  // Crossing the host/device boundary copies via cudaMemcpy; otherwise the
  // pointer is simply stolen (this->data is swapped into the returned buf,
  // leaving *this with the new, freshly zeroed allocation).
  [[nodiscard]] auto move(MemoryType target_memory_type) -> buf<T>
  {
    buf<T> r{target_memory_type, size};
#ifndef BUILD_CPU_ONLY
    if ((memory_type == MemoryType::Device && target_memory_type != MemoryType::Device) ||
        (memory_type != MemoryType::Device && target_memory_type == MemoryType::Device)) {
      cudaMemcpy(r.data, data, size * sizeof(T), cudaMemcpyDefault);
      return r;
    }
#endif
    std::swap(data, r.data);
    return r;
  }
};
// Accumulating GPU timer: owns a non-blocking stream plus a start/stop event
// pair, and sums elapsed kernel time across RAII "laps".
// NOTE(review): CUDA API return codes are ignored throughout — acceptable for
// benchmarking, but failures will silently yield zero timings.
struct cuda_timer {
 private:
  cudaStream_t stream_{nullptr};
  cudaEvent_t start_{nullptr};
  cudaEvent_t stop_{nullptr};
  double total_time_{0};  // accumulated seconds across all laps

 public:
  // One timed interval: records `start_` on construction and, on destruction,
  // records `stop_`, synchronizes, and adds the elapsed time (in seconds) to
  // the owning timer's total.
  struct cuda_lap {
   private:
    cudaStream_t stream_;
    cudaEvent_t start_;
    cudaEvent_t stop_;
    double& total_time_;

   public:
    cuda_lap(cudaStream_t stream, cudaEvent_t start, cudaEvent_t stop, double& total_time)
      : start_(start), stop_(stop), stream_(stream), total_time_(total_time)
    {
#ifndef BUILD_CPU_ONLY
      // Drain prior work so the lap measures only what is launched after it.
      cudaStreamSynchronize(stream_);
      cudaEventRecord(start_, stream_);
#endif
    }
    cuda_lap() = delete;

    ~cuda_lap() noexcept
    {
#ifndef BUILD_CPU_ONLY
      cudaEventRecord(stop_, stream_);
      cudaEventSynchronize(stop_);
      float milliseconds = 0.0f;
      cudaEventElapsedTime(&milliseconds, start_, stop_);
      total_time_ += milliseconds / 1000.0;
#endif
    }
  };

  cuda_timer()
  {
#ifndef BUILD_CPU_ONLY
    cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking);
    cudaEventCreate(&stop_);
    cudaEventCreate(&start_);
#endif
  }

  ~cuda_timer() noexcept
  {
#ifndef BUILD_CPU_ONLY
    cudaEventDestroy(start_);
    cudaEventDestroy(stop_);
    cudaStreamDestroy(stream_);
#endif
  }

  [[nodiscard]] auto stream() const -> cudaStream_t { return stream_; }

  // Total measured time in seconds across all completed laps.
  [[nodiscard]] auto total_time() const -> double { return total_time_; }

  // Start a new timed interval. Note: laps share one event pair, so at most
  // one lap per timer may be alive at a time.
  [[nodiscard]] auto lap() -> cuda_timer::cuda_lap
  {
    return cuda_lap{stream_, start_, stop_, total_time_};
  }
};
// Collect (key, value) string pairs describing the current CUDA device and
// driver/runtime versions, for inclusion in benchmark output context.
// Returns an empty vector in CPU-only builds.
inline auto cuda_info()
{
  std::vector<std::tuple<std::string, std::string>> props;
#ifndef BUILD_CPU_ONLY
  int dev, driver = 0, runtime = 0;
  cudaDriverGetVersion(&driver);
  cudaRuntimeGetVersion(&runtime);

  cudaDeviceProp device_prop;
  cudaGetDevice(&dev);
  cudaGetDeviceProperties(&device_prop, dev);
  props.emplace_back("gpu_name", std::string(device_prop.name));
  props.emplace_back("gpu_sm_count", std::to_string(device_prop.multiProcessorCount));
  // clockRate/memoryClockRate are reported in kHz; x1e3 converts to Hz.
  props.emplace_back("gpu_sm_freq", std::to_string(device_prop.clockRate * 1e3));
  props.emplace_back("gpu_mem_freq", std::to_string(device_prop.memoryClockRate * 1e3));
  props.emplace_back("gpu_mem_bus_width", std::to_string(device_prop.memoryBusWidth));
  props.emplace_back("gpu_mem_global_size", std::to_string(device_prop.totalGlobalMem));
  props.emplace_back("gpu_mem_shared_size", std::to_string(device_prop.sharedMemPerMultiprocessor));
  // CUDA versions are encoded as 1000*major + 10*minor.
  props.emplace_back("gpu_driver_version",
                     std::to_string(driver / 1000) + "." + std::to_string((driver % 100) / 10));
  props.emplace_back("gpu_runtime_version",
                     std::to_string(runtime / 1000) + "." + std::to_string((runtime % 100) / 10));
#endif
  return props;
}
/**
 * RAII helper that wraps one benchmark case in an NVTX range (for profiling
 * with Nsight) and hands out per-iteration sub-ranges via `lap()`.
 *
 * When NVTX headers are unavailable (ANN_BENCH_NVTX3_HEADERS_FOUND undefined)
 * the whole type collapses to a no-op so call sites need no #ifdefs.
 */
struct nvtx_case {
#ifdef ANN_BENCH_NVTX3_HEADERS_FOUND
 private:
  std::string case_name_;
  std::array<char, 32> iter_name_{0};  // scratch for the "Lap N" range label
  nvtxDomainHandle_t domain_;
  int64_t iteration_ = 0;
  nvtxEventAttributes_t case_attrib_{0};
  nvtxEventAttributes_t iter_attrib_{0};
#endif

 public:
  /** RAII sub-range: pushes an NVTX range on construction, pops on destruction. */
  struct nvtx_lap {
#ifdef ANN_BENCH_NVTX3_HEADERS_FOUND
   private:
    nvtxDomainHandle_t domain_;

   public:
    nvtx_lap(nvtxDomainHandle_t domain, nvtxEventAttributes_t* attr) : domain_(domain)
    {
      nvtxDomainRangePushEx(domain_, attr);
    }
    nvtx_lap() = delete;
    ~nvtx_lap() noexcept { nvtxDomainRangePop(domain_); }
#endif
  };

#ifdef ANN_BENCH_NVTX3_HEADERS_FOUND
  explicit nvtx_case(std::string case_name)
    : case_name_(std::move(case_name)), domain_(nvtxDomainCreateA("ANN benchmark"))
  {
    case_attrib_.version       = NVTX_VERSION;
    iter_attrib_.version       = NVTX_VERSION;
    case_attrib_.size          = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
    iter_attrib_.size          = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
    case_attrib_.colorType     = NVTX_COLOR_ARGB;
    iter_attrib_.colorType     = NVTX_COLOR_ARGB;
    case_attrib_.messageType   = NVTX_MESSAGE_TYPE_ASCII;
    iter_attrib_.messageType   = NVTX_MESSAGE_TYPE_ASCII;
    case_attrib_.message.ascii = case_name_.c_str();
    // Derive a stable-ish color from the case name; OR keeps it light enough
    // to be readable in the profiler timeline.
    auto c            = std::hash<std::string>{}(case_name_);
    case_attrib_.color = c | 0xA0A0A0;
    nvtxDomainRangePushEx(domain_, &case_attrib_);
  }

  ~nvtx_case()
  {
    nvtxDomainRangePop(domain_);
    nvtxDomainDestroy(domain_);
  }
#else
  explicit nvtx_case(std::string) {}
#endif

  /** Start a new iteration range; cycles through 5 shades for visual contrast. */
  [[nodiscard]] auto lap() -> nvtx_case::nvtx_lap
  {
#ifdef ANN_BENCH_NVTX3_HEADERS_FOUND
    auto i     = iteration_++;
    uint32_t c = (i % 5);
    uint32_t r = 150 + c * 20;
    uint32_t g = 200 + c * 10;
    uint32_t b = 220 + c * 5;
    // Fix: `i` is int64_t, but "%zd" is the ssize_t conversion — the types only
    // coincide on LP64 platforms. Cast explicitly and use the matching "%lld".
    std::snprintf(iter_name_.data(), iter_name_.size(), "Lap %lld", static_cast<long long>(i));
    iter_attrib_.message.ascii = iter_name_.data();
    iter_attrib_.color         = (r << 16) + (g << 8) + b;
    return nvtx_lap{domain_, &iter_attrib_};
#else
    return nvtx_lap{};
#endif
  }
};
// Split `s` on `delimiter`, discarding empty pieces (so consecutive
// delimiters and leading/trailing ones produce no empty tokens).
inline std::vector<std::string> split(const std::string& s, char delimiter)
{
  std::vector<std::string> parts;
  std::istringstream stream(s);
  for (std::string piece; getline(stream, piece, delimiter);) {
    if (!piece.empty()) { parts.push_back(piece); }
  }
  return parts;
}
// True iff `filename` exists and is a regular file (not a directory/socket/etc.).
inline bool file_exists(const std::string& filename)
{
  struct stat st;
  return stat(filename.c_str(), &st) == 0 && S_ISREG(st.st_mode);
}
// True iff `dir` exists and is a directory.
inline bool dir_exists(const std::string& dir)
{
  struct stat st;
  return stat(dir.c_str(), &st) == 0 && S_ISDIR(st.st_mode);
}
/**
 * Recursively create directory `dir` and any missing parents (like `mkdir -p`).
 * Returns true when the directory exists after the call (including when it
 * already existed), false on failure.
 *
 * Uses std::filesystem::create_directories (the <filesystem> header is already
 * included by this file) instead of the previous hand-rolled split+mkdir loop,
 * which rebuilt each path prefix by string concatenation.
 * NOTE: directory permissions now follow the process umask rather than being
 * forced to 0755 via mkdir(2).
 */
inline bool create_dir(const std::string& dir)
{
  if (dir.empty()) { return true; }  // preserve prior no-op behavior for ""
  std::error_code ec;
  std::filesystem::create_directories(dir, ec);
  if (ec) { return false; }
  // create_directories returns false when the path already existed; what we
  // actually care about is that a directory is there now.
  return std::filesystem::is_directory(dir, ec);
}
// Ensure the parent directory of `file_path` exists, creating it if needed.
// A path with no '/' has no explicit parent and is left alone.
inline void make_sure_parent_dir_exists(const std::string& file_path)
{
  const auto slash = file_path.rfind('/');
  if (slash == std::string::npos) { return; }
  const auto parent = file_path.substr(0, slash);
  if (!dir_exists(parent)) { create_dir(parent); }
}
// Join a directory and a relative path with the platform path separator.
inline auto combine_path(const std::string& dir, const std::string& path)
{
  return (std::filesystem::path(dir) / std::filesystem::path(path)).string();
}
// Timestamped printf-style logger. With a single argument the string is
// printed literally (so '%' in messages is safe); with more, the first is
// treated as a printf format string.
// NOTE(review): std::localtime returns a shared static buffer and is not
// thread-safe — confirm logging is single-threaded or switch to localtime_r.
template <typename... Ts>
void log_(const char* level, const Ts&... vs)
{
  char buf[20];
  std::time_t now = std::time(nullptr);
  std::strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", std::localtime(&now));
  printf("%s [%s] ", buf, level);
  if constexpr (sizeof...(Ts) == 1) {
    printf("%s", vs...);
  } else {
    printf(vs...);
  }
  printf("\n");
  fflush(stdout);
}

// Convenience wrappers selecting the log level tag.
template <typename... Ts>
void log_info(Ts&&... vs)
{
  log_("info", std::forward<Ts>(vs)...);
}
template <typename... Ts>
void log_warn(Ts&&... vs)
{
  log_("warn", std::forward<Ts>(vs)...);
}
template <typename... Ts>
void log_error(Ts&&... vs)
{
  log_("error", std::forward<Ts>(vs)...);
}
} // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/common/conf.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "util.hpp"
#include <iostream>
#include <optional>
#include <string>
#include <unordered_set>
#include <vector>
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace raft::bench::ann {
/**
 * Parses the benchmark's JSON configuration stream into a dataset description
 * plus a list of index definitions (each with its build and search params).
 *
 * Fixes relative to the previous revision:
 *  - dtype inference from the base-file suffix used
 *    `filename.compare(filename.size() - N, N, ...)`; for filenames shorter
 *    than the suffix the unsigned subtraction wrapped around and
 *    std::string::compare threw std::out_of_range. Replaced with a
 *    length-checked ends_with test.
 *  - typo fixed in the "dev_list shouldn't be empty" error message.
 */
class Configuration {
 public:
  struct Index {
    std::string name;
    std::string algo;
    nlohmann::json build_param;
    std::string file;
    std::vector<int> dev_list;  // GPUs for multi-GPU algos; empty otherwise
    int batch_size;
    int k;
    std::vector<nlohmann::json> search_params;
  };

  struct DatasetConf {
    std::string name;
    std::string base_file;
    // use only a subset of base_file,
    // the range of rows is [subset_first_row, subset_first_row + subset_size)
    // however, subset_size = 0 means using all rows after subset_first_row
    // that is, the subset is [subset_first_row, #rows in base_file)
    size_t subset_first_row{0};
    size_t subset_size{0};
    std::string query_file;
    std::string distance;
    std::optional<std::string> groundtruth_neighbors_file{std::nullopt};
    // data type of input dataset, possible values ["float", "int8", "uint8"]
    std::string dtype;
  };

  explicit inline Configuration(std::istream& conf_stream)
  {
    // to enable comments in json
    auto conf = nlohmann::json::parse(conf_stream, nullptr, true, true);
    parse_dataset_(conf.at("dataset"));
    parse_index_(conf.at("index"), conf.at("search_basic_param"));
  }

  [[nodiscard]] inline auto get_dataset_conf() const -> DatasetConf { return dataset_conf_; }
  [[nodiscard]] inline auto get_indices() const -> std::vector<Index> { return indices_; };

 private:
  inline void parse_dataset_(const nlohmann::json& conf)
  {
    dataset_conf_.name       = conf.at("name");
    dataset_conf_.base_file  = conf.at("base_file");
    dataset_conf_.query_file = conf.at("query_file");
    dataset_conf_.distance   = conf.at("distance");

    if (conf.contains("groundtruth_neighbors_file")) {
      dataset_conf_.groundtruth_neighbors_file = conf.at("groundtruth_neighbors_file");
    }
    if (conf.contains("subset_first_row")) {
      dataset_conf_.subset_first_row = conf.at("subset_first_row");
    }
    if (conf.contains("subset_size")) { dataset_conf_.subset_size = conf.at("subset_size"); }

    if (conf.contains("dtype")) {
      dataset_conf_.dtype = conf.at("dtype");
    } else {
      // Infer the dtype from the base-file extension with a length-safe
      // suffix check (the old size()-N arithmetic underflowed on short names).
      const auto ends_with = [](const std::string& s, const std::string& suffix) {
        return s.size() >= suffix.size() &&
               s.compare(s.size() - suffix.size(), suffix.size(), suffix) == 0;
      };
      const auto& filename = dataset_conf_.base_file;
      if (ends_with(filename, "fbin")) {
        dataset_conf_.dtype = "float";
      } else if (ends_with(filename, "u8bin")) {
        dataset_conf_.dtype = "uint8";
      } else if (ends_with(filename, "i8bin")) {
        dataset_conf_.dtype = "int8";
      } else {
        log_error("Could not determine data type of the dataset %s", filename.c_str());
      }
    }
  }

  inline void parse_index_(const nlohmann::json& index_conf,
                           const nlohmann::json& search_basic_conf)
  {
    // Defaults applied to every search-param entry unless overridden locally.
    const int batch_size = search_basic_conf.at("batch_size");
    const int k          = search_basic_conf.at("k");

    for (const auto& conf : index_conf) {
      Index index;
      index.name        = conf.at("name");
      index.algo        = conf.at("algo");
      index.build_param = conf.at("build_param");
      index.file        = conf.at("file");
      index.batch_size  = batch_size;
      index.k           = k;

      if (conf.contains("multigpu")) {
        for (auto it : conf.at("multigpu")) {
          index.dev_list.push_back(it);
        }
        if (index.dev_list.empty()) { throw std::runtime_error("dev_list shouldn't be empty!"); }
        index.dev_list.shrink_to_fit();
        // Propagate the device list into the build params so algorithm
        // factories can see it.
        index.build_param["multigpu"] = conf["multigpu"];
      }

      // Note: `param` is deliberately a mutable copy — it is patched below.
      for (auto param : conf.at("search_params")) {
        /* ### Special parameters for backward compatibility ###

           - Local values of `k` and `n_queries` take priority.
           - The legacy "batch_size" renamed to `n_queries`.
           - Basic search params are used otherwise.
        */
        if (!param.contains("k")) { param["k"] = k; }
        if (!param.contains("n_queries")) {
          if (param.contains("batch_size")) {
            param["n_queries"] = param["batch_size"];
            param.erase("batch_size");
          } else {
            param["n_queries"] = batch_size;
          }
        }
        index.search_params.push_back(param);
      }

      indices_.push_back(index);
    }
  }

  DatasetConf dataset_conf_;
  std::vector<Index> indices_;
};
} // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/common/ann_types.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "cuda_stub.hpp" // cudaStream_t
#include <stdexcept>
#include <string>
#include <vector>
namespace raft::bench::ann {
// What the benchmark's search phase optimizes/measures.
enum Objective {
  THROUGHPUT,  // See how many vectors we can push through
  LATENCY      // See how fast we can push a vector through
};

// Where a dataset / query buffer lives.
enum class MemoryType {
  Host,      // plain host heap memory
  HostMmap,  // host memory backed by a memory-mapped file
  Device,    // CUDA device memory
};

// Distance metric used for index build and search.
enum class Metric {
  kInnerProduct,
  kEuclidean,
};
// Map a config metric string onto the Metric enum; unknown strings throw.
inline auto parse_metric(const std::string& metric_str) -> Metric
{
  if (metric_str == "inner_product") { return raft::bench::ann::Metric::kInnerProduct; }
  if (metric_str == "euclidean") { return raft::bench::ann::Metric::kEuclidean; }
  throw std::runtime_error("invalid metric: '" + metric_str + "'");
}
// Map a config memory-type string onto the MemoryType enum; unknown strings throw.
inline auto parse_memory_type(const std::string& memory_type) -> MemoryType
{
  if (memory_type == "host") { return MemoryType::Host; }
  if (memory_type == "mmap") { return MemoryType::HostMmap; }
  if (memory_type == "device") { return MemoryType::Device; }
  throw std::runtime_error("invalid memory type: '" + memory_type + "'");
}
// Describes where an algorithm expects its inputs to live so the harness can
// stage the dataset and queries in the right memory space.
class AlgoProperty {
 public:
  inline AlgoProperty() {}
  inline AlgoProperty(MemoryType dataset_memory_type_, MemoryType query_memory_type_)
    : dataset_memory_type(dataset_memory_type_), query_memory_type(query_memory_type_)
  {
  }
  MemoryType dataset_memory_type;
  // neighbors/distances should have same memory type as queries
  MemoryType query_memory_type;
  virtual ~AlgoProperty() = default;
};
// Non-templated base carrying the metric and dimensionality common to all
// algorithm wrappers.
class AnnBase {
 public:
  inline AnnBase(Metric metric, int dim) : metric_(metric), dim_(dim) {}
  virtual ~AnnBase() = default;

 protected:
  Metric metric_;  // distance metric chosen at construction
  int dim_;        // vector dimensionality
};
// Abstract interface every benchmarked ANN algorithm implements: build an
// index over a dataset, configure search, run batched k-NN queries, and
// serialize/deserialize the index.
template <typename T>
class ANN : public AnnBase {
 public:
  struct AnnSearchParam {
    Objective metric_objective = Objective::LATENCY;
    virtual ~AnnSearchParam() = default;
    // Whether search additionally needs access to the raw dataset
    // (see set_search_dataset below).
    [[nodiscard]] virtual auto needs_dataset() const -> bool { return false; };
  };

  inline ANN(Metric metric, int dim) : AnnBase(metric, dim) {}

  // Build the index over `nrow` vectors of `dim_` elements each.
  virtual void build(const T* dataset, size_t nrow, cudaStream_t stream = 0) = 0;

  virtual void set_search_param(const AnnSearchParam& param) = 0;
  // TODO: this assumes that an algorithm can always return k results.
  // This is not always possible.
  virtual void search(const T* queries,
                      int batch_size,
                      int k,
                      size_t* neighbors,
                      float* distances,
                      cudaStream_t stream = 0) const = 0;

  virtual void save(const std::string& file) const = 0;
  virtual void load(const std::string& file)       = 0;

  virtual AlgoProperty get_preference() const = 0;

  // Some algorithms don't save the building dataset in their indices.
  // So they should be given the access to that dataset during searching.
  // The advantage of this way is that index has smaller size
  // and many indices can share one dataset.
  //
  // SearchParam::needs_dataset() of such algorithm should be true,
  // and set_search_dataset() should save the passed-in pointer somewhere.
  // The client code should call set_search_dataset() before searching,
  // and should not release dataset before searching is finished.
  virtual void set_search_dataset(const T* /*dataset*/, size_t /*nrow*/){};
};
} // namespace raft::bench::ann
// Explicitly instantiates create_algo<DataT> / create_search_param<DataT> so
// their symbols exist in each algorithm's shared library and can be resolved
// at runtime via dlsym (see benchmark.cpp).  Invoke once per data type in
// every plugin translation unit.
#define REGISTER_ALGO_INSTANCE(DataT)                                                            \
  template auto raft::bench::ann::create_algo<DataT>(                                            \
    const std::string&, const std::string&, int, const nlohmann::json&, const std::vector<int>&) \
    ->std::unique_ptr<raft::bench::ann::ANN<DataT>>;                                             \
  template auto raft::bench::ann::create_search_param<DataT>(const std::string&,                 \
                                                             const nlohmann::json&)              \
    ->std::unique_ptr<typename raft::bench::ann::ANN<DataT>::AnnSearchParam>;
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/common/thread_pool.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <condition_variable>
#include <future>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <thread>
#include <utility>
#include <vector>

#include <omp.h>
/**
 * A fixed-size pool of worker threads for index-parallel workloads.
 *
 * With num_threads == 1 no worker threads are spawned and submit() runs the
 * work inline on the caller's thread.  Each worker owns one cache-line-aligned
 * task slot; submit() hands each worker a contiguous chunk of the index range,
 * and workers then compete for the leftover items via a shared atomic counter.
 */
class FixedThreadPool {
 public:
  /**
   * @param num_threads number of workers; must be >= 1.
   * @throws std::runtime_error if num_threads < 1.
   */
  FixedThreadPool(int num_threads)
  {
    if (num_threads < 1) {
      throw std::runtime_error("num_threads must >= 1");
    } else if (num_threads == 1) {
      // Single-threaded mode: no workers, submit() runs inline.
      return;
    }

    tasks_ = new Task_[num_threads];

    threads_.reserve(num_threads);
    for (int i = 0; i < num_threads; ++i) {
      threads_.emplace_back([&, i] {
        auto& task = tasks_[i];
        while (true) {
          std::unique_lock<std::mutex> lock(task.mtx);
          task.cv.wait(lock,
                       [&] { return task.has_task || finished_.load(std::memory_order_relaxed); });
          if (finished_.load(std::memory_order_relaxed)) { break; }

          task.task();
          task.has_task = false;
        }
      });
    }
  }

  ~FixedThreadPool()
  {
    if (threads_.empty()) { return; }

    finished_.store(true, std::memory_order_relaxed);
    for (unsigned i = 0; i < threads_.size(); ++i) {
      auto& task = tasks_[i];
      {
        // BUGFIX: the original code created an unnamed, temporary lock_guard
        // (`std::lock_guard<std::mutex>(task.mtx);`) that unlocked at the end
        // of the statement, so notify_one() fired without the mutex held and
        // could race with the worker between its predicate check and wait(),
        // potentially losing the wakeup.  Hold a named lock while notifying.
        std::lock_guard<std::mutex> lock(task.mtx);
        task.cv.notify_one();
      }
      threads_[i].join();
    }

    delete[] tasks_;
  }

  /**
   * Runs f(i) for every i in [0, len), blocking until all items complete.
   * In single-threaded mode the loop runs inline on the calling thread.
   */
  template <typename Func, typename IdxT>
  void submit(Func f, IdxT len)
  {
    // Run functions in main thread if thread pool has no threads
    if (threads_.empty()) {
      for (IdxT i = 0; i < len; ++i) {
        f(i);
      }
      return;
    }

    const int num_threads = threads_.size();
    // one extra part for competition among threads
    const IdxT items_per_thread = len / (num_threads + 1);
    std::atomic<IdxT> cnt(items_per_thread * num_threads);

    // Each worker first processes its contiguous chunk, then grabs leftover
    // items one at a time through the shared atomic counter.
    auto wrapped_f = [&](IdxT start, IdxT end) {
      for (IdxT i = start; i < end; ++i) {
        f(i);
      }

      while (true) {
        IdxT i = cnt.fetch_add(1, std::memory_order_relaxed);
        if (i >= len) { break; }
        f(i);
      }
    };

    std::vector<std::future<void>> futures;
    futures.reserve(num_threads);
    for (int i = 0; i < num_threads; ++i) {
      IdxT start = i * items_per_thread;
      auto& task = tasks_[i];
      {
        std::lock_guard lock(task.mtx);
        (void)lock;  // stop nvcc warning
        task.task = std::packaged_task<void()>([=] { wrapped_f(start, start + items_per_thread); });
        futures.push_back(task.task.get_future());
        task.has_task = true;
      }
      task.cv.notify_one();
    }

    for (auto& fut : futures) {
      fut.wait();
    }
    return;
  }

 private:
  // One slot per worker; 64-byte alignment avoids false sharing between slots.
  struct alignas(64) Task_ {
    std::mutex mtx;
    std::condition_variable cv;
    bool has_task = false;
    std::packaged_task<void()> task;
  };

  // BUGFIX: initialized to nullptr so the single-thread path (which never
  // allocates tasks_) does not leave a garbage pointer behind.
  Task_* tasks_{nullptr};
  std::vector<std::thread> threads_;
  std::atomic<bool> finished_{false};
};
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/common/benchmark.cpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_stub.hpp" // must go first
#include "ann_types.hpp"
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
#include <memory>
#include <unordered_map>
#include <dlfcn.h>
#include <filesystem>
namespace raft::bench::ann {
// RAII owner of a dlopen()-ed shared library; the handle is released on
// destruction.  Throws if the library cannot be loaded.
struct lib_handle {
  void* handle{nullptr};
  explicit lib_handle(const std::string& name)
  {
    handle = dlopen(name.c_str(), RTLD_LAZY | RTLD_LOCAL);
    if (handle != nullptr) { return; }
    // Append the loader's diagnostic, when it provides one.
    std::string error_msg = "Failed to load " + name;
    const char* err       = dlerror();
    if (err != nullptr && err[0] != '\0') { error_msg += ": " + std::string(err); }
    throw std::runtime_error(error_msg);
  }
  ~lib_handle() noexcept
  {
    if (handle != nullptr) { dlclose(handle); }
  }
};
// Loads the shared library implementing the given algorithm, caching the
// handle so each library is opened at most once per process.
auto load_lib(const std::string& algo) -> void*
{
  static std::unordered_map<std::string, lib_handle> libs{};
  auto it = libs.find(algo);
  if (it == libs.end()) {
    // Not cached yet: open "lib<algo>_ann_bench.so" and remember the handle.
    it = libs.emplace(algo, "lib" + algo + "_ann_bench.so").first;
  }
  return it->second.handle;
}
// Returns the dynamic-symbol name of the function at `addr` (via dladdr);
// throws if the address cannot be resolved to a named symbol.
auto get_fun_name(void* addr) -> std::string
{
  Dl_info dl_info;
  const bool resolved = dladdr(addr, &dl_info) != 0 && dl_info.dli_sname != nullptr &&
                        dl_info.dli_sname[0] != '\0';
  if (!resolved) {
    throw std::logic_error("Failed to find out name of the looked up function");
  }
  return std::string{dl_info.dli_sname};
}
// Factory trampoline: looks up this very function's mangled symbol name (via
// dladdr on its own address) and resolves the *same* symbol inside the
// algorithm's plugin library, then forwards the call there.  This works
// because each plugin explicitly instantiates create_algo<T> via
// REGISTER_ALGO_INSTANCE, producing an identically-mangled symbol.
template <typename T>
auto create_algo(const std::string& algo,
                 const std::string& distance,
                 int dim,
                 const nlohmann::json& conf,
                 const std::vector<int>& dev_list) -> std::unique_ptr<raft::bench::ann::ANN<T>>
{
  // Mangled name of this instantiation; computed once per T.
  static auto fname = get_fun_name(reinterpret_cast<void*>(&create_algo<T>));
  auto handle       = load_lib(algo);
  auto fun_addr     = dlsym(handle, fname.c_str());
  if (fun_addr == nullptr) {
    throw std::runtime_error("Couldn't load the create_algo function (" + algo + ")");
  }
  auto fun = reinterpret_cast<decltype(&create_algo<T>)>(fun_addr);
  return fun(algo, distance, dim, conf, dev_list);
}
// Same trampoline pattern as create_algo: resolve this instantiation's own
// mangled symbol inside the algorithm's plugin library and forward the call.
template <typename T>
std::unique_ptr<typename raft::bench::ann::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
  // Mangled name of this instantiation; computed once per T.
  static auto fname = get_fun_name(reinterpret_cast<void*>(&create_search_param<T>));
  auto handle       = load_lib(algo);
  auto fun_addr     = dlsym(handle, fname.c_str());
  if (fun_addr == nullptr) {
    throw std::runtime_error("Couldn't load the create_search_param function (" + algo + ")");
  }
  auto fun = reinterpret_cast<decltype(&create_search_param<T>)>(fun_addr);
  return fun(algo, conf);
}
}; // namespace raft::bench::ann
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);
#include "benchmark.hpp"
// Entry point: all benchmark driving logic lives in run_main (benchmark.hpp).
int main(int argc, char** argv) { return raft::bench::ann::run_main(argc, argv); }
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/common/dataset.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "util.hpp"
#ifndef BUILD_CPU_ONLY
#include <cuda_fp16.h>
#else
typedef uint16_t half;
#endif
#include <errno.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <vector>
namespace raft::bench::ann {
// http://big-ann-benchmarks.com/index.html:
// binary format that starts with 8 bytes of data consisting of num_points(uint32_t)
// num_dimensions(uint32) followed by num_pts x num_dimensions x sizeof(type) bytes of
// data stored one vector after another.
// Data files will have suffixes .fbin, .u8bin, and .i8bin to represent float32, uint8
// and int8 type data.
// As extensions for this benchmark, half and int data files will have suffixes .f16bin
// and .ibin, respectively.
template <typename T>
class BinFile {
public:
BinFile(const std::string& file,
const std::string& mode,
uint32_t subset_first_row = 0,
uint32_t subset_size = 0);
~BinFile()
{
if (mapped_ptr_ != nullptr) { unmap(); }
if (fp_ != nullptr) { fclose(fp_); }
}
BinFile(const BinFile&) = delete;
BinFile& operator=(const BinFile&) = delete;
void get_shape(size_t* nrows, int* ndims) const
{
assert(read_mode_);
if (!fp_) { open_file_(); }
*nrows = nrows_;
*ndims = ndims_;
}
void read(T* data) const
{
assert(read_mode_);
if (!fp_) { open_file_(); }
size_t total = static_cast<size_t>(nrows_) * ndims_;
if (fread(data, sizeof(T), total, fp_) != total) {
throw std::runtime_error("fread() BinFile " + file_ + " failed");
}
}
void write(const T* data, uint32_t nrows, uint32_t ndims)
{
assert(!read_mode_);
if (!fp_) { open_file_(); }
if (fwrite(&nrows, sizeof(uint32_t), 1, fp_) != 1) {
throw std::runtime_error("fwrite() BinFile " + file_ + " failed");
}
if (fwrite(&ndims, sizeof(uint32_t), 1, fp_) != 1) {
throw std::runtime_error("fwrite() BinFile " + file_ + " failed");
}
size_t total = static_cast<size_t>(nrows) * ndims;
if (fwrite(data, sizeof(T), total, fp_) != total) {
throw std::runtime_error("fwrite() BinFile " + file_ + " failed");
}
}
T* map() const
{
assert(read_mode_);
if (!fp_) { open_file_(); }
int fid = fileno(fp_);
mapped_ptr_ = mmap(nullptr, file_size_, PROT_READ, MAP_PRIVATE, fid, 0);
if (mapped_ptr_ == MAP_FAILED) {
mapped_ptr_ = nullptr;
throw std::runtime_error("mmap error: Value of errno " + std::to_string(errno) + ", " +
std::string(strerror(errno)));
}
return reinterpret_cast<T*>(reinterpret_cast<uint8_t*>(mapped_ptr_) + 2 * sizeof(uint32_t) +
subset_first_row_ * ndims_ * sizeof(T));
}
void unmap() const
{
if (munmap(mapped_ptr_, file_size_) == -1) {
throw std::runtime_error("munmap error: " + std::string(strerror(errno)));
}
}
private:
void check_suffix_();
void open_file_() const;
std::string file_;
bool read_mode_;
uint32_t subset_first_row_;
uint32_t subset_size_;
mutable FILE* fp_{nullptr};
mutable uint32_t nrows_;
mutable uint32_t ndims_;
mutable size_t file_size_;
mutable void* mapped_ptr_{nullptr};
};
// Validates the mode/suffix combination; the file itself is opened lazily.
// Subset selection is only meaningful in read mode.
template <typename T>
BinFile<T>::BinFile(const std::string& file,
                    const std::string& mode,
                    uint32_t subset_first_row,
                    uint32_t subset_size)
  : file_(file),
    read_mode_(mode == "r"),
    subset_first_row_(subset_first_row),
    subset_size_(subset_size),
    fp_(nullptr)
{
  check_suffix_();

  if (read_mode_) { return; }
  if (mode != "w") {
    throw std::runtime_error("BinFile's mode must be either 'r' or 'w': " + file_);
  }
  if (subset_first_row != 0) {
    throw std::runtime_error("subset_first_row should be zero for write mode");
  }
  if (subset_size != 0) {
    throw std::runtime_error("subset_size should be zero for write mode");
  }
}
// Opens the file and, in read mode, validates the header against the file
// size and applies the subset window: seeks past subset_first_row_ rows and
// shrinks nrows_ accordingly.  Called lazily from the const accessors.
template <typename T>
void BinFile<T>::open_file_() const
{
  fp_ = fopen(file_.c_str(), read_mode_ ? "r" : "w");
  if (!fp_) { throw std::runtime_error("open BinFile failed: " + file_); }

  if (read_mode_) {
    struct stat statbuf;
    if (stat(file_.c_str(), &statbuf) != 0) { throw std::runtime_error("stat() failed: " + file_); }
    file_size_ = statbuf.st_size;

    uint32_t header[2];
    if (fread(header, sizeof(uint32_t), 2, fp_) != 2) {
      throw std::runtime_error("read header of BinFile failed: " + file_);
    }
    nrows_ = header[0];
    ndims_ = header[1];

    // The file must be exactly header + nrows * ndims elements.
    size_t expected_file_size =
      2 * sizeof(uint32_t) + static_cast<size_t>(nrows_) * ndims_ * sizeof(T);
    if (file_size_ != expected_file_size) {
      throw std::runtime_error("expected file size of " + file_ + " is " +
                               std::to_string(expected_file_size) + ", however, actual size is " +
                               std::to_string(file_size_));
    }

    if (subset_first_row_ >= nrows_) {
      throw std::runtime_error(file_ + ": subset_first_row (" + std::to_string(subset_first_row_) +
                               ") >= nrows (" + std::to_string(nrows_) + ")");
    }
    if (subset_first_row_ + subset_size_ > nrows_) {
      throw std::runtime_error(file_ + ": subset_first_row (" + std::to_string(subset_first_row_) +
                               ") + subset_size (" + std::to_string(subset_size_) + ") > nrows (" +
                               std::to_string(nrows_) + ")");
    }

    if (subset_first_row_) {
      static_assert(sizeof(long) == 8, "fseek() don't support 64-bit offset");
      if (fseek(fp_, sizeof(T) * subset_first_row_ * ndims_, SEEK_CUR) == -1) {
        throw std::runtime_error(file_ + ": fseek failed");
      }
      nrows_ -= subset_first_row_;
    }
    if (subset_size_) { nrows_ = subset_size_; }
  }
}
// Enforces the element-type <-> file-suffix mapping documented at the top of
// this file (.fbin/.f16bin/.ibin/.u8bin/.i8bin); throws on any mismatch.
template <typename T>
void BinFile<T>::check_suffix_()
{
  auto pos = file_.rfind('.');
  if (pos == std::string::npos) {
    throw std::runtime_error("name of BinFile doesn't have a suffix: " + file_);
  }
  std::string suffix = file_.substr(pos + 1);

  if constexpr (std::is_same_v<T, float>) {
    if (suffix != "fbin") {
      throw std::runtime_error("BinFile<float> should has .fbin suffix: " + file_);
    }
  } else if constexpr (std::is_same_v<T, half>) {
    if (suffix != "f16bin") {
      throw std::runtime_error("BinFile<half> should has .f16bin suffix: " + file_);
    }
  } else if constexpr (std::is_same_v<T, int>) {
    if (suffix != "ibin") {
      throw std::runtime_error("BinFile<int> should has .ibin suffix: " + file_);
    }
  } else if constexpr (std::is_same_v<T, uint8_t>) {
    if (suffix != "u8bin") {
      throw std::runtime_error("BinFile<uint8_t> should has .u8bin suffix: " + file_);
    }
  } else if constexpr (std::is_same_v<T, int8_t>) {
    if (suffix != "i8bin") {
      throw std::runtime_error("BinFile<int8_t> should has .i8bin suffix: " + file_);
    }
  } else {
    throw std::runtime_error(
      "T of BinFile<T> should be one of float, half, int, uint8_t, or int8_t");
  }
}
// Abstract benchmark dataset: owns lazily loaded base/query/ground-truth sets
// on the host and, on demand, device copies of base and query sets.
template <typename T>
class Dataset {
 public:
  Dataset(const std::string& name) : name_(name) {}
  Dataset(const std::string& name, const std::string& distance) : name_(name), distance_(distance)
  {
  }
  Dataset(const Dataset&) = delete;
  Dataset& operator=(const Dataset&) = delete;
  virtual ~Dataset();

  std::string name() const { return name_; }
  std::string distance() const { return distance_; }
  virtual int dim() const = 0;
  virtual uint32_t max_k() const = 0;
  virtual size_t base_set_size() const = 0;
  virtual size_t query_set_size() const = 0;

  // load data lazily, so don't pay the overhead of reading unneeded set
  // e.g. don't load base set when searching
  const T* base_set() const
  {
    if (!base_set_) { load_base_set_(); }
    return base_set_;
  }

  const T* query_set() const
  {
    if (!query_set_) { load_query_set_(); }
    return query_set_;
  }

  // Ground-truth neighbor indices; may stay nullptr if no gt file was given
  // (see BinDataset::load_gt_set_).
  const int32_t* gt_set() const
  {
    if (!gt_set_) { load_gt_set_(); }
    return gt_set_;
  }

  const T* base_set_on_gpu() const;
  const T* query_set_on_gpu() const;
  const T* mapped_base_set() const;

  // Memory-type-dispatching accessors used by the benchmark harness; the
  // chosen memory type comes from the algorithm's AlgoProperty.
  auto query_set(MemoryType memory_type) const -> const T*
  {
    switch (memory_type) {
      case MemoryType::Device: return query_set_on_gpu();
      default: return query_set();
    }
  }

  auto base_set(MemoryType memory_type) const -> const T*
  {
    switch (memory_type) {
      case MemoryType::Device: return base_set_on_gpu();
      case MemoryType::Host: return base_set();
      case MemoryType::HostMmap: return mapped_base_set();
      default: return nullptr;
    }
  }

 protected:
  virtual void load_base_set_() const  = 0;
  virtual void load_gt_set_() const    = 0;
  virtual void load_query_set_() const = 0;
  virtual void map_base_set_() const   = 0;

  std::string name_;
  std::string distance_;
  // All caches are mutable because loading happens lazily in const accessors.
  mutable T* base_set_        = nullptr;
  mutable T* query_set_       = nullptr;
  mutable T* d_base_set_      = nullptr;  // device copy of the base set
  mutable T* d_query_set_     = nullptr;  // device copy of the query set
  mutable T* mapped_base_set_ = nullptr;  // mmap-ed base set (not owned)
  mutable int32_t* gt_set_    = nullptr;
};
// Releases host copies unconditionally and device copies only when built with
// CUDA support.  The mmap-ed pointer is not owned here (it belongs to the
// underlying BinFile), so it is deliberately not freed.
template <typename T>
Dataset<T>::~Dataset()
{
  delete[] base_set_;
  delete[] query_set_;
  delete[] gt_set_;
#ifndef BUILD_CPU_ONLY
  if (d_base_set_) { cudaFree(d_base_set_); }
  if (d_query_set_) { cudaFree(d_query_set_); }
#endif
}
// Lazily allocates and fills a device copy of the base set.
// In a CPU-only build this always returns nullptr.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked here; an
// allocation failure would surface later as a null/garbage pointer.
template <typename T>
const T* Dataset<T>::base_set_on_gpu() const
{
#ifndef BUILD_CPU_ONLY
  if (!d_base_set_) {
    base_set();  // ensure the host copy exists first
    cudaMalloc((void**)&d_base_set_, base_set_size() * dim() * sizeof(T));
    cudaMemcpy(d_base_set_, base_set_, base_set_size() * dim() * sizeof(T), cudaMemcpyHostToDevice);
  }
#endif
  return d_base_set_;
}
// Lazily allocates and fills a device copy of the query set.
// In a CPU-only build this always returns nullptr.
// NOTE(review): cuda* return codes unchecked, same as base_set_on_gpu().
template <typename T>
const T* Dataset<T>::query_set_on_gpu() const
{
#ifndef BUILD_CPU_ONLY
  if (!d_query_set_) {
    query_set();  // ensure the host copy exists first
    cudaMalloc((void**)&d_query_set_, query_set_size() * dim() * sizeof(T));
    cudaMemcpy(
      d_query_set_, query_set_, query_set_size() * dim() * sizeof(T), cudaMemcpyHostToDevice);
  }
#endif
  return d_query_set_;
}
// Returns the memory-mapped base set, mapping it on first access.
template <typename T>
const T* Dataset<T>::mapped_base_set() const
{
  if (mapped_base_set_ == nullptr) { map_base_set_(); }
  return mapped_base_set_;
}
// Dataset backed by big-ann-benchmarks binary files: a base-set file, a
// query-set file, and an optional ground-truth-neighbors file.
template <typename T>
class BinDataset : public Dataset<T> {
 public:
  BinDataset(const std::string& name,
             const std::string& base_file,
             size_t subset_first_row,
             size_t subset_size,
             const std::string& query_file,
             const std::string& distance,
             const std::optional<std::string>& groundtruth_neighbors_file);

  int dim() const override;
  uint32_t max_k() const override;
  size_t base_set_size() const override;
  size_t query_set_size() const override;

 private:
  void load_base_set_() const override;
  void load_query_set_() const override;
  void load_gt_set_() const override;
  void map_base_set_() const override;

  // Cached metadata, filled as a side effect of reading the file headers.
  mutable int dim_              = 0;
  mutable uint32_t max_k_       = 0;
  mutable size_t base_set_size_ = 0;
  mutable size_t query_set_size_ = 0;

  BinFile<T> base_file_;
  BinFile<T> query_file_;
  std::optional<BinFile<std::int32_t>> gt_file_{std::nullopt};
};
// Opens base/query files in read mode (base with an optional row window);
// the ground-truth file is attached only when a path is supplied.
template <typename T>
BinDataset<T>::BinDataset(const std::string& name,
                          const std::string& base_file,
                          size_t subset_first_row,
                          size_t subset_size,
                          const std::string& query_file,
                          const std::string& distance,
                          const std::optional<std::string>& groundtruth_neighbors_file)
  : Dataset<T>(name, distance),
    base_file_(base_file, "r", subset_first_row, subset_size),
    query_file_(query_file, "r")
{
  if (groundtruth_neighbors_file) { gt_file_.emplace(*groundtruth_neighbors_file, "r"); }
}
// Returns the vector dimensionality.  dim_ is populated as a *side effect* of
// base_set_size() / query_set_size() reading the file headers; both of those
// throw on a zero-sized set, so in practice the first successful call below
// returns.  The final fall-through return keeps the function well-formed.
template <typename T>
int BinDataset<T>::dim() const
{
  if (dim_ > 0) { return dim_; }
  if (base_set_size() > 0) { return dim_; }
  if (query_set_size() > 0) { return dim_; }
  return dim_;
}
// Maximum supported k; max_k_ is filled as a side effect of loading the
// ground-truth set (0 when no ground-truth file was provided).
template <typename T>
uint32_t BinDataset<T>::max_k() const
{
  if (this->gt_set_ == nullptr) { load_gt_set_(); }
  return max_k_;
}
// Returns (and caches) the number of query vectors, reading the query-file
// header on first call.  Also records/validates the dimensionality.
// @throws std::runtime_error on a zero-sized set, zero dim, or a dim mismatch
//         with the base set.
template <typename T>
size_t BinDataset<T>::query_set_size() const
{
  if (query_set_size_ > 0) { return query_set_size_; }
  int dim;
  query_file_.get_shape(&query_set_size_, &dim);
  if (query_set_size_ == 0) { throw std::runtime_error("Zero query set size"); }
  if (dim == 0) { throw std::runtime_error("Zero query set dim"); }
  if (dim_ == 0) {
    dim_ = dim;
  } else if (dim_ != dim) {
    // Bug fix: the message previously ended mid-sentence without the closing
    // parenthesis after the query-set dim.
    throw std::runtime_error("base set dim (" + std::to_string(dim_) + ") != query set dim (" +
                             std::to_string(dim) + ")");
  }
  return query_set_size_;
}
// Returns (and caches) the number of base vectors, reading the base-file
// header on first call.  Also records/validates the dimensionality.
// @throws std::runtime_error on a zero-sized set, zero dim, or a dim mismatch
//         with the query set.
template <typename T>
size_t BinDataset<T>::base_set_size() const
{
  if (base_set_size_ > 0) { return base_set_size_; }
  int dim;
  base_file_.get_shape(&base_set_size_, &dim);
  if (base_set_size_ == 0) { throw std::runtime_error("Zero base set size"); }
  if (dim == 0) { throw std::runtime_error("Zero base set dim"); }
  if (dim_ == 0) {
    dim_ = dim;
  } else if (dim_ != dim) {
    // Bug fix: the message previously ended mid-sentence without the closing
    // parenthesis after the query-set dim.
    throw std::runtime_error("base set dim (" + std::to_string(dim) + ") != query set dim (" +
                             std::to_string(dim_) + ")");
  }
  return base_set_size_;
}
// Reads the entire base set into a freshly allocated host buffer
// (freed by ~Dataset).
template <typename T>
void BinDataset<T>::load_base_set_() const
{
  this->base_set_ = new T[base_set_size() * dim()];
  base_file_.read(this->base_set_);
}
// Reads the entire query set into a freshly allocated host buffer
// (freed by ~Dataset).
template <typename T>
void BinDataset<T>::load_query_set_() const
{
  this->query_set_ = new T[query_set_size() * dim()];
  query_file_.read(this->query_set_);
}
// Loads the ground-truth neighbor indices and records max_k_ (= the file's
// column count).  Silently a no-op when no ground-truth file was configured,
// in which case gt_set_ stays nullptr and max_k_ stays 0.
template <typename T>
void BinDataset<T>::load_gt_set_() const
{
  if (gt_file_.has_value()) {
    size_t queries;
    int k;
    gt_file_->get_shape(&queries, &k);
    this->gt_set_ = new std::int32_t[queries * k];
    gt_file_->read(this->gt_set_);
    max_k_ = k;
  }
}
// Memory-maps the base file; the returned pointer is owned by base_file_.
template <typename T>
void BinDataset<T>::map_base_set_() const
{
  this->mapped_base_set_ = base_file_.map();
}
} // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/common/cuda_stub.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/*
The content of this header is governed by two preprocessor definitions:
- BUILD_CPU_ONLY - whether none of the CUDA functions are used.
- ANN_BENCH_LINK_CUDART - dynamically link against this string if defined.
______________________________________________________________________________
|BUILD_CPU_ONLY | ANN_BENCH_LINK_CUDART | cudart | cuda_runtime_api.h |
| | | found | needed | included |
|---------|-----------------------|-----------|---------|--------------------|
| ON | <not defined> | false | false | NO |
| ON | "cudart.so.xx.xx" | false | false | NO |
| OFF | <nod defined> | true | true | YES |
| OFF | "cudart.so.xx.xx" | <runtime> | true | YES |
------------------------------------------------------------------------------
*/
#ifndef BUILD_CPU_ONLY
#include <cuda_runtime_api.h>
#ifdef ANN_BENCH_LINK_CUDART
#include <cstring>
#include <dlfcn.h>
#endif
#else
typedef void* cudaStream_t;
typedef void* cudaEvent_t;
#endif
namespace raft::bench::ann {
// Optional runtime loader for libcudart.  When ANN_BENCH_LINK_CUDART is
// defined, the constructor tries progressively less specific library names by
// truncating the version components of "cudart.so.MAJOR.MINOR.PATCH" in place
// (the in-place edits below rely on that exact literal shape).
struct cuda_lib_handle {
  void* handle{nullptr};
  explicit cuda_lib_handle()
  {
#ifdef ANN_BENCH_LINK_CUDART
    constexpr int kFlags = RTLD_NOW | RTLD_GLOBAL | RTLD_DEEPBIND | RTLD_NODELETE;
    // The full name of the linked cudart library 'cudart.so.MAJOR.MINOR.PATCH'
    char libname[] = ANN_BENCH_LINK_CUDART;  // NOLINT
    handle         = dlopen(ANN_BENCH_LINK_CUDART, kFlags);
    if (handle != nullptr) { return; }
    // try strip the PATCH
    auto p = strrchr(libname, '.');
    p[0]   = 0;
    handle = dlopen(libname, kFlags);
    if (handle != nullptr) { return; }
    // try set the MINOR version to 0
    p      = strrchr(libname, '.');
    p[1]   = '0';
    p[2]   = 0;
    handle = dlopen(libname, kFlags);
    if (handle != nullptr) { return; }
    // try strip the MINOR
    p[0]   = 0;
    handle = dlopen(libname, kFlags);
    if (handle != nullptr) { return; }
    // try strip the MAJOR
    p      = strrchr(libname, '.');
    p[0]   = 0;
    handle = dlopen(libname, kFlags);
#endif
  }
  ~cuda_lib_handle() noexcept
  {
#ifdef ANN_BENCH_LINK_CUDART
    if (handle != nullptr) { dlclose(handle); }
#endif
  }

  // Resolves a symbol from the loaded cudart; nullptr when not linking
  // dynamically (the compile-time cudart is used instead).
  template <typename Symbol>
  auto sym(const char* name) -> Symbol
  {
#ifdef ANN_BENCH_LINK_CUDART
    return reinterpret_cast<Symbol>(dlsym(handle, name));
#else
    return nullptr;
#endif
  }

  /** Whether this is NOT a cpu-only package. */
  [[nodiscard]] constexpr inline auto needed() const -> bool
  {
#if defined(BUILD_CPU_ONLY)
    return false;
#else
    return true;
#endif
  }

  /** CUDA found, either at compile time or at runtime. */
  [[nodiscard]] inline auto found() const -> bool
  {
#if defined(BUILD_CPU_ONLY)
    return false;
#elif defined(ANN_BENCH_LINK_CUDART)
    return handle != nullptr;
#else
    return true;
#endif
  }
};
static inline cuda_lib_handle cudart{};
#ifdef ANN_BENCH_LINK_CUDART
// Weak no-op fallbacks for the CUDA runtime API.  They are used when cudart
// cannot be loaded at runtime, keeping the benchmark linkable and runnable on
// machines without CUDA; every stub simply reports cudaSuccess and zeroes its
// outputs.
namespace stub {

[[gnu::weak, gnu::noinline]] cudaError_t cudaMemcpy(void* dst,
                                                    const void* src,
                                                    size_t count,
                                                    enum cudaMemcpyKind kind)
{
  return cudaSuccess;
}

[[gnu::weak, gnu::noinline]] cudaError_t cudaMalloc(void** ptr, size_t size)
{
  *ptr = nullptr;
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaMemset(void* devPtr, int value, size_t count)
{
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaFree(void* devPtr) { return cudaSuccess; }
[[gnu::weak, gnu::noinline]] cudaError_t cudaStreamCreate(cudaStream_t* pStream)
{
  *pStream = 0;
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaStreamCreateWithFlags(cudaStream_t* pStream,
                                                                   unsigned int flags)
{
  *pStream = 0;
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaStreamDestroy(cudaStream_t pStream)
{
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaDeviceSynchronize() { return cudaSuccess; }

[[gnu::weak, gnu::noinline]] cudaError_t cudaStreamSynchronize(cudaStream_t pStream)
{
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaEventCreate(cudaEvent_t* event)
{
  *event = 0;
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaEventRecord(cudaEvent_t event, cudaStream_t stream)
{
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaEventSynchronize(cudaEvent_t event)
{
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaEventElapsedTime(float* ms,
                                                              cudaEvent_t start,
                                                              cudaEvent_t end)
{
  *ms = 0;
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaEventDestroy(cudaEvent_t event) { return cudaSuccess; }
[[gnu::weak, gnu::noinline]] cudaError_t cudaGetDevice(int* device)
{
  *device = 0;
  return cudaSuccess;
};
[[gnu::weak, gnu::noinline]] cudaError_t cudaDriverGetVersion(int* driver)
{
  *driver = 0;
  return cudaSuccess;
};
[[gnu::weak, gnu::noinline]] cudaError_t cudaRuntimeGetVersion(int* runtime)
{
  *runtime = 0;
  return cudaSuccess;
};
[[gnu::weak, gnu::noinline]] cudaError_t cudaGetDeviceProperties(struct cudaDeviceProp* prop,
                                                                 int device)
{
  *prop = cudaDeviceProp{};
  return cudaSuccess;
}
}  // namespace stub
// Binds each cudaXxx name to either the dlsym-resolved symbol from the loaded
// cudart library (when found at runtime) or the no-op stub above.  The bound
// names shadow the real API inside this namespace.
#define RAFT_DECLARE_CUDART(fun)          \
  static inline decltype(&stub::fun) fun = \
    cudart.found() ? cudart.sym<decltype(&stub::fun)>(#fun) : &stub::fun

RAFT_DECLARE_CUDART(cudaMemcpy);
RAFT_DECLARE_CUDART(cudaMalloc);
RAFT_DECLARE_CUDART(cudaMemset);
RAFT_DECLARE_CUDART(cudaFree);
RAFT_DECLARE_CUDART(cudaStreamCreate);
RAFT_DECLARE_CUDART(cudaStreamCreateWithFlags);
RAFT_DECLARE_CUDART(cudaStreamDestroy);
RAFT_DECLARE_CUDART(cudaDeviceSynchronize);
RAFT_DECLARE_CUDART(cudaStreamSynchronize);
RAFT_DECLARE_CUDART(cudaEventCreate);
RAFT_DECLARE_CUDART(cudaEventRecord);
RAFT_DECLARE_CUDART(cudaEventSynchronize);
RAFT_DECLARE_CUDART(cudaEventElapsedTime);
RAFT_DECLARE_CUDART(cudaEventDestroy);
RAFT_DECLARE_CUDART(cudaGetDevice);
RAFT_DECLARE_CUDART(cudaDriverGetVersion);
RAFT_DECLARE_CUDART(cudaRuntimeGetVersion);
RAFT_DECLARE_CUDART(cudaGetDeviceProperties);

#undef RAFT_DECLARE_CUDART
#endif
}; // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/common/benchmark.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ann_types.hpp"
#include "conf.hpp"
#include "dataset.hpp"
#include "util.hpp"
#include <benchmark/benchmark.h>
#include <raft/core/logger.hpp>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <condition_variable>
#include <cstdint>
#include <fstream>
#include <limits>
#include <memory>
#include <mutex>
#include <numeric>
#include <sstream>
#include <string>
#include <unistd.h>
#include <vector>
namespace raft::bench::ann {
// Shared state used by bench_search to coordinate index loading between
// benchmark threads (thread 0 performs the loading; see bench_search).
std::mutex init_mutex;
std::condition_variable cond_var;
std::atomic_int processed_threads{0};

// Cached across benchmark cases so consecutive cases reusing the same index
// file don't reload the index (see the static index_file in bench_search).
static inline std::unique_ptr<AnnBase> current_algo{nullptr};
static inline std::shared_ptr<AlgoProperty> current_algo_props{nullptr};
using kv_series = std::vector<std::tuple<std::string, std::vector<nlohmann::json>>>;
// Expands a list of configurations by the given parameter overrides: the
// result is the cross product of `configs` with every value list in
// `overrides`, starting at `override_idx`.  Each result carries an
// "override_suffix" used to keep benchmark names unique.
inline auto apply_overrides(const std::vector<nlohmann::json>& configs,
                            const kv_series& overrides,
                            std::size_t override_idx = 0) -> std::vector<nlohmann::json>
{
  std::vector<nlohmann::json> results{};
  if (override_idx >= overrides.size()) {
    // Base case: nothing left to override; tag configs with their index.
    auto n = configs.size();
    for (size_t i = 0; i < n; i++) {
      auto c               = configs[i];
      c["override_suffix"] = n > 1 ? "/" + std::to_string(i) : "";
      results.push_back(c);
    }
    return results;
  }

  // Recurse on the remaining overrides first, then fan out over this key.
  auto expanded    = apply_overrides(configs, overrides, override_idx + 1);
  auto [key, vals] = overrides[override_idx];
  auto n_vals      = vals.size();
  for (size_t i = 0; i < n_vals; i++) {
    const auto& val = vals[i];
    for (auto cfg : expanded) {
      if (n_vals > 1) {
        cfg["override_suffix"] =
          static_cast<std::string>(cfg["override_suffix"]) + "/" + std::to_string(i);
      }
      cfg[key] = val;
      results.push_back(cfg);
    }
  }
  return results;
}
// Convenience overload for a single configuration.
// Bug fix: previously forwarded a hard-coded 0 instead of `override_idx`,
// silently ignoring the caller's starting index.
inline auto apply_overrides(const nlohmann::json& config,
                            const kv_series& overrides,
                            std::size_t override_idx = 0)
{
  return apply_overrides(std::vector{config}, overrides, override_idx);
}
// Records benchmark parameters on the State: numeric and boolean values
// become counters; everything else is concatenated (with '#') into the label.
inline void dump_parameters(::benchmark::State& state, nlohmann::json params)
{
  std::string label;
  bool first = true;
  for (auto& [key, val] : params.items()) {
    if (val.is_number()) {
      state.counters.insert({{key, val}});
    } else if (val.is_boolean()) {
      state.counters.insert({{key, val ? 1.0 : 0.0}});
    } else {
      if (!first) { label += "#"; }
      label += key + "=" + val.dump();
      first = false;
    }
  }
  if (!first) { state.SetLabel(label); }
}
// Returns the algorithm's preferred memory types, with any values present in
// the benchmark config taking precedence over the algorithm's defaults.
inline auto parse_algo_property(AlgoProperty prop, const nlohmann::json& conf) -> AlgoProperty
{
  auto maybe_override = [&conf](const char* key, MemoryType& target) {
    if (conf.contains(key)) { target = parse_memory_type(conf.at(key)); }
  };
  maybe_override("dataset_memory_type", prop.dataset_memory_type);
  maybe_override("query_memory_type", prop.query_memory_type);
  return prop;
};
// Google-benchmark body for the build phase: constructs the algorithm, times
// index construction over the benchmark iterations, then saves the index to
// index.file.  Skips (without error) if the index file already exists and
// --force was not given.
template <typename T>
void bench_build(::benchmark::State& state,
                 std::shared_ptr<const Dataset<T>> dataset,
                 Configuration::Index index,
                 bool force_overwrite)
{
  dump_parameters(state, index.build_param);
  if (file_exists(index.file)) {
    if (force_overwrite) {
      log_info("Overwriting file: %s", index.file.c_str());
    } else {
      return state.SkipWithMessage(
        "Index file already exists (use --force to overwrite the index).");
    }
  }

  std::unique_ptr<ANN<T>> algo;
  try {
    algo = ann::create_algo<T>(
      index.algo, dataset->distance(), dataset->dim(), index.build_param, index.dev_list);
  } catch (const std::exception& e) {
    return state.SkipWithError("Failed to create an algo: " + std::string(e.what()));
  }

  // Place the base set in the memory space the algorithm prefers.
  const auto algo_property = parse_algo_property(algo->get_preference(), index.build_param);

  const T* base_set      = dataset->base_set(algo_property.dataset_memory_type);
  std::size_t index_size = dataset->base_set_size();

  cuda_timer gpu_timer;
  {
    // nvtx ranges make the per-iteration work visible in profiler timelines.
    nvtx_case nvtx{state.name()};
    for (auto _ : state) {
      [[maybe_unused]] auto ntx_lap = nvtx.lap();
      [[maybe_unused]] auto gpu_lap = gpu_timer.lap();
      try {
        algo->build(base_set, index_size, gpu_timer.stream());
      } catch (const std::exception& e) {
        state.SkipWithError(std::string(e.what()));
      }
    }
  }
  state.counters.insert(
    {{"GPU", gpu_timer.total_time() / state.iterations()}, {"index_size", index_size}});

  if (state.skipped()) { return; }
  make_sure_parent_dir_exists(index.file);
  algo->save(index.file);
}
/**
 * Benchmark body for searching a prebuilt ANN index.
 *
 * Thread 0 loads (or reuses the cached) algorithm and publishes the shared state
 * (`current_algo`, `current_algo_props`) guarded by `init_mutex`/`cond_var`; all
 * other benchmark threads wait for that initialization. Each thread then searches
 * disjoint, strided batches of the query set, and finally computes recall against
 * the ground truth on its own partition of the results.
 *
 * @param state             google-benchmark state; multiple threads in throughput mode
 * @param index             index configuration (algo name, params, index file)
 * @param search_param_ix   which entry of `index.search_params` to use
 * @param dataset           shared dataset providing queries and ground truth
 * @param metric_objective  latency or throughput objective, forwarded to the algo
 */
template <typename T>
void bench_search(::benchmark::State& state,
                  Configuration::Index index,
                  std::size_t search_param_ix,
                  std::shared_ptr<const Dataset<T>> dataset,
                  Objective metric_objective)
{
  std::size_t queries_processed = 0;

  const auto& sp_json = index.search_params[search_param_ix];

  if (state.thread_index() == 0) { dump_parameters(state, sp_json); }

  // NB: `k` and `n_queries` are guaranteed to be populated in conf.cpp
  const std::uint32_t k = sp_json["k"];
  // Amount of data processes in one go
  const std::size_t n_queries = sp_json["n_queries"];
  // Round down the query data to a multiple of the batch size to loop over full batches of data
  const std::size_t query_set_size = (dataset->query_set_size() / n_queries) * n_queries;

  if (dataset->query_set_size() < n_queries) {
    std::stringstream msg;
    msg << "Not enough queries in benchmark set. Expected " << n_queries << ", actual "
        << dataset->query_set_size();
    return state.SkipWithError(msg.str());
  }

  // Each thread start from a different offset, so that the queries that they process do not
  // overlap.
  std::ptrdiff_t batch_offset   = (state.thread_index() * n_queries) % query_set_size;
  std::ptrdiff_t queries_stride = state.threads() * n_queries;
  // Output is saved into a contiguous buffer (separate buffers for each thread).
  std::ptrdiff_t out_offset = 0;

  const T* query_set = nullptr;

  if (!file_exists(index.file)) {
    state.SkipWithError("Index file is missing. Run the benchmark in the build mode first.");
    return;
  }

  /**
   * Make sure the first thread loads the algo and dataset
   */
  if (state.thread_index() == 0) {
    std::unique_lock lk(init_mutex);
    // Wait until all threads of the *previous* benchmark case have finished
    // (they bring `processed_threads` down to zero; see the decrement below).
    cond_var.wait(lk, [] { return processed_threads.load(std::memory_order_acquire) == 0; });
    // algo is static to cache it between close search runs to save time on index loading
    static std::string index_file = "";
    if (index.file != index_file) {
      current_algo.reset();
      index_file = index.file;
    }

    std::unique_ptr<typename ANN<T>::AnnSearchParam> search_param;
    ANN<T>* algo;
    try {
      if (!current_algo || (algo = dynamic_cast<ANN<T>*>(current_algo.get())) == nullptr) {
        auto ualgo = ann::create_algo<T>(
          index.algo, dataset->distance(), dataset->dim(), index.build_param, index.dev_list);
        algo = ualgo.get();
        algo->load(index_file);
        current_algo = std::move(ualgo);
      }
      search_param                   = ann::create_search_param<T>(index.algo, sp_json);
      search_param->metric_objective = metric_objective;
    } catch (const std::exception& e) {
      state.SkipWithError("Failed to create an algo: " + std::string(e.what()));
      return;
    }

    auto algo_property = parse_algo_property(algo->get_preference(), sp_json);
    current_algo_props = std::make_shared<AlgoProperty>(algo_property.dataset_memory_type,
                                                        algo_property.query_memory_type);
    if (search_param->needs_dataset()) {
      try {
        algo->set_search_dataset(dataset->base_set(current_algo_props->dataset_memory_type),
                                 dataset->base_set_size());
      } catch (const std::exception& ex) {
        state.SkipWithError("The algorithm '" + index.name +
                            "' requires the base set, but it's not available. " +
                            "Exception: " + std::string(ex.what()));
        return;
      }
    }
    try {
      algo->set_search_param(*search_param);
    } catch (const std::exception& ex) {
      state.SkipWithError("An error occurred setting search parameters: " + std::string(ex.what()));
      return;
    }

    query_set = dataset->query_set(current_algo_props->query_memory_type);
    processed_threads.store(state.threads(), std::memory_order_acq_rel);
    cond_var.notify_all();
  } else {
    std::unique_lock lk(init_mutex);
    // All other threads will wait for the first thread to initialize the algo.
    cond_var.wait(lk, [&state] {
      return processed_threads.load(std::memory_order_acquire) == state.threads();
    });
    // gbench ensures that all threads are synchronized at the start of the benchmark loop.
    // We are accessing shared variables (like current_algo, current_algo_probs) before the
    // benchmark loop, therefore the synchronization here is necessary.
  }
  const auto algo_property = *current_algo_props;
  query_set                = dataset->query_set(algo_property.query_memory_type);

  /**
   * Each thread will manage its own outputs
   */
  std::shared_ptr<buf<float>> distances =
    std::make_shared<buf<float>>(algo_property.query_memory_type, k * query_set_size);
  std::shared_ptr<buf<std::size_t>> neighbors =
    std::make_shared<buf<std::size_t>>(algo_property.query_memory_type, k * query_set_size);

  cuda_timer gpu_timer;
  auto start = std::chrono::high_resolution_clock::now();
  {
    nvtx_case nvtx{state.name()};
    ANN<T>* algo = dynamic_cast<ANN<T>*>(current_algo.get());
    for (auto _ : state) {
      [[maybe_unused]] auto ntx_lap = nvtx.lap();
      [[maybe_unused]] auto gpu_lap = gpu_timer.lap();

      // run the search
      try {
        algo->search(query_set + batch_offset * dataset->dim(),
                     n_queries,
                     k,
                     neighbors->data + out_offset * k,
                     distances->data + out_offset * k,
                     gpu_timer.stream());
      } catch (const std::exception& e) {
        state.SkipWithError(std::string(e.what()));
      }

      // advance to the next batch
      batch_offset = (batch_offset + queries_stride) % query_set_size;
      out_offset   = (out_offset + n_queries) % query_set_size;
      queries_processed += n_queries;
    }
  }
  auto end      = std::chrono::high_resolution_clock::now();
  auto duration = std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count();
  if (state.thread_index() == 0) { state.counters.insert({{"end_to_end", duration}}); }
  state.counters.insert(
    {"Latency", {duration / double(state.iterations()), benchmark::Counter::kAvgThreads}});

  state.SetItemsProcessed(queries_processed);
  if (cudart.found()) {
    double gpu_time_per_iteration = gpu_timer.total_time() / (double)state.iterations();
    state.counters.insert({"GPU", {gpu_time_per_iteration, benchmark::Counter::kAvgThreads}});
  }

  // This will be the total number of queries across all threads
  state.counters.insert({{"total_queries", queries_processed}});

  if (state.skipped()) { return; }

  // assume thread has finished processing successfully at this point
  // last thread to finish processing notifies all.
  // BUGFIX: use pre-decrement. The original `processed_threads-- == 0` compared
  // the value *before* the decrement with zero, so the last thread (taking the
  // counter from 1 to 0) never notified, and the first thread of the next
  // benchmark case could wait on `cond_var` forever (it waits for the counter
  // to reach 0 above and is only re-woken by a notify).
  // NOTE(review): threads that exit early via Skip* above never decrement the
  // counter at all — verify whether that path can starve the next case too.
  if (--processed_threads == 0) { cond_var.notify_all(); }

  // Each thread calculates recall on their partition of queries.
  // evaluate recall
  if (dataset->max_k() >= k) {
    const std::int32_t* gt          = dataset->gt_set();
    const std::uint32_t max_k       = dataset->max_k();
    buf<std::size_t> neighbors_host = neighbors->move(MemoryType::Host);
    std::size_t rows                = std::min(queries_processed, query_set_size);
    std::size_t match_count         = 0;
    std::size_t total_count         = rows * static_cast<size_t>(k);

    // We go through the groundtruth with same stride as the benchmark loop.
    size_t out_offset   = 0;
    size_t batch_offset = (state.thread_index() * n_queries) % query_set_size;
    while (out_offset < rows) {
      for (std::size_t i = 0; i < n_queries; i++) {
        size_t i_orig_idx = batch_offset + i;
        size_t i_out_idx  = out_offset + i;
        if (i_out_idx < rows) {
          for (std::uint32_t j = 0; j < k; j++) {
            auto act_idx = std::int32_t(neighbors_host.data[i_out_idx * k + j]);
            for (std::uint32_t l = 0; l < k; l++) {
              auto exp_idx = gt[i_orig_idx * max_k + l];
              if (act_idx == exp_idx) {
                match_count++;
                break;
              }
            }
          }
        }
      }
      out_offset += n_queries;
      batch_offset = (batch_offset + queries_stride) % query_set_size;
    }
    double actual_recall = static_cast<double>(match_count) / static_cast<double>(total_count);
    state.counters.insert({"Recall", {actual_recall, benchmark::Counter::kAvgThreads}});
  }
}
// Prints the google-benchmark default help followed by this executable's
// non-standard command-line options (build/search mode, prefixes, overrides,
// threading) to stdout.
inline void printf_usage()
{
  ::benchmark::PrintDefaultHelp();
  fprintf(stdout,
          " [--build|--search] \n"
          " [--force]\n"
          " [--data_prefix=<prefix>]\n"
          " [--index_prefix=<prefix>]\n"
          " [--override_kv=<key:value1:value2:...:valueN>]\n"
          " [--mode=<latency|throughput>\n"
          " [--threads=min[:max]]\n"
          " <conf>.json\n"
          "\n"
          "Note the non-standard benchmark parameters:\n"
          "  --build: build mode, will build index\n"
          "  --search: search mode, will search using the built index\n"
          "            one and only one of --build and --search should be specified\n"
          "  --force: force overwriting existing index files\n"
          "  --data_prefix=<prefix>:"
          " prepend <prefix> to dataset file paths specified in the <conf>.json (default = "
          "'data/').\n"
          "  --index_prefix=<prefix>:"
          " prepend <prefix> to index file paths specified in the <conf>.json (default = "
          "'index/').\n"
          "  --override_kv=<key:value1:value2:...:valueN>:"
          " override a build/search key one or more times multiplying the number of configurations;"
          " you can use this parameter multiple times to get the Cartesian product of benchmark"
          " configs.\n"
          "  --mode=<latency|throughput>"
          " run the benchmarks in latency (accumulate times spent in each batch) or "
          " throughput (pipeline batches and measure end-to-end) mode\n"
          "  --threads=min[:max] specify the number threads to use for throughput benchmark."
          " Power of 2 values between 'min' and 'max' will be used. If only 'min' is specified,"
          " then a single test is run with 'min' threads. By default min=1, max=<num hyper"
          " threads>.\n");
}
/**
 * Register one "build" benchmark per index configuration.
 *
 * The `override_suffix` entry of the build parameters (added when key/value
 * overrides multiply the configurations) is appended to the benchmark name;
 * a filesystem-safe version of it ('/' replaced by '-') is appended to the
 * index file path.
 */
template <typename T>
void register_build(std::shared_ptr<const Dataset<T>> dataset,
                    std::vector<Configuration::Index> indices,
                    bool force_overwrite)
{
  for (auto index : indices) {
    const auto bench_suffix = static_cast<std::string>(index.build_param["override_suffix"]);
    index.build_param.erase("override_suffix");

    // The suffix may contain '/', which is not allowed in file names.
    auto file_suffix = bench_suffix;
    std::replace(file_suffix.begin(), file_suffix.end(), '/', '-');
    index.file += file_suffix;

    ::benchmark::RegisterBenchmark(
      index.name + bench_suffix, bench_build<T>, dataset, index, force_overwrite)
      ->Unit(benchmark::kSecond)
      ->MeasureProcessCPUTime()
      ->UseRealTime();
  }
}
/**
 * Register one "search" benchmark per (index, search-parameter-set) pair.
 *
 * In throughput mode the benchmark is additionally registered over the thread
 * range given by `threads` (min, max).
 */
template <typename T>
void register_search(std::shared_ptr<const Dataset<T>> dataset,
                     std::vector<Configuration::Index> indices,
                     Objective metric_objective,
                     const std::vector<int>& threads)
{
  for (auto index : indices) {
    for (std::size_t param_ix = 0; param_ix < index.search_params.size(); param_ix++) {
      auto& params = index.search_params[param_ix];
      const auto bench_suffix = static_cast<std::string>(params["override_suffix"]);
      params.erase("override_suffix");

      auto* bench = ::benchmark::RegisterBenchmark(
        index.name + bench_suffix, bench_search<T>, index, param_ix, dataset, metric_objective);
      bench->Unit(benchmark::kMillisecond);
      /**
       * The following are important for getting accuracy QPS measurements on both CPU
       * and GPU These make sure that
       *   - `end_to_end` ~ (`Time` * `Iterations`)
       *   - `items_per_second` ~ (`total_queries` / `end_to_end`)
       *   - Throughput = `items_per_second`
       */
      bench->MeasureProcessCPUTime();
      bench->UseRealTime();
      if (metric_objective == Objective::THROUGHPUT) {
        bench->ThreadRange(threads[0], threads[1]);
      }
    }
  }
}
/**
 * Load the dataset described by `conf`, apply key/value overrides to the index
 * configurations, and register either build or search benchmarks for them.
 *
 * @param conf              parsed configuration (<conf>.json)
 * @param force_overwrite   overwrite existing index files in build mode
 * @param build_mode        register build benchmarks
 * @param search_mode       register search benchmarks (exactly one of the two is set)
 * @param data_prefix       prefix prepended to dataset file paths
 * @param index_prefix      prefix prepended to index file paths
 * @param override_kv       key/value overrides multiplying the configurations
 * @param metric_objective  latency vs. throughput objective for search
 * @param threads           [min, max] thread range for throughput search
 */
template <typename T>
void dispatch_benchmark(const Configuration& conf,
                        bool force_overwrite,
                        bool build_mode,
                        bool search_mode,
                        std::string data_prefix,
                        std::string index_prefix,
                        kv_series override_kv,
                        Objective metric_objective,
                        const std::vector<int>& threads)
{
  // Record GPU information in the benchmark context when the CUDA runtime is present.
  if (cudart.found()) {
    for (auto [key, value] : cuda_info()) {
      ::benchmark::AddCustomContext(key, value);
    }
  }
  const auto dataset_conf = conf.get_dataset_conf();
  auto base_file          = combine_path(data_prefix, dataset_conf.base_file);
  auto query_file         = combine_path(data_prefix, dataset_conf.query_file);
  auto gt_file            = dataset_conf.groundtruth_neighbors_file;
  if (gt_file.has_value()) { gt_file.emplace(combine_path(data_prefix, gt_file.value())); }
  auto dataset = std::make_shared<BinDataset<T>>(dataset_conf.name,
                                                 base_file,
                                                 dataset_conf.subset_first_row,
                                                 dataset_conf.subset_size,
                                                 query_file,
                                                 dataset_conf.distance,
                                                 gt_file);
  ::benchmark::AddCustomContext("dataset", dataset_conf.name);
  ::benchmark::AddCustomContext("distance", dataset_conf.distance);
  std::vector<Configuration::Index> indices = conf.get_indices();
  if (build_mode) {
    if (file_exists(base_file)) {
      log_info("Using the dataset file '%s'", base_file.c_str());
      ::benchmark::AddCustomContext("n_records", std::to_string(dataset->base_set_size()));
      ::benchmark::AddCustomContext("dim", std::to_string(dataset->dim()));
    } else {
      log_warn("Dataset file '%s' does not exist; benchmarking index building is impossible.",
               base_file.c_str());
    }
    // Each key/value override produces its own index variant (Cartesian product).
    std::vector<Configuration::Index> more_indices{};
    for (auto& index : indices) {
      for (auto param : apply_overrides(index.build_param, override_kv)) {
        auto modified_index        = index;
        modified_index.build_param = param;
        modified_index.file        = combine_path(index_prefix, modified_index.file);
        more_indices.push_back(modified_index);
      }
    }
    register_build<T>(dataset, more_indices, force_overwrite);
  } else if (search_mode) {
    if (file_exists(query_file)) {
      log_info("Using the query file '%s'", query_file.c_str());
      ::benchmark::AddCustomContext("max_n_queries", std::to_string(dataset->query_set_size()));
      ::benchmark::AddCustomContext("dim", std::to_string(dataset->dim()));
      if (gt_file.has_value()) {
        if (file_exists(*gt_file)) {
          log_info("Using the ground truth file '%s'", gt_file->c_str());
          ::benchmark::AddCustomContext("max_k", std::to_string(dataset->max_k()));
        } else {
          log_warn("Ground truth file '%s' does not exist; the recall won't be reported.",
                   gt_file->c_str());
        }
      } else {
        log_warn(
          "Ground truth file is not provided; the recall won't be reported. NB: use "
          "the 'groundtruth_neighbors_file' alongside the 'query_file' key to specify the "
          "path to "
          "the ground truth in your conf.json.");
      }
    } else {
      log_warn("Query file '%s' does not exist; benchmarking search is impossible.",
               query_file.c_str());
    }
    for (auto& index : indices) {
      index.search_params = apply_overrides(index.search_params, override_kv);
      index.file          = combine_path(index_prefix, index.file);
    }
    register_search<T>(dataset, indices, metric_objective, threads);
  }
}
/**
 * Check whether command-line argument `arg` is exactly the flag `pat`.
 * On a match, `result` is set to true (it is left untouched otherwise).
 *
 * @return true iff the argument matched the flag
 */
inline auto parse_bool_flag(const char* arg, const char* pat, bool& result) -> bool
{
  const bool matched = (strcmp(arg, pat) == 0);
  if (matched) { result = true; }
  return matched;
}
/**
 * Parse a command-line argument of the form `<pat>=<value>`.
 *
 * On success, stores `<value>` into `result` and returns true; otherwise leaves
 * `result` untouched and returns false.
 *
 * The original implementation only checked the prefix: it accepted any argument
 * merely *starting* with `pat`, and when `arg` was exactly `pat` (no '='),
 * `arg + n + 1` pointed one past the terminating NUL — undefined behavior when
 * constructing the std::string. Requiring the '=' separator fixes both issues.
 */
inline auto parse_string_flag(const char* arg, const char* pat, std::string& result) -> bool
{
  auto n = strlen(pat);
  if (strncmp(pat, arg, n) == 0 && arg[n] == '=') {
    result = arg + n + 1;
    return true;
  }
  return false;
}
/**
 * Entry point of the benchmark executable.
 *
 * Parses the custom (non-gbench) flags, removes them from argv, loads the
 * configuration JSON (the last positional argument), dispatches benchmark
 * registration per dtype, and finally hands the remaining argv to
 * google-benchmark.
 *
 * @return 0 on success, -1 on a usage/configuration error
 */
inline auto run_main(int argc, char** argv) -> int
{
  bool force_overwrite        = false;
  bool build_mode             = false;
  bool search_mode            = false;
  std::string data_prefix     = "data";
  std::string index_prefix    = "index";
  std::string new_override_kv = "";
  std::string mode            = "latency";
  std::string threads_arg_txt = "";
  std::vector<int> threads    = {1, -1};  // min_thread, max_thread
  std::string log_level_str   = "";
  int raft_log_level          = raft::logger::get(RAFT_NAME).get_level();
  kv_series override_kv{};

  char arg0_default[] = "benchmark";  // NOLINT
  char* args_default  = arg0_default;
  if (!argv) {
    argc = 1;
    argv = &args_default;
  }
  if (argc == 1) {
    printf_usage();
    return -1;
  }
  // The configuration file is the last positional argument; it is removed
  // from argv (via --argc) before the flags are scanned.
  char* conf_path = argv[--argc];
  std::ifstream conf_stream(conf_path);

  for (int i = 1; i < argc; i++) {
    if (parse_bool_flag(argv[i], "--force", force_overwrite) ||
        parse_bool_flag(argv[i], "--build", build_mode) ||
        parse_bool_flag(argv[i], "--search", search_mode) ||
        parse_string_flag(argv[i], "--data_prefix", data_prefix) ||
        parse_string_flag(argv[i], "--index_prefix", index_prefix) ||
        parse_string_flag(argv[i], "--mode", mode) ||
        parse_string_flag(argv[i], "--override_kv", new_override_kv) ||
        parse_string_flag(argv[i], "--threads", threads_arg_txt) ||
        parse_string_flag(argv[i], "--raft_log_level", log_level_str)) {
      // The string flags below accumulate into scratch variables; each one is
      // processed and cleared here so that the flag can be given repeatedly.
      if (!log_level_str.empty()) {
        raft_log_level = std::stoi(log_level_str);
        log_level_str  = "";
      }
      if (!threads_arg_txt.empty()) {
        // "--threads=min[:max]"; with only 'min' given, max = min.
        auto threads_arg = split(threads_arg_txt, ':');
        threads[0]       = std::stoi(threads_arg[0]);
        if (threads_arg.size() > 1) {
          threads[1] = std::stoi(threads_arg[1]);
        } else {
          threads[1] = threads[0];
        }
        threads_arg_txt = "";
      }
      if (!new_override_kv.empty()) {
        // "--override_kv=key:v1:v2:...": each value is parsed as JSON.
        auto kvv = split(new_override_kv, ':');
        auto key = kvv[0];
        std::vector<nlohmann::json> vals{};
        for (std::size_t j = 1; j < kvv.size(); j++) {
          vals.push_back(nlohmann::json::parse(kvv[j]));
        }
        override_kv.emplace_back(key, vals);
        new_override_kv = "";
      }
      // Remove the consumed flag from argv so that google-benchmark does not
      // see it; compact the array in place and re-examine position i.
      for (int j = i; j < argc - 1; j++) {
        argv[j] = argv[j + 1];
      }
      argc--;
      i--;
    }
  }

  raft::logger::get(RAFT_NAME).set_level(raft_log_level);

  Objective metric_objective = Objective::LATENCY;
  if (mode == "throughput") { metric_objective = Objective::THROUGHPUT; }

  int max_threads =
    (metric_objective == Objective::THROUGHPUT) ? std::thread::hardware_concurrency() : 1;
  if (threads[1] == -1) threads[1] = max_threads;

  if (metric_objective == Objective::LATENCY) {
    if (threads[0] != 1 || threads[1] != 1) {
      log_warn("Latency mode enabled. Overriding threads arg, running with single thread.");
      threads = {1, 1};
    }
  }

  if (build_mode == search_mode) {
    log_error("One and only one of --build and --search should be specified");
    printf_usage();
    return -1;
  }

  if (!conf_stream) {
    log_error("Can't open configuration file: %s", conf_path);
    return -1;
  }

  if (cudart.needed() && !cudart.found()) {
    log_warn("cudart library is not found, GPU-based indices won't work.");
  }

  Configuration conf(conf_stream);
  std::string dtype = conf.get_dataset_conf().dtype;

  if (dtype == "float") {
    dispatch_benchmark<float>(conf,
                              force_overwrite,
                              build_mode,
                              search_mode,
                              data_prefix,
                              index_prefix,
                              override_kv,
                              metric_objective,
                              threads);
  } else if (dtype == "uint8") {
    dispatch_benchmark<std::uint8_t>(conf,
                                     force_overwrite,
                                     build_mode,
                                     search_mode,
                                     data_prefix,
                                     index_prefix,
                                     override_kv,
                                     metric_objective,
                                     threads);
  } else if (dtype == "int8") {
    dispatch_benchmark<std::int8_t>(conf,
                                    force_overwrite,
                                    build_mode,
                                    search_mode,
                                    data_prefix,
                                    index_prefix,
                                    override_kv,
                                    metric_objective,
                                    threads);
  } else {
    log_error("datatype '%s' is not supported", dtype.c_str());
    return -1;
  }

  ::benchmark::Initialize(&argc, argv, printf_usage);
  if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return -1;
  ::benchmark::RunSpecifiedBenchmarks();
  ::benchmark::Shutdown();
  // Release a possibly cached ANN object, so that it cannot be alive longer than the handle
  // to a shared library it depends on (dynamic benchmark executable).
  current_algo.reset();
  return 0;
}
}; // namespace raft::bench::ann
| 0 |
rapidsai_public_repos/raft/cpp/bench/ann/src | rapidsai_public_repos/raft/cpp/bench/ann/src/common/cuda_pinned_resource.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <cstddef>
namespace raft::mr {
/**
* @brief `device_memory_resource` derived class that uses cudaMallocHost/Free for
* allocation/deallocation.
*
* This is almost the same as rmm::mr::host::pinned_memory_resource, but it has
* device_memory_resource as base class. Pinned memory can be accessed from device,
* and using this allocator we can create device_mdarray backed by pinned allocator.
*
* TODO(tfeher): it would be preferred to just rely on the existing allocator from rmm
* (pinned_memory_resource), but that is incompatible with the container_policy class
* for device matrix, because the latter expects a device_memory_resource. We shall
* revise this once we progress with Issue https://github.com/rapidsai/raft/issues/1819
*/
class cuda_pinned_resource final : public rmm::mr::device_memory_resource {
 public:
  cuda_pinned_resource()                                       = default;
  ~cuda_pinned_resource() override                             = default;
  cuda_pinned_resource(cuda_pinned_resource const&)            = default;
  cuda_pinned_resource(cuda_pinned_resource&&)                 = default;
  cuda_pinned_resource& operator=(cuda_pinned_resource const&) = default;
  cuda_pinned_resource& operator=(cuda_pinned_resource&&)      = default;

  /**
   * @brief Query whether the resource supports use of non-null CUDA streams for
   * allocation/deallocation. `cuda_pinned_resource` does not support streams.
   *
   * @returns bool false
   */
  [[nodiscard]] bool supports_streams() const noexcept override { return false; }

  /**
   * @brief Query whether the resource supports the get_mem_info API.
   *
   * @return true
   */
  [[nodiscard]] bool supports_get_mem_info() const noexcept override { return true; }

 private:
  /**
   * @brief Allocates pinned host memory of size at least `bytes` using cudaMallocHost.
   *
   * The returned pointer has at least 256B alignment.
   *
   * @note Stream argument is ignored
   *
   * @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled
   *
   * @param bytes The size, in bytes, of the allocation
   * @return void* Pointer to the newly allocated memory
   */
  void* do_allocate(std::size_t bytes, rmm::cuda_stream_view) override
  {
    void* ptr{nullptr};
    RMM_CUDA_TRY_ALLOC(cudaMallocHost(&ptr, bytes));
    return ptr;
  }

  /**
   * @brief Deallocate pinned host memory pointed to by \p ptr (allocated by do_allocate).
   *
   * @note Stream argument is ignored.
   *
   * @throws Nothing.
   *
   * @param ptr Pointer to be deallocated
   */
  void do_deallocate(void* ptr, std::size_t, rmm::cuda_stream_view) override
  {
    RMM_ASSERT_CUDA_SUCCESS(cudaFreeHost(ptr));
  }

  /**
   * @brief Compare this resource to another.
   *
   * Two cuda_pinned_resources always compare equal, because they can each
   * deallocate memory allocated by the other.
   *
   * @throws Nothing.
   *
   * @param other The other resource to compare to
   * @return true If the two resources are equivalent
   * @return false If the two resources are not equal
   */
  [[nodiscard]] bool do_is_equal(device_memory_resource const& other) const noexcept override
  {
    return dynamic_cast<cuda_pinned_resource const*>(&other) != nullptr;
  }

  /**
   * @brief Get free and available memory for memory resource
   *
   * @throws `rmm::cuda_error` if unable to retrieve memory info.
   *
   * @return std::pair containing free_size and total_size of memory
   */
  [[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
    rmm::cuda_stream_view) const override
  {
    std::size_t free_size{};
    std::size_t total_size{};
    RMM_CUDA_TRY(cudaMemGetInfo(&free_size, &total_size));
    return std::make_pair(free_size, total_size);
  }
};
} // namespace raft::mr | 0 |
rapidsai_public_repos/raft/cpp | rapidsai_public_repos/raft/cpp/scripts/analyze_nvcc_log.py | #!/usr/bin/env python3
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from matplotlib import colors
def main(input_path):
    """Analyze an nvcc compile-time log and plot per-file compile times.

    Reads the CSV produced by nvcc's ``--time=<file>`` option (``metric`` is
    assumed to be milliseconds -- it is divided by 1000 to get seconds),
    aggregates the time per source file and compilation phase, prints the ten
    slowest translation units, and writes two stacked-histogram PNGs next to
    the input file: absolute and relative (per-phase fraction) compile times.
    """
    input_path = Path(input_path)
    print("-- loading data")
    df = pd.read_csv(input_path)

    print("-- analyzing data")
    # Strip spaces from column names
    df = df.rename(columns=str.strip)
    df["seconds"] = df["metric"] / 1000
    df["file"] = df["source file name"]
    df["phase"] = df["phase name"].str.strip()

    dfp = (df
           # Remove nvcc driver entries. They don't contain a source file name
           .query("phase!='nvcc (driver)'")
           # Make a pivot table containing files as row, phase (preprocessing,
           # cicc, etc.) as column and the total times as table entries. NOTE:
           # if compiled for multiple archs, the archs will be summed.
           .pivot_table(index="file", values="seconds", columns="phase", aggfunc='sum'))

    # Total time per file, and each phase's fraction of that total.
    dfp_sum = dfp.sum(axis="columns")
    df_fraction = dfp.divide(dfp_sum, axis="index")
    df_fraction["total time"] = dfp_sum
    df_fraction = df_fraction.melt(ignore_index=False, id_vars="total time", var_name="phase", value_name="fraction")
    dfp["total time"] = dfp_sum
    df_absolute = dfp.melt(ignore_index=False, id_vars="total time", var_name="phase", value_name="seconds")

    # host: light red to dark red (preprocessing, cudafe, gcc (compiling))
    # device: light green to dark green (preprocessing, cicc, ptxas)
    palette = {
        "gcc (preprocessing 4)": colors.hsv_to_rgb((0, 1, 1)),
        'cudafe++': colors.hsv_to_rgb((0, 1, .75)),
        'gcc (compiling)': colors.hsv_to_rgb((0, 1, .4)),
        "gcc (preprocessing 1)": colors.hsv_to_rgb((.33, 1, 1)),
        'cicc': colors.hsv_to_rgb((.33, 1, 0.75)),
        'ptxas': colors.hsv_to_rgb((.33, 1, 0.4)),
        'fatbinary': "grey",
    }

    print("-- Ten longest translation units:")
    # Truncate long paths from the left so the table fits the display width.
    colwidth = pd.get_option('display.max_colwidth') - 1
    dfp = dfp.reset_index()
    dfp["file"] = dfp["file"].apply(lambda s: s[-colwidth:])
    print(dfp.sort_values("total time", ascending=False).reset_index().loc[:10])

    print("-- Plotting absolute compile times")
    abs_out_path = f"{input_path}.absolute.compile_times.png"
    sns.displot(
        df_absolute.sort_values("total time").reset_index(),
        y="file",
        hue="phase",
        hue_order=reversed(
            ["gcc (preprocessing 4)", 'cudafe++', 'gcc (compiling)',
             "gcc (preprocessing 1)", 'cicc', 'ptxas',
             'fatbinary',
             ]),
        palette=palette,
        weights="seconds",
        multiple="stack",
        kind="hist",
        height=20,
    )
    plt.xlabel("seconds");
    plt.savefig(abs_out_path)
    print(f"-- Wrote absolute compile time plot to {abs_out_path}")

    print("-- Plotting relative compile times")
    rel_out_path = f"{input_path}.relative.compile_times.png"
    sns.displot(
        df_fraction.sort_values('total time').reset_index(),
        y="file",
        hue="phase",
        hue_order=reversed(["gcc (preprocessing 4)", 'cudafe++', 'gcc (compiling)',
                            "gcc (preprocessing 1)", 'cicc', 'ptxas',
                            'fatbinary',
                            ]),
        palette=palette,
        weights="fraction",
        multiple="stack",
        kind="hist",
        height=15,
    )
    plt.xlabel("fraction");
    plt.savefig(rel_out_path)
    print(f"-- Wrote relative compile time plot to {rel_out_path}")
if __name__ == "__main__":
    if len(sys.argv) != 2:
        # BUGFIX: the original called `printf(...)`, which does not exist in
        # Python and raised a NameError before the usage text could be shown;
        # it also fell through to `sys.argv[1]` afterwards (IndexError when no
        # argument was given). Print the usage and exit instead.
        print("""NVCC log analyzer
Analyzes nvcc logs and outputs a figure with highest ranking translation
units.
Usage:
    python analyze_nvcc_log.py <nvcc_log_file.csv>
    cpp/scripts/analyze_nvcc_log.py <nvcc_log_file.csv>
Generate the nvcc log file by adding:
    list(APPEND RAFT_CUDA_FLAGS "--time=CMakeFiles/nvcc_compile_log.csv")
to cpp/cmake/modules/ConfigureCUDA.cmake.
""")
        sys.exit(1)
    input_path = Path(sys.argv[1])
    if not input_path.exists():
        print(f"Path {input_path} does not exist.")
    else:
        main(input_path)
rapidsai_public_repos/raft/cpp | rapidsai_public_repos/raft/cpp/scripts/__clang_cuda_additional_intrinsics.h | // Copyright (c) 2022, NVIDIA CORPORATION.
#ifndef __CLANG_CUDA_ADDITIONAL_INTRINSICS_H__
#define __CLANG_CUDA_ADDITIONAL_INTRINSICS_H__
#ifndef __CUDA__
#error "This file is for CUDA compilation only."
#endif
// for some of these macros, see cuda_fp16.hpp
#if defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320))
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
#define __LDG_PTR "l"
#define __LBITS "64"
#else
#define __LDG_PTR "r"
#define __LBITS "32"
#endif // (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
#define __NOARG
// Generators for cache-operator load intrinsics (__ld<cop>) that clang's CUDA
// headers do not provide.  Each expands to a __device__ function issuing an
// inline-PTX `ld.<cop>[.v2|.v4].<ptx_typ>` instruction.
//   cop     - PTX cache operator (cg, ca, cs, lu, cv)
//   c_typ   - C/C++ value type of the overload
//   int_typ - type used for the inline-asm operand(s)
//   ptx_typ - PTX type suffix (e.g. "s32", "f64")
//   inl_typ - inline-asm register constraint ("h", "r", "l", "f", "d", ...)
//   mem     - either __NOARG (empty) or `: "memory"` clobber, as used by the
//             lu/cv invocations below
#define __MAKE_LD(cop, c_typ, int_typ, ptx_typ, inl_typ, mem)                            \
  __device__ __forceinline__ c_typ __ld##cop(const c_typ* addr)                          \
  {                                                                                      \
    int_typ out;                                                                         \
    asm("ld." #cop "." ptx_typ " %0, [%1];" : "=" inl_typ(out) : __LDG_PTR(addr) mem);   \
    return (c_typ)out;                                                                   \
  }

// Two-element vector variant: loads {x, y} with a single ld.<cop>.v2.
#define __MAKE_LD2(cop, c_typ, int_typ, ptx_typ, inl_typ, mem) \
  __device__ __forceinline__ c_typ __ld##cop(const c_typ* addr) \
  { \
    int_typ out1, out2; \
    asm("ld." #cop ".v2." ptx_typ " {%0, %1}, [%2];" \
        : "=" inl_typ(out1), "=" inl_typ(out2) \
        : __LDG_PTR(addr) mem); \
    c_typ out; \
    out.x = out1; \
    out.y = out2; \
    return out; \
  }

// Four-element vector variant: loads {x, y, z, w} with a single ld.<cop>.v4.
#define __MAKE_LD4(cop, c_typ, int_typ, ptx_typ, inl_typ, mem) \
  __device__ __forceinline__ c_typ __ld##cop(const c_typ* addr) \
  { \
    int_typ out1, out2, out3, out4; \
    asm("ld." #cop ".v4." ptx_typ " {%0, %1, %2, %3}, [%4];" \
        : "=" inl_typ(out1), "=" inl_typ(out2), "=" inl_typ(out3), "=" inl_typ(out4) \
        : __LDG_PTR(addr) mem); \
    c_typ out; \
    out.x = out1; \
    out.y = out2; \
    out.z = out3; \
    out.w = out4; \
    return out; \
  }
__MAKE_LD(cg, char, short, "s8", "h", __NOARG)
__MAKE_LD(cg, signed char, short, "s8", "h", __NOARG)
__MAKE_LD(cg, unsigned char, short, "u8", "h", __NOARG)
__MAKE_LD(cg, short, short, "s16", "h", __NOARG)
__MAKE_LD(cg, unsigned short, unsigned short, "u16", "h", __NOARG)
__MAKE_LD(cg, int, int, "s32", "r", __NOARG)
__MAKE_LD(cg, unsigned int, unsigned int, "u32", "r", __NOARG)
__MAKE_LD(cg, long, long, "s" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(cg, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(cg, long long, long long, "s64", "l", __NOARG)
__MAKE_LD(cg, unsigned long long, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD(cg, float, float, "f32", "f", __NOARG)
__MAKE_LD(cg, double, double, "f64", "d", __NOARG)
__MAKE_LD2(cg, char2, short, "s8", "h", __NOARG)
__MAKE_LD2(cg, uchar2, short, "u8", "h", __NOARG)
__MAKE_LD2(cg, short2, short, "s16", "h", __NOARG)
__MAKE_LD2(cg, ushort2, unsigned short, "u16", "h", __NOARG)
__MAKE_LD2(cg, int2, int, "s32", "r", __NOARG)
__MAKE_LD2(cg, uint2, unsigned int, "u32", "r", __NOARG)
__MAKE_LD2(cg, longlong2, long long, "s64", "l", __NOARG)
__MAKE_LD2(cg, ulonglong2, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD2(cg, float2, float, "f32", "f", __NOARG)
__MAKE_LD2(cg, double2, double, "f64", "d", __NOARG)
__MAKE_LD4(cg, char4, short, "s8", "h", __NOARG)
__MAKE_LD4(cg, uchar4, short, "u8", "h", __NOARG)
__MAKE_LD4(cg, short4, short, "s16", "h", __NOARG)
__MAKE_LD4(cg, ushort4, unsigned short, "u16", "h", __NOARG)
__MAKE_LD4(cg, int4, int, "s32", "r", __NOARG)
__MAKE_LD4(cg, uint4, unsigned int, "u32", "r", __NOARG)
__MAKE_LD4(cg, float4, float, "f32", "f", __NOARG)
__MAKE_LD(ca, char, short, "s8", "h", __NOARG)
__MAKE_LD(ca, signed char, short, "s8", "h", __NOARG)
__MAKE_LD(ca, unsigned char, short, "u8", "h", __NOARG)
__MAKE_LD(ca, short, short, "s16", "h", __NOARG)
__MAKE_LD(ca, unsigned short, unsigned short, "u16", "h", __NOARG)
__MAKE_LD(ca, int, int, "s32", "r", __NOARG)
__MAKE_LD(ca, unsigned int, unsigned int, "u32", "r", __NOARG)
__MAKE_LD(ca, long, long, "s" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(ca, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(ca, long long, long long, "s64", "l", __NOARG)
__MAKE_LD(ca, unsigned long long, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD(ca, float, float, "f32", "f", __NOARG)
__MAKE_LD(ca, double, double, "f64", "d", __NOARG)
__MAKE_LD2(ca, char2, short, "s8", "h", __NOARG)
__MAKE_LD2(ca, uchar2, short, "u8", "h", __NOARG)
__MAKE_LD2(ca, short2, short, "s16", "h", __NOARG)
__MAKE_LD2(ca, ushort2, unsigned short, "u16", "h", __NOARG)
__MAKE_LD2(ca, int2, int, "s32", "r", __NOARG)
__MAKE_LD2(ca, uint2, unsigned int, "u32", "r", __NOARG)
__MAKE_LD2(ca, longlong2, long long, "s64", "l", __NOARG)
__MAKE_LD2(ca, ulonglong2, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD2(ca, float2, float, "f32", "f", __NOARG)
__MAKE_LD2(ca, double2, double, "f64", "d", __NOARG)
__MAKE_LD4(ca, char4, short, "s8", "h", __NOARG)
__MAKE_LD4(ca, uchar4, short, "u8", "h", __NOARG)
__MAKE_LD4(ca, short4, short, "s16", "h", __NOARG)
__MAKE_LD4(ca, ushort4, unsigned short, "u16", "h", __NOARG)
__MAKE_LD4(ca, int4, int, "s32", "r", __NOARG)
__MAKE_LD4(ca, uint4, unsigned int, "u32", "r", __NOARG)
__MAKE_LD4(ca, float4, float, "f32", "f", __NOARG)
__MAKE_LD(cs, char, short, "s8", "h", __NOARG)
__MAKE_LD(cs, signed char, short, "s8", "h", __NOARG)
__MAKE_LD(cs, unsigned char, short, "u8", "h", __NOARG)
__MAKE_LD(cs, short, short, "s16", "h", __NOARG)
__MAKE_LD(cs, unsigned short, unsigned short, "u16", "h", __NOARG)
__MAKE_LD(cs, int, int, "s32", "r", __NOARG)
__MAKE_LD(cs, unsigned int, unsigned int, "u32", "r", __NOARG)
__MAKE_LD(cs, long, long, "s" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(cs, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(cs, long long, long long, "s64", "l", __NOARG)
__MAKE_LD(cs, unsigned long long, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD(cs, float, float, "f32", "f", __NOARG)
__MAKE_LD(cs, double, double, "f64", "d", __NOARG)
__MAKE_LD2(cs, char2, short, "s8", "h", __NOARG)
__MAKE_LD2(cs, uchar2, short, "u8", "h", __NOARG)
__MAKE_LD2(cs, short2, short, "s16", "h", __NOARG)
__MAKE_LD2(cs, ushort2, unsigned short, "u16", "h", __NOARG)
__MAKE_LD2(cs, int2, int, "s32", "r", __NOARG)
__MAKE_LD2(cs, uint2, unsigned int, "u32", "r", __NOARG)
__MAKE_LD2(cs, longlong2, long long, "s64", "l", __NOARG)
__MAKE_LD2(cs, ulonglong2, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD2(cs, float2, float, "f32", "f", __NOARG)
__MAKE_LD2(cs, double2, double, "f64", "d", __NOARG)
__MAKE_LD4(cs, char4, short, "s8", "h", __NOARG)
__MAKE_LD4(cs, uchar4, short, "u8", "h", __NOARG)
__MAKE_LD4(cs, short4, short, "s16", "h", __NOARG)
__MAKE_LD4(cs, ushort4, unsigned short, "u16", "h", __NOARG)
__MAKE_LD4(cs, int4, int, "s32", "r", __NOARG)
__MAKE_LD4(cs, uint4, unsigned int, "u32", "r", __NOARG)
__MAKE_LD4(cs, float4, float, "f32", "f", __NOARG)
__MAKE_LD(lu, char, short, "s8", "h", : "memory")
__MAKE_LD(lu, signed char, short, "s8", "h", : "memory")
__MAKE_LD(lu, unsigned char, short, "u8", "h", : "memory")
__MAKE_LD(lu, short, short, "s16", "h", : "memory")
__MAKE_LD(lu, unsigned short, unsigned short, "u16", "h", : "memory")
__MAKE_LD(lu, int, int, "s32", "r", : "memory")
__MAKE_LD(lu, unsigned int, unsigned int, "u32", "r", : "memory")
__MAKE_LD(lu, long, long, "s" __LBITS, __LDG_PTR, : "memory")
__MAKE_LD(lu, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, : "memory")
__MAKE_LD(lu, long long, long long, "s64", "l", : "memory")
__MAKE_LD(lu, unsigned long long, unsigned long long, "u64", "l", : "memory")
__MAKE_LD(lu, float, float, "f32", "f", : "memory")
__MAKE_LD(lu, double, double, "f64", "d", : "memory")
__MAKE_LD2(lu, char2, short, "s8", "h", : "memory")
__MAKE_LD2(lu, uchar2, short, "u8", "h", : "memory")
__MAKE_LD2(lu, short2, short, "s16", "h", : "memory")
__MAKE_LD2(lu, ushort2, unsigned short, "u16", "h", : "memory")
__MAKE_LD2(lu, int2, int, "s32", "r", : "memory")
__MAKE_LD2(lu, uint2, unsigned int, "u32", "r", : "memory")
__MAKE_LD2(lu, longlong2, long long, "s64", "l", : "memory")
__MAKE_LD2(lu, ulonglong2, unsigned long long, "u64", "l", : "memory")
__MAKE_LD2(lu, float2, float, "f32", "f", : "memory")
__MAKE_LD2(lu, double2, double, "f64", "d", : "memory")
__MAKE_LD4(lu, char4, short, "s8", "h", : "memory")
__MAKE_LD4(lu, uchar4, short, "u8", "h", : "memory")
__MAKE_LD4(lu, short4, short, "s16", "h", : "memory")
__MAKE_LD4(lu, ushort4, unsigned short, "u16", "h", : "memory")
__MAKE_LD4(lu, int4, int, "s32", "r", : "memory")
__MAKE_LD4(lu, uint4, unsigned int, "u32", "r", : "memory")
__MAKE_LD4(lu, float4, float, "f32", "f", : "memory")
__MAKE_LD(cv, char, short, "s8", "h", : "memory")
__MAKE_LD(cv, signed char, short, "s8", "h", : "memory")
__MAKE_LD(cv, unsigned char, short, "u8", "h", : "memory")
__MAKE_LD(cv, short, short, "s16", "h", : "memory")
__MAKE_LD(cv, unsigned short, unsigned short, "u16", "h", : "memory")
__MAKE_LD(cv, int, int, "s32", "r", : "memory")
__MAKE_LD(cv, unsigned int, unsigned int, "u32", "r", : "memory")
__MAKE_LD(cv, long, long, "s" __LBITS, __LDG_PTR, : "memory")
__MAKE_LD(cv, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, : "memory")
__MAKE_LD(cv, long long, long long, "s64", "l", : "memory")
__MAKE_LD(cv, unsigned long long, unsigned long long, "u64", "l", : "memory")
__MAKE_LD(cv, float, float, "f32", "f", : "memory")
__MAKE_LD(cv, double, double, "f64", "d", : "memory")
__MAKE_LD2(cv, char2, short, "s8", "h", : "memory")
__MAKE_LD2(cv, uchar2, short, "u8", "h", : "memory")
__MAKE_LD2(cv, short2, short, "s16", "h", : "memory")
__MAKE_LD2(cv, ushort2, unsigned short, "u16", "h", : "memory")
__MAKE_LD2(cv, int2, int, "s32", "r", : "memory")
__MAKE_LD2(cv, uint2, unsigned int, "u32", "r", : "memory")
__MAKE_LD2(cv, longlong2, long long, "s64", "l", : "memory")
__MAKE_LD2(cv, ulonglong2, unsigned long long, "u64", "l", : "memory")
__MAKE_LD2(cv, float2, float, "f32", "f", : "memory")
__MAKE_LD2(cv, double2, double, "f64", "d", : "memory")
__MAKE_LD4(cv, char4, short, "s8", "h", : "memory")
__MAKE_LD4(cv, uchar4, short, "u8", "h", : "memory")
__MAKE_LD4(cv, short4, short, "s16", "h", : "memory")
__MAKE_LD4(cv, ushort4, unsigned short, "u16", "h", : "memory")
__MAKE_LD4(cv, int4, int, "s32", "r", : "memory")
__MAKE_LD4(cv, uint4, unsigned int, "u32", "r", : "memory")
__MAKE_LD4(cv, float4, float, "f32", "f", : "memory")
// Emits a __st<cop> overload storing a scalar `c_typ` with the PTX cache
// operator `cop` (st.<cop>.<ptx_typ>).  `int_typ` is the register-sized type
// the value is widened to for the asm operand and `inl_typ` its inline-asm
// constraint ("h"/"r"/"l"/"f"/"d").
#define __MAKE_ST(cop, c_typ, int_typ, ptx_typ, inl_typ) \
__device__ __forceinline__ void __st##cop(c_typ* addr, c_typ v) \
{ \
asm("st." #cop "." ptx_typ " [%0], %1;" ::__LDG_PTR(addr), inl_typ((int_typ)v) : "memory"); \
}
// Same as __MAKE_ST but for two-component vector types: both components are
// widened to `int_typ` and stored with a single st.<cop>.v2.<ptx_typ>.
#define __MAKE_ST2(cop, c_typ, int_typ, ptx_typ, inl_typ) \
__device__ __forceinline__ void __st##cop(c_typ* addr, c_typ v) \
{ \
int_typ v1 = v.x, v2 = v.y; \
asm("st." #cop ".v2." ptx_typ " [%0], {%1, %2};" ::__LDG_PTR(addr), inl_typ(v1), inl_typ(v2) \
: "memory"); \
}
// Same as __MAKE_ST but for four-component vector types: all components are
// widened to `int_typ` and stored with a single st.<cop>.v4.<ptx_typ>.
// FIX: a store returns nothing — the original declared the return type as
// `c_typ` yet never returned a value (undefined behaviour / warning in C++).
// It is now `void`, consistent with __MAKE_ST and __MAKE_ST2 above.
#define __MAKE_ST4(cop, c_typ, int_typ, ptx_typ, inl_typ)                                       \
  __device__ __forceinline__ void __st##cop(c_typ* addr, c_typ v)                               \
  {                                                                                             \
    int_typ v1 = v.x, v2 = v.y, v3 = v.z, v4 = v.w;                                             \
    asm("st." #cop ".v4." ptx_typ " [%0], {%1, %2, %3, %4};" ::__LDG_PTR(addr),                 \
        inl_typ(v1),                                                                            \
        inl_typ(v2),                                                                            \
        inl_typ(v3),                                                                            \
        inl_typ(v4)                                                                             \
        : "memory");                                                                            \
  }
// ".wb" (write-back, the default caching behaviour) store variants.
__MAKE_ST(wb, char, short, "s8", "h")
__MAKE_ST(wb, signed char, short, "s8", "h")
__MAKE_ST(wb, unsigned char, short, "u8", "h")
__MAKE_ST(wb, short, short, "s16", "h")
__MAKE_ST(wb, unsigned short, unsigned short, "u16", "h")
__MAKE_ST(wb, int, int, "s32", "r")
__MAKE_ST(wb, unsigned int, unsigned int, "u32", "r")
__MAKE_ST(wb, long, long, "s" __LBITS, __LDG_PTR)
__MAKE_ST(wb, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR)
__MAKE_ST(wb, long long, long long, "s64", "l")
__MAKE_ST(wb, unsigned long long, unsigned long long, "u64", "l")
__MAKE_ST(wb, float, float, "f32", "f")
__MAKE_ST(wb, double, double, "f64", "d")
__MAKE_ST2(wb, char2, short, "s8", "h")
__MAKE_ST2(wb, uchar2, short, "u8", "h")
__MAKE_ST2(wb, short2, short, "s16", "h")
__MAKE_ST2(wb, ushort2, unsigned short, "u16", "h")
__MAKE_ST2(wb, int2, int, "s32", "r")
__MAKE_ST2(wb, uint2, unsigned int, "u32", "r")
__MAKE_ST2(wb, longlong2, long long, "s64", "l")
__MAKE_ST2(wb, ulonglong2, unsigned long long, "u64", "l")
__MAKE_ST2(wb, float2, float, "f32", "f")
__MAKE_ST2(wb, double2, double, "f64", "d")
__MAKE_ST4(wb, char4, short, "s8", "h")
__MAKE_ST4(wb, uchar4, short, "u8", "h")
__MAKE_ST4(wb, short4, short, "s16", "h")
__MAKE_ST4(wb, ushort4, unsigned short, "u16", "h")
__MAKE_ST4(wb, int4, int, "s32", "r")
__MAKE_ST4(wb, uint4, unsigned int, "u32", "r")
__MAKE_ST4(wb, float4, float, "f32", "f")
// ".cg" (cache-global: bypass L1, cache in L2) store variants.
__MAKE_ST(cg, char, short, "s8", "h")
__MAKE_ST(cg, signed char, short, "s8", "h")
__MAKE_ST(cg, unsigned char, short, "u8", "h")
__MAKE_ST(cg, short, short, "s16", "h")
__MAKE_ST(cg, unsigned short, unsigned short, "u16", "h")
__MAKE_ST(cg, int, int, "s32", "r")
__MAKE_ST(cg, unsigned int, unsigned int, "u32", "r")
__MAKE_ST(cg, long, long, "s" __LBITS, __LDG_PTR)
__MAKE_ST(cg, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR)
__MAKE_ST(cg, long long, long long, "s64", "l")
__MAKE_ST(cg, unsigned long long, unsigned long long, "u64", "l")
__MAKE_ST(cg, float, float, "f32", "f")
__MAKE_ST(cg, double, double, "f64", "d")
__MAKE_ST2(cg, char2, short, "s8", "h")
__MAKE_ST2(cg, uchar2, short, "u8", "h")
__MAKE_ST2(cg, short2, short, "s16", "h")
__MAKE_ST2(cg, ushort2, unsigned short, "u16", "h")
__MAKE_ST2(cg, int2, int, "s32", "r")
__MAKE_ST2(cg, uint2, unsigned int, "u32", "r")
__MAKE_ST2(cg, longlong2, long long, "s64", "l")
__MAKE_ST2(cg, ulonglong2, unsigned long long, "u64", "l")
__MAKE_ST2(cg, float2, float, "f32", "f")
__MAKE_ST2(cg, double2, double, "f64", "d")
__MAKE_ST4(cg, char4, short, "s8", "h")
__MAKE_ST4(cg, uchar4, short, "u8", "h")
__MAKE_ST4(cg, short4, short, "s16", "h")
__MAKE_ST4(cg, ushort4, unsigned short, "u16", "h")
__MAKE_ST4(cg, int4, int, "s32", "r")
__MAKE_ST4(cg, uint4, unsigned int, "u32", "r")
__MAKE_ST4(cg, float4, float, "f32", "f")
// ".cs" (cache-streaming: evict-first policy for streaming data) store variants.
__MAKE_ST(cs, char, short, "s8", "h")
__MAKE_ST(cs, signed char, short, "s8", "h")
__MAKE_ST(cs, unsigned char, short, "u8", "h")
__MAKE_ST(cs, short, short, "s16", "h")
__MAKE_ST(cs, unsigned short, unsigned short, "u16", "h")
__MAKE_ST(cs, int, int, "s32", "r")
__MAKE_ST(cs, unsigned int, unsigned int, "u32", "r")
__MAKE_ST(cs, long, long, "s" __LBITS, __LDG_PTR)
__MAKE_ST(cs, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR)
__MAKE_ST(cs, long long, long long, "s64", "l")
__MAKE_ST(cs, unsigned long long, unsigned long long, "u64", "l")
__MAKE_ST(cs, float, float, "f32", "f")
__MAKE_ST(cs, double, double, "f64", "d")
__MAKE_ST2(cs, char2, short, "s8", "h")
__MAKE_ST2(cs, uchar2, short, "u8", "h")
__MAKE_ST2(cs, short2, short, "s16", "h")
__MAKE_ST2(cs, ushort2, unsigned short, "u16", "h")
__MAKE_ST2(cs, int2, int, "s32", "r")
__MAKE_ST2(cs, uint2, unsigned int, "u32", "r")
__MAKE_ST2(cs, longlong2, long long, "s64", "l")
__MAKE_ST2(cs, ulonglong2, unsigned long long, "u64", "l")
__MAKE_ST2(cs, float2, float, "f32", "f")
__MAKE_ST2(cs, double2, double, "f64", "d")
__MAKE_ST4(cs, char4, short, "s8", "h")
__MAKE_ST4(cs, uchar4, short, "u8", "h")
__MAKE_ST4(cs, short4, short, "s16", "h")
__MAKE_ST4(cs, ushort4, unsigned short, "u16", "h")
__MAKE_ST4(cs, int4, int, "s32", "r")
__MAKE_ST4(cs, uint4, unsigned int, "u32", "r")
__MAKE_ST4(cs, float4, float, "f32", "f")
// ".wt" (write-through to system memory) store variants.
__MAKE_ST(wt, char, short, "s8", "h")
__MAKE_ST(wt, signed char, short, "s8", "h")
__MAKE_ST(wt, unsigned char, short, "u8", "h")
__MAKE_ST(wt, short, short, "s16", "h")
__MAKE_ST(wt, unsigned short, unsigned short, "u16", "h")
__MAKE_ST(wt, int, int, "s32", "r")
__MAKE_ST(wt, unsigned int, unsigned int, "u32", "r")
__MAKE_ST(wt, long, long, "s" __LBITS, __LDG_PTR)
__MAKE_ST(wt, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR)
__MAKE_ST(wt, long long, long long, "s64", "l")
__MAKE_ST(wt, unsigned long long, unsigned long long, "u64", "l")
__MAKE_ST(wt, float, float, "f32", "f")
__MAKE_ST(wt, double, double, "f64", "d")
__MAKE_ST2(wt, char2, short, "s8", "h")
__MAKE_ST2(wt, uchar2, short, "u8", "h")
__MAKE_ST2(wt, short2, short, "s16", "h")
__MAKE_ST2(wt, ushort2, unsigned short, "u16", "h")
__MAKE_ST2(wt, int2, int, "s32", "r")
__MAKE_ST2(wt, uint2, unsigned int, "u32", "r")
__MAKE_ST2(wt, longlong2, long long, "s64", "l")
__MAKE_ST2(wt, ulonglong2, unsigned long long, "u64", "l")
__MAKE_ST2(wt, float2, float, "f32", "f")
__MAKE_ST2(wt, double2, double, "f64", "d")
__MAKE_ST4(wt, char4, short, "s8", "h")
__MAKE_ST4(wt, uchar4, short, "u8", "h")
__MAKE_ST4(wt, short4, short, "s16", "h")
__MAKE_ST4(wt, ushort4, unsigned short, "u16", "h")
__MAKE_ST4(wt, int4, int, "s32", "r")
__MAKE_ST4(wt, uint4, unsigned int, "u32", "r")
__MAKE_ST4(wt, float4, float, "f32", "f")
// Tear down the generator macros and internal helpers so they do not leak
// into code that includes this header.
#undef __MAKE_ST4
#undef __MAKE_ST2
#undef __MAKE_ST
#undef __MAKE_LD4
#undef __MAKE_LD2
#undef __MAKE_LD
#undef __NOARG
#undef __LBITS
#undef __LDG_PTR
#endif // defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320))
#endif // defined(__CLANG_CUDA_ADDITIONAL_INTRINSICS_H__)
| 0 |
rapidsai_public_repos/raft/cpp | rapidsai_public_repos/raft/cpp/scripts/include_checker.py | # Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import re
import os
import subprocess
import argparse
# Matches an #include directive and captures its target (with quotes/brackets).
IncludeRegex = re.compile(r"\s*#include\s*(\S+)")
# Strips C++-style line comments so commented-out includes are ignored.
RemoveComments = re.compile(r"//.*")
# Paths matching this (vendored thirdparty code) are skipped entirely.
exclusion_regex = re.compile(r".*thirdparty.*")
def parse_args():
    """Build and parse the command-line options for the include checker.

    Returns the parsed namespace with an extra ``regex_compiled`` attribute
    holding the compiled source-file filter.
    """
    parser = argparse.ArgumentParser(
        "Checks for a consistent '#include' syntax")
    parser.add_argument("--regex", type=str,
                        default=r"[.](cu|cuh|h|hpp|hxx|cpp)$",
                        help="Regex string to filter in sources")
    parser.add_argument("dirs", type=str, nargs="*",
                        help="List of dirs where to find sources")
    parsed = parser.parse_args()
    parsed.regex_compiled = re.compile(parsed.regex)
    return parsed
def list_all_source_file(file_regex, srcdirs):
    """Recursively gather every file under ``srcdirs`` whose name matches
    ``file_regex``, skipping anything inside a thirdparty directory."""
    found = []
    for srcdir in srcdirs:
        for root, _dirs, files in os.walk(srcdir):
            if re.search(exclusion_regex, root):
                continue
            found.extend(os.path.join(root, name)
                         for name in files if re.search(file_regex, name))
    return found
def check_includes_in(src):
    """Check every #include in ``src`` for consistent quote/bracket usage.

    A ``#include "..."`` must name a file that exists relative to the
    including file's directory; a ``#include <...>`` must not.

    Parameters
    ----------
    src : str
        Path of the source file to check.

    Returns
    -------
    list of str
        Human-readable error strings; empty when the file is clean.
    """
    errs = []
    # `dir` shadowed the builtin in the original; renamed for clarity
    src_dir = os.path.dirname(src)
    # `with` guarantees the handle is closed (the original leaked it)
    with open(src) as f:
        for line_number, line in enumerate(f):
            line = RemoveComments.sub("", line)
            match = IncludeRegex.search(line)
            if match is None:
                continue
            val = match.group(1)
            inc_file = val[1:-1]  # strip out the surrounding " or <>
            full_path = os.path.join(src_dir, inc_file)
            line_num = line_number + 1
            if val[0] == "\"" and not os.path.exists(full_path):
                errs.append("Line:%d use #include <...>" % line_num)
            elif val[0] == "<" and os.path.exists(full_path):
                errs.append("Line:%d use #include \"...\"" % line_num)
    return errs
def main():
    """Entry point: scan the requested dirs and report inconsistent includes;
    exits non-zero when any file fails the check."""
    args = parse_args()
    failures = {}
    for src in list_all_source_file(args.regex_compiled, args.dirs):
        errs = check_includes_in(src)
        if errs:
            failures[src] = errs
    if not failures:
        print("include-check PASSED")
        return
    print("include-check FAILED! See below for errors...")
    for src, errs in failures.items():
        print("File: %s" % src)
        for e in errs:
            print(" %s" % e)
    sys.exit(-1)


if __name__ == "__main__":
    main()
| 0 |
rapidsai_public_repos/raft/cpp | rapidsai_public_repos/raft/cpp/scripts/gitutils.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import os
import re
def isFileEmpty(f):
    """Return True when the file at path ``f`` contains zero bytes."""
    size = os.stat(f).st_size
    return size == 0
def __git(*opts):
    """Execute a git command (options joined with spaces) and return its
    stdout with the trailing newline removed."""
    cmdline = "git " + " ".join(opts)
    raw = subprocess.check_output(cmdline, shell=True)
    return raw.decode("UTF-8").rstrip("\n")
def __gitdiff(*opts):
    """Run `git diff` with the pager disabled and return its output."""
    diff_args = ("--no-pager", "diff") + opts
    return __git(*diff_args)
def branch():
    """Return the name of the currently checked-out git branch."""
    name = __git("rev-parse", "--abbrev-ref", "HEAD")
    return name.rstrip()
def repo_version():
    """Determine the repo version via `git describe`.

    Returns
    -------
    str
        The full version in the format 'v#.#.#{a|b|rc}' (the most recent
        tag reachable from HEAD).
    """
    return __git("describe", "--tags", "--abbrev=0")
def repo_version_major_minor():
    """Return only the '{major}.{minor}' portion of the repo version.

    Returns
    -------
    str or None
        '{major}.{minor}' (or just '{major}' when no minor component is
        present in the tag); None when the version cannot be parsed.
    """
    full_repo_version = repo_version()
    match = re.match(r"^v?(?P<major>[0-9]+)(?:\.(?P<minor>[0-9]+))?",
                     full_repo_version)
    if match is None:
        print(" [DEBUG] Could not determine repo major minor version. "
              f"Full repo version: {full_repo_version}.")
        return None
    major = match.group("major")
    minor = match.group("minor")
    return f"{major}.{minor}" if minor else major
def determine_merge_commit(current_branch="HEAD"):
    """
    When running outside of CI, this will estimate the target merge commit hash
    of `current_branch` by finding a common ancestor with the remote branch
    'branch-{major}.{minor}' where {major} and {minor} are determined from the
    repo version.

    Parameters
    ----------
    current_branch : str, optional
        Which branch to consider as the current branch, by default "HEAD"

    Returns
    -------
    str
        The common commit hash ID (None when no target branch or remote
        tracking reference could be determined)
    """
    try:
        # Try to determine the target branch from the most recent tag
        head_branch = __git("describe",
                            "--all",
                            "--tags",
                            "--match='branch-*'",
                            "--abbrev=0")
    except subprocess.CalledProcessError:
        print(" [DEBUG] Could not determine target branch from most recent "
              "tag. Falling back to 'branch-{major}.{minor}.")
        head_branch = None
    if (head_branch is not None):
        # Convert from head to branch name
        head_branch = __git("name-rev", "--name-only", head_branch)
    else:
        # Try and guess the target branch as "branch-<major>.<minor>"
        version = repo_version_major_minor()
        if (version is None):
            return None
        head_branch = "branch-{}".format(version)
    try:
        # Now get the remote tracking branch
        remote_branch = __git("rev-parse",
                              "--abbrev-ref",
                              "--symbolic-full-name",
                              head_branch + "@{upstream}")
    except subprocess.CalledProcessError:
        print(" [DEBUG] Could not remote tracking reference for "
              f"branch {head_branch}.")
        remote_branch = None
    if (remote_branch is None):
        return None
    print(f" [DEBUG] Determined TARGET_BRANCH as: '{remote_branch}'. "
          "Finding common ancestor.")
    # The merge target is the closest common ancestor of the remote target
    # branch and the current branch
    common_commit = __git("merge-base", remote_branch, current_branch)
    return common_commit
def uncommittedFiles():
    """
    Returns a list of all changed files that are not yet committed. This
    means both untracked/unstaged as well as uncommitted files too.
    """
    files = __git("status", "-u", "-s")
    ret = []
    for f in files.splitlines():
        f = f.strip(" ")
        # collapse the status columns to a single separator; a raw string
        # avoids the invalid-escape-sequence warning of the old "\s+"
        # (which needed a `# noqa: W605` suppression)
        f = re.sub(r"\s+", " ", f)
        tmp = f.split(" ", 1)
        # only consider staged files or uncommitted files
        # in other words, ignore untracked files
        if tmp[0] == "M" or tmp[0] == "A":
            ret.append(tmp[1])
    return ret
def changedFilesBetween(baseName, branchName, commitHash):
    """
    Returns a list of files changed between branches baseName and latest commit
    of branchName.
    """
    # NOTE(review): this temporarily checks out other branches/commits and so
    # mutates the working tree; the original branch is restored at the end.
    current = branch()
    # checkout "base" branch
    __git("checkout", "--force", baseName)
    # checkout branch for comparing
    __git("checkout", "--force", branchName)
    # checkout latest commit from branch
    __git("checkout", "-fq", commitHash)
    files = __gitdiff("--name-only",
                      "--ignore-submodules",
                      f"{baseName}..{branchName}")
    # restore the original branch
    __git("checkout", "--force", current)
    return files.splitlines()
def changesInFileBetween(file, b1, b2, filter=None):
    """Filters the changed lines to a file between the branches b1 and b2"""
    # NOTE(review): checks out b1 then b2 (ensuring both refs exist locally)
    # and restores the original branch afterwards
    current = branch()
    __git("checkout", "--quiet", b1)
    __git("checkout", "--quiet", b2)
    # -U0 keeps only the changed lines themselves (no context); -w ignores
    # whitespace-only changes
    diffs = __gitdiff("--ignore-submodules",
                      "-w",
                      "--minimal",
                      "-U0",
                      "%s...%s" % (b1, b2),
                      "--",
                      file)
    __git("checkout", "--quiet", current)
    lines = []
    for line in diffs.splitlines():
        # keep only the diff lines accepted by the caller-supplied filter
        if filter is None or filter(line):
            lines.append(line)
    return lines
def modifiedFiles(pathFilter=None):
    """
    If inside a CI-env (ie. TARGET_BRANCH and COMMIT_HASH are defined, and
    current branch is "current-pr-branch"), then lists out all files modified
    between these 2 branches. Locally, TARGET_BRANCH will try to be determined
    from the current repo version and finding a corresponding branch named
    'branch-{major}.{minor}'. If this fails, this function will list out all
    the uncommitted files in the current branch.

    Such utility function is helpful while putting checker scripts as part of
    cmake, as well as CI process. This way, during development, only the files
    touched (but not yet committed) by devs can be checked. But, during the CI
    process ALL files modified by the dev, as submitted in the PR, will be
    checked. This happens, all the while using the same script.
    """
    targetBranch = os.environ.get("TARGET_BRANCH")
    commitHash = os.environ.get("COMMIT_HASH")
    currentBranch = branch()
    print(
        f" [DEBUG] TARGET_BRANCH={targetBranch}, COMMIT_HASH={commitHash}, "
        f"currentBranch={currentBranch}")
    if targetBranch and commitHash and (currentBranch == "current-pr-branch"):
        print(" [DEBUG] Assuming a CI environment.")
        allFiles = changedFilesBetween(targetBranch, currentBranch, commitHash)
    else:
        print(" [DEBUG] Did not detect CI environment. "
              "Determining TARGET_BRANCH locally.")
        common_commit = determine_merge_commit(currentBranch)
        if (common_commit is not None):
            # Now get the diff. Use --staged to get both diff between
            # common_commit..HEAD and any locally staged files
            allFiles = __gitdiff("--name-only",
                                 "--ignore-submodules",
                                 "--staged",
                                 f"{common_commit}").splitlines()
        else:
            # Fallback to just uncommitted files
            allFiles = uncommittedFiles()
    # apply the optional caller-supplied path filter
    files = []
    for f in allFiles:
        if pathFilter is None or pathFilter(f):
            files.append(f)
    filesToCheckString = "\n\t".join(files) if files else "<None>"
    print(f" [DEBUG] Found files to check:\n\t{filesToCheckString}\n")
    return files
def listAllFilesInDir(folder):
    """Utility that returns the paths of all files beneath ``folder``."""
    return [os.path.join(root, name)
            for root, _dirs, files in os.walk(folder)
            for name in files]
def listFilesToCheck(filesDirs, filter=None):
    """
    Utility function to filter the input list of files/dirs based on the input
    filter method and returns all the files that need to be checked
    """
    def _keep(path):
        return filter is None or filter(path)

    selected = []
    for entry in filesDirs:
        if os.path.isfile(entry):
            if _keep(entry):
                selected.append(entry)
        elif os.path.isdir(entry):
            selected.extend(p for p in listAllFilesInDir(entry) if _keep(p))
    return selected
| 0 |
rapidsai_public_repos/raft/cpp | rapidsai_public_repos/raft/cpp/scripts/run-clang-compile.py | # Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# IMPORTANT DISCLAIMER: #
# This file is experimental and may not run successfully on the entire repo! #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
from __future__ import print_function
import argparse
import json
import multiprocessing as mp
import os
import re
import shutil
import subprocess
# Extracts the host C++ compiler path from a CMakeCache.txt.
CMAKE_COMPILER_REGEX = re.compile(
    r"^\s*CMAKE_CXX_COMPILER:FILEPATH=(.+)\s*$", re.MULTILINE)
CLANG_COMPILER = "clang++"
# Pulls the numeric architecture out of an 'sm_XX' token.
GPU_ARCH_REGEX = re.compile(r"sm_(\d+)")
SPACES = re.compile(r"\s+")
# nvcc spellings of "forward these options to the host compiler / ptxas".
XCOMPILER_FLAG = re.compile(r"-((Xcompiler)|(-compiler-options))=?")
XPTXAS_FLAG = re.compile(r"-((Xptxas)|(-ptxas-options))=?")
# any options that may have equal signs in nvcc but not in clang
# add those options here if you find any
OPTIONS_NO_EQUAL_SIGN = ['-isystem']
# banners used when printing per-file results
SEPARATOR = "-" * 8
END_SEPARATOR = "*" * 64
def parse_args():
    """Parse command-line options; validates the compilation-database path
    and fills in derived fields (compiled regexes, job count, build dir)."""
    parser = argparse.ArgumentParser("Runs clang++ on a project instead of nvcc")
    parser.add_argument(
        "-cdb", type=str, default="compile_commands.json",
        help="Path to cmake-generated compilation database")
    parser.add_argument(
        "-ignore", type=str, default=None,
        help="Regex used to ignore files from checking")
    parser.add_argument(
        "-select", type=str, default=None,
        help="Regex used to select files for checking")
    parser.add_argument(
        "-j", type=int, default=-1, help="Number of parallel jobs to launch.")
    parser.add_argument(
        "-build_dir", type=str, default=None,
        help="Directory from which compile commands should be called. "
             "By default, directory of compile_commands.json file.")
    opts = parser.parse_args()
    if opts.j <= 0:
        opts.j = mp.cpu_count()
    opts.ignore_compiled = re.compile(opts.ignore) if opts.ignore else None
    opts.select_compiled = re.compile(opts.select) if opts.select else None
    # we don't check clang's version, it should be OK with any clang
    # recent enough to handle CUDA >= 11
    if not os.path.exists(opts.cdb):
        raise Exception("Compilation database '%s' missing" % opts.cdb)
    if opts.build_dir is None:
        opts.build_dir = os.path.dirname(opts.cdb)
    return opts
def get_gcc_root(build_dir):
    """Locate the root of the g++ installation CMake used.

    Resolution order: CMakeCache.txt inside ``build_dir``; then the conda
    prefix (when a conda build sysroot is configured); finally the first
    ``g++`` found on PATH.  Raises when none of these succeed.
    """
    # first try to determine GCC based on CMakeCache
    cmake_cache = os.path.join(build_dir, "CMakeCache.txt")
    if os.path.isfile(cmake_cache):
        with open(cmake_cache) as f:
            content = f.read()
            match = CMAKE_COMPILER_REGEX.search(content)
            if match:
                # CMAKE_CXX_COMPILER is <root>/bin/g++ -> strip two components
                return os.path.dirname(os.path.dirname(match.group(1)))
    # first fall-back to CONDA prefix if we have a build sysroot there
    conda_prefix = os.environ.get("CONDA_PREFIX", "")
    conda_sysroot = os.environ.get("CONDA_BUILD_SYSROOT", "")
    if conda_prefix and conda_sysroot:
        return conda_prefix
    # second fall-back to default g++ install
    default_gxx = shutil.which("g++")
    if default_gxx:
        return os.path.dirname(os.path.dirname(default_gxx))
    raise Exception("Cannot find any g++ install on the system.")
def list_all_cmds(cdb):
    """Load the compilation database and return it (a list of entry dicts)."""
    with open(cdb, "r") as fp:
        return json.load(fp)
def get_gpu_archs(command):
    """Collect GPU architectures from nvcc's -gencode/--generate-code flags.

    clang only accepts a single architecture, so the lowest one found is
    returned as clang's ``--cuda-gpu-arch`` flag.

    Returns
    -------
    list of str
        ``["--cuda-gpu-arch=sm_<min>"]``, or an empty list when the command
        contains no architecture flags (the original crashed with a
        ValueError from ``min()`` on an empty sequence in that case).
    """
    archs = []
    for loc in range(len(command)):
        if (command[loc] != "-gencode" and command[loc] != "--generate-code"
                and not command[loc].startswith("--generate-code=")):
            continue
        # the arch spec is either fused after '=' or the next token
        if command[loc].startswith("--generate-code="):
            arch_flag = command[loc][len("--generate-code="):]
        else:
            arch_flag = command[loc + 1]
        match = GPU_ARCH_REGEX.search(arch_flag)
        if match is not None:
            archs.append(int(match.group(1)))
    if not archs:
        return []
    return ["--cuda-gpu-arch=sm_%d" % min(archs)]
def get_index(arr, item_options):
    """Return the set of indices in ``arr`` whose element equals any entry
    of ``item_options``."""
    return {i for i, s in enumerate(arr)
            if any(s == item for item in item_options)}
def remove_items(arr, item_options):
    """Delete from ``arr`` (in place) every element equal to any entry of
    ``item_options``."""
    for i in reversed(range(len(arr))):
        if any(arr[i] == item for item in item_options):
            del arr[i]
def remove_items_plus_one(arr, item_options):
    """Delete from ``arr`` (in place) every option in ``item_options``
    together with its following value token, and also any fused
    '<option>=<value>' single-token form."""
    # two-token form: "<option> <value>" — scan back-to-front so deletions
    # never shift indices still to be visited
    for i in reversed(range(len(arr))):
        if not any(arr[i] == item for item in item_options):
            continue
        if i < len(arr) - 1:
            del arr[i + 1]
        del arr[i]
    # fused form: "<option>=<value>"
    for i in reversed(range(len(arr))):
        if any(arr[i].startswith(item + "=") for item in item_options):
            del arr[i]
def add_cuda_path(command, nvcc):
    """Append clang's ``--cuda-path`` flag, derived from where the original
    nvcc binary lives; raises when ``nvcc`` is not found on PATH."""
    nvcc_path = shutil.which(nvcc)
    if nvcc_path is None:
        raise Exception("Command %s has invalid compiler %s" % (command, nvcc))
    cuda_root = os.path.dirname(os.path.dirname(nvcc_path))
    command.append('--cuda-path=%s' % cuda_root)
def get_clang_args(cmd, build_dir):
    """Translate one compilation-database entry into an equivalent clang++
    command line, returned as a list of tokens.

    nvcc-only flags are stripped or rewritten into clang's CUDA spellings.
    The statement order below is significant, so only comments are added.
    """
    command, file = cmd["command"], cmd["file"]
    is_cuda = file.endswith(".cu")
    command = re.split(SPACES, command)
    # get original compiler
    cc_orig = command[0]
    # compiler is always clang++!
    command[0] = "clang++"
    # remove compilation and output targets from the original command
    remove_items_plus_one(command, ["--compile", "-c"])
    remove_items_plus_one(command, ["--output-file", "-o"])
    if is_cuda:
        # replace nvcc's "-gencode ..." with clang's "--cuda-gpu-arch ..."
        archs = get_gpu_archs(command)
        command.extend(archs)
        # provide proper cuda path to clang
        add_cuda_path(command, cc_orig)
        # remove all kinds of nvcc flags clang doesn't know about
        remove_items_plus_one(command, [
            "--generate-code",
            "-gencode",
            "--x",
            "-x",
            "--compiler-bindir",
            "-ccbin",
            "--diag_suppress",
            "-diag-suppress",
            "--default-stream",
            "-default-stream",
        ])
        remove_items(command, [
            "-extended-lambda",
            "--extended-lambda",
            "-expt-extended-lambda",
            "--expt-extended-lambda",
            "-expt-relaxed-constexpr",
            "--expt-relaxed-constexpr",
            "--device-debug",
            "-G",
            "--generate-line-info",
            "-lineinfo",
        ])
        # "-x cuda" is the right usage in clang
        command.extend(["-x", "cuda"])
        # we remove -Xcompiler flags: here we basically have to hope for the
        # best that clang++ will accept any flags which nvcc passed to gcc
        for i, c in reversed(list(enumerate(command))):
            new_c = XCOMPILER_FLAG.sub('', c)
            if new_c == c:
                continue
            command[i:i + 1] = new_c.split(',')
        # we also change -Xptxas to -Xcuda-ptxas, always adding space here
        # NOTE(review): for the two-token "-Xptxas <opts>" form this deletes
        # <opts> and re-inserts the sub() of the flag token itself — verify
        # this is the intended behaviour
        for i, c in reversed(list(enumerate(command))):
            if XPTXAS_FLAG.search(c):
                if not c.endswith("=") and i < len(command) - 1:
                    del command[i + 1]
                command[i] = '-Xcuda-ptxas'
                command.insert(i + 1, XPTXAS_FLAG.sub('', c))
        # several options like isystem don't expect `=`
        for opt in OPTIONS_NO_EQUAL_SIGN:
            opt_eq = opt + '='
            # make sure that we iterate from back to front here for insert
            for i, c in reversed(list(enumerate(command))):
                if not c.startswith(opt_eq):
                    continue
                x = c.split('=')
                # we only care about the first `=`
                command[i] = x[0]
                command.insert(i + 1, '='.join(x[1:]))
        # use extensible whole program, to avoid ptx resolution/linking
        command.extend(["-Xcuda-ptxas", "-ewp"])
        # for libcudacxx, we need to allow variadic functions
        command.extend(["-Xclang", "-fcuda-allow-variadic-functions"])
        # add some additional CUDA intrinsics
        cuda_intrinsics_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "__clang_cuda_additional_intrinsics.h")
        command.extend(["-include", cuda_intrinsics_file])
    # somehow this option gets onto the commandline, it is unrecognized by clang
    remove_items(command, [
        "--forward-unknown-to-host-compiler",
        "-forward-unknown-to-host-compiler"
    ])
    # do not treat warnings as errors here !
    for i, x in reversed(list(enumerate(command))):
        if x.startswith("-Werror"):
            del command[i]
    # try to figure out which GCC CMAKE used, and tell clang all about it
    command.append("--gcc-toolchain=%s" % get_gcc_root(build_dir))
    return command
def run_clang_command(clang_cmd, cwd):
    """Execute ``clang_cmd`` (token list) in ``cwd`` via the shell.

    Returns
    -------
    (bool, str)
        Whether the command exited with 0, and a report containing the
        command line, cwd, exit code and combined stdout/stderr.
    """
    cmd = " ".join(clang_cmd)
    result = subprocess.run(cmd, check=False, shell=True, cwd=cwd,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    captured = result.stdout.decode("utf-8").strip()
    report = "CMD: " + cmd + "\n"
    report += "CWD: " + cwd + "\n"
    report += "EXIT-CODE: %d\n" % result.returncode
    report += captured
    return result.returncode == 0, report
class LockContext(object):
    """Context manager around an optional lock; with no lock it degenerates
    to a no-op (used for the sequential code path)."""

    def __init__(self, lock=None) -> None:
        self._lock = lock

    def __enter__(self):
        if self._lock is not None:
            self._lock.acquire()
        return self

    def __exit__(self, _, __, ___):
        if self._lock is not None:
            self._lock.release()
        return False  # we don't handle exceptions
def print_result(passed, stdout, file):
    """Print a pass/fail banner for ``file``; dump the captured compiler
    output only when the compile failed."""
    status_str = "PASSED" if passed else "FAILED"
    print("%s File:%s %s %s" % (SEPARATOR, file, status_str, SEPARATOR))
    if stdout and not passed:
        print(stdout)
    print("%s\n" % END_SEPARATOR)
def run_clang(cmd, args):
    """Translate one compile command to clang++, run it, print the outcome
    (serialized via the global `lock`) and return whether it succeeded."""
    clang_args = get_clang_args(cmd, args.build_dir)
    # compile only and dump output to /dev/null
    clang_args.extend(["-c", cmd["file"], "-o", os.devnull])
    ok, output = run_clang_command(clang_args, args.build_dir)
    # print immediately so the run stays interactive for the user
    with lock:
        print_result(ok, output, cmd["file"])
    return ok
# mostly used for debugging purposes
def run_sequential(args, all_files):
    """Compile every selected file one after another; returns True when all
    of them compiled cleanly."""
    # lock must be defined as in `run_parallel`
    global lock
    lock = LockContext()
    statuses = []
    for cmd in all_files:
        # skip files that we don't want to look at
        if args.ignore_compiled is not None and \
                re.search(args.ignore_compiled, cmd["file"]) is not None:
            continue
        if args.select_compiled is not None and \
                re.search(args.select_compiled, cmd["file"]) is None:
            continue
        statuses.append(run_clang(cmd, args))
    return all(statuses)
def copy_lock(init_lock):
    """Pool-worker initializer: publish the shared print lock as a global."""
    # this is required to pass locks to pool workers
    # see https://stackoverflow.com/questions/25557686/
    # python-sharing-a-lock-between-processes
    global lock
    lock = init_lock
def run_parallel(args, all_files):
    """Compile every selected file with clang++ using a pool of ``args.j``
    worker processes; returns True when all compilations succeeded."""
    # a shared lock serializes result printing across the pool workers
    init_lock = LockContext(mp.Lock())
    pool = mp.Pool(args.j, initializer=copy_lock, initargs=(init_lock,))
    results = []
    for cmd in all_files:
        # skip files that we don't want to look at
        if args.ignore_compiled is not None and \
                re.search(args.ignore_compiled, cmd["file"]) is not None:
            continue
        if args.select_compiled is not None and \
                re.search(args.select_compiled, cmd["file"]) is None:
            continue
        results.append(pool.apply_async(run_clang, args=(cmd, args)))
    # collect before close/join so worker exceptions propagate here
    results_final = [r.get() for r in results]
    pool.close()
    pool.join()
    return all(results_final)
def main():
    """Entry point: load the compilation database and compile every entry
    with clang++, in parallel unless -j 1 was requested."""
    args = parse_args()
    all_files = list_all_cmds(args.cdb)
    # ensure that we use only the real paths
    for cmd in all_files:
        cmd["file"] = os.path.realpath(os.path.expanduser(cmd["file"]))
    runner = run_sequential if args.j == 1 else run_parallel
    if not runner(args, all_files):
        raise Exception("clang++ failed! Refer to the errors above.")


if __name__ == "__main__":
    main()
| 0 |
rapidsai_public_repos/raft/cpp | rapidsai_public_repos/raft/cpp/scripts/run-cmake-format.sh | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION.
# This script is a wrapper for cmakelang that may be used with pre-commit. The
# wrapping is necessary because RAPIDS libraries split configuration for
# cmakelang linters between a local config file and a second config file that's
# shared across all of RAPIDS via rapids-cmake. In order to keep it up to date
# this file is only maintained in one place (the rapids-cmake repo) and
# pulled down during builds. We need a way to invoke CMake linting commands
# without causing pre-commit failures (which could block local commits or CI),
# while also being sufficiently flexible to allow users to maintain the config
# file independently of a build directory.
#
# This script provides the minimal functionality to enable those use cases. It
# searches in a number of predefined locations for the rapids-cmake config file
# and exits gracefully if the file is not found. If a user wishes to specify a
# config file at a nonstandard location, they may do so by setting the
# environment variable RAPIDS_CMAKE_FORMAT_FILE.
#
# This script can be invoked directly anywhere within the project repository.
# Alternatively, it may be invoked as a pre-commit hook via
# `pre-commit run (cmake-format)|(cmake-lint)`.
#
# Usage:
# bash run-cmake-format.sh {cmake-format,cmake-lint} infile [infile ...]
# Determine the repo build directory: an explicit RAFT_ROOT wins, otherwise
# derive it from the git checkout this script is invoked inside of.
status=0
if [ -z ${RAFT_ROOT:+PLACEHOLDER} ]; then
  RAFT_BUILD_DIR=$(git rev-parse --show-toplevel 2>&1)/cpp/build
  status=$?
else
  RAFT_BUILD_DIR=${RAFT_ROOT}
fi
if ! [ ${status} -eq 0 ]; then
  if [[ ${RAFT_BUILD_DIR} == *"not a git repository"* ]]; then
    echo "This script must be run inside the raft repository, or the RAFT_ROOT environment variable must be set."
  else
    echo "Script failed with unknown error attempting to determine project root:"
    echo ${RAFT_BUILD_DIR}
  fi
  exit 1
fi
DEFAULT_FORMAT_FILE_LOCATIONS=(
  "${RAFT_BUILD_DIR:-${HOME}}/_deps/rapids-cmake-src/cmake-format-rapids-cmake.json"
)
# Search the default locations only when the user did not pin a config file
# via RAPIDS_CMAKE_FORMAT_FILE.
if [ -z ${RAPIDS_CMAKE_FORMAT_FILE:+PLACEHOLDER} ]; then
  for file_path in ${DEFAULT_FORMAT_FILE_LOCATIONS[@]}; do
    if [ -f ${file_path} ]; then
      RAPIDS_CMAKE_FORMAT_FILE=${file_path}
      break
    fi
  done
fi
# Exit 0 (not a failure) when the config is missing so pre-commit is not
# blocked by a build directory that does not exist yet.
if [ -z ${RAPIDS_CMAKE_FORMAT_FILE:+PLACEHOLDER} ]; then
  echo "The rapids-cmake cmake-format configuration file was not found at any of the default search locations: "
  echo ""
  ( IFS=$'\n'; echo "${DEFAULT_FORMAT_FILE_LOCATIONS[*]}" )
  echo ""
  echo "Try setting the environment variable RAPIDS_CMAKE_FORMAT_FILE to the path to the config file."
  exit 0
else
  echo "Using format file ${RAPIDS_CMAKE_FORMAT_FILE}"
fi
if [[ $1 == "cmake-format" ]]; then
  cmake-format -i --config-files cpp/cmake/config.json ${RAPIDS_CMAKE_FORMAT_FILE} -- ${@:2}
elif [[ $1 == "cmake-lint" ]]; then
  # Since the pre-commit hook is verbose, we have to be careful to only
  # present cmake-lint's output (which is quite verbose) if we actually
  # observe a failure.
  OUTPUT=$(cmake-lint --config-files cpp/cmake/config.json ${RAPIDS_CMAKE_FORMAT_FILE} -- ${@:2})
  status=$?
  if ! [ ${status} -eq 0 ]; then
    echo "${OUTPUT}"
  fi
  exit ${status}
fi
| 0 |
rapidsai_public_repos/raft/cpp | rapidsai_public_repos/raft/cpp/scripts/run-clang-tidy.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# IMPORTANT DISCLAIMER: #
# This file is experimental and may not run successfully on the entire repo! #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
from __future__ import print_function
import argparse
import json
import multiprocessing as mp
import os
import re
import shutil
import subprocess
EXPECTED_VERSIONS = ("16.0.6",)
VERSION_REGEX = re.compile(r"clang version ([0-9.]+)")
CMAKE_COMPILER_REGEX = re.compile(
r"^\s*CMAKE_CXX_COMPILER:FILEPATH=(.+)\s*$", re.MULTILINE)
CLANG_COMPILER = "clang++"
GPU_ARCH_REGEX = re.compile(r"sm_(\d+)")
SPACES = re.compile(r"\s+")
XCOMPILER_FLAG = re.compile(r"-((Xcompiler)|(-compiler-options))=?")
XPTXAS_FLAG = re.compile(r"-((Xptxas)|(-ptxas-options))=?")
# any options that may have equal signs in nvcc but not in clang
# add those options here if you find any
OPTIONS_NO_EQUAL_SIGN = ['-isystem']
SEPARATOR = "-" * 8
END_SEPARATOR = "*" * 64
def parse_args():
    """Parse command-line options and validate the environment.

    Verifies the clang compiler version, the presence of the compilation
    database, and the custom thrust directory.  Returns the parsed
    ``argparse.Namespace`` with derived fields (``ignore_compiled``,
    ``select_compiled``, absolute ``root``, ``thrust_dir``, ``build_dir``).

    Raises:
        Exception: if clang's version is unsupported, the compilation
            database is missing, or the thrust directory does not exist.
    """
    argparser = argparse.ArgumentParser("Runs clang-tidy on a project")
    argparser.add_argument(
        "-cdb", type=str, default="compile_commands.json",
        help="Path to cmake-generated compilation database")
    argparser.add_argument(
        "-exe", type=str, default="clang-tidy", help="Path to clang-tidy exe")
    argparser.add_argument(
        "-ignore", type=str, default=None,
        help="Regex used to ignore files from checking")
    argparser.add_argument(
        "-select", type=str, default=None,
        help="Regex used to select files for checking")
    argparser.add_argument(
        "-j", type=int, default=-1, help="Number of parallel jobs to launch.")
    argparser.add_argument(
        "-root", type=str, default=None,
        help="Repo root path to filter headers correctly, CWD by default.")
    argparser.add_argument(
        "-thrust_dir", type=str, default=None,
        help="Pass the directory to a THRUST git repo recent enough for clang.")
    argparser.add_argument(
        "-build_dir", type=str, default=None,
        help="Directory from which compile commands should be called. "
        "By default, directory of compile_commands.json file.")
    args = argparser.parse_args()
    if args.j <= 0:
        # -1 (default) means "use all cores"
        args.j = mp.cpu_count()
    args.ignore_compiled = re.compile(args.ignore) if args.ignore else None
    args.select_compiled = re.compile(args.select) if args.select else None
    # we check clang's version so that it will work in CI
    ret = subprocess.check_output("%s --version" % CLANG_COMPILER, shell=True)
    ret = ret.decode("utf-8")
    version = VERSION_REGEX.match(ret)
    if version is None:
        raise Exception("Failed to figure out clang compiler version!")
    version = version.group(1)
    if version not in EXPECTED_VERSIONS:
        raise Exception("clang compiler version must be in %s found '%s'" %
                        (EXPECTED_VERSIONS, version))
    if not os.path.exists(args.cdb):
        raise Exception("Compilation database '%s' missing" % args.cdb)
    # we assume that this script is run from repo root
    if args.root is None:
        args.root = os.getcwd()
    args.root = os.path.realpath(os.path.expanduser(args.root))
    # we need to have a recent enough cub version for clang to compile
    if args.thrust_dir is None:
        args.thrust_dir = os.path.join(
            os.path.dirname(args.cdb), "thrust_1.15", "src", "thrust_1.15")
    if args.build_dir is None:
        args.build_dir = os.path.dirname(args.cdb)
    if not os.path.isdir(args.thrust_dir):
        # fix: the original message had an unbalanced quote ("'%s")
        raise Exception("Cannot find custom thrust dir '%s'" % args.thrust_dir)
    return args
def get_gcc_root(args):
    """Locate the root directory of the GCC toolchain to hand to clang.

    Search order: the compiler recorded in the build's CMakeCache.txt,
    then the conda environment (only when a build sysroot is configured),
    then whatever ``g++`` is first on the PATH.

    Raises:
        Exception: if no g++ installation can be found at all.
    """
    # first try to determine GCC based on CMakeCache
    cmake_cache = os.path.join(args.build_dir, "CMakeCache.txt")
    if os.path.isfile(cmake_cache):
        with open(cmake_cache) as f:
            content = f.read()
        match = CMAKE_COMPILER_REGEX.search(content)
        if match:
            # CMAKE_CXX_COMPILER is <root>/bin/g++ -> strip two components
            return os.path.dirname(os.path.dirname(match.group(1)))
    # first fall-back to CONDA prefix if we have a build sysroot there
    conda_prefix = os.environ.get("CONDA_PREFIX", "")
    conda_sysroot = os.environ.get("CONDA_BUILD_SYSROOT", "")
    if conda_prefix and conda_sysroot:
        return conda_prefix
    # second fall-back to default g++ install
    default_gxx = shutil.which("g++")
    if default_gxx:
        return os.path.dirname(os.path.dirname(default_gxx))
    raise Exception("Cannot find any g++ install on the system.")
def list_all_cmds(cdb):
    """Read the compilation database file ``cdb`` and return its parsed JSON
    (a list of per-file compile-command entries)."""
    with open(cdb) as db_file:
        contents = db_file.read()
    return json.loads(contents)
def get_gpu_archs(command):
    """Translate nvcc's ``-gencode``/``--generate-code`` flags into a clang
    ``--cuda-gpu-arch`` flag.

    clang only accepts a single architecture, so the lowest sm_XX found in
    the command is used.

    NOTE(review): assumes at least one sm_XX spec is present -- ``min``
    raises ValueError on an empty list otherwise; confirm all cuda compile
    commands carry a gencode flag.
    """
    # clang only accepts a single architecture, so first determine the lowest
    archs = []
    for loc in range(len(command)):
        if (command[loc] != "-gencode" and command[loc] != "--generate-code"
                and not command[loc].startswith("--generate-code=")):
            continue
        if command[loc].startswith("--generate-code="):
            # single-token form: the spec is embedded after the '='
            arch_flag = command[loc][len("--generate-code="):]
        else:
            # two-token form: the spec is the next argument
            arch_flag = command[loc + 1]
        match = GPU_ARCH_REGEX.search(arch_flag)
        if match is not None:
            archs.append(int(match.group(1)))
    return ["--cuda-gpu-arch=sm_%d" % min(archs)]
def get_index(arr, item_options):
    """Return the set of indices in ``arr`` whose element equals any entry
    of ``item_options``."""
    matches = set()
    for position, value in enumerate(arr):
        if value in item_options:
            matches.add(position)
    return matches
def remove_items(arr, item_options):
    """Remove, in place, every element of ``arr`` equal to any of
    ``item_options``."""
    # delete from the back so earlier indices remain valid
    for i in sorted(get_index(arr, item_options), reverse=True):
        del arr[i]
def remove_items_plus_one(arr, item_options):
    """Remove, in place, each matching flag together with the argument that
    follows it, plus any single-token ``flag=value`` spellings.

    Used to strip two-token nvcc options (e.g. ``-o <file>``) from a split
    command line.
    """
    # delete from the back so earlier indices remain valid
    for i in sorted(get_index(arr, item_options), reverse=True):
        if i < len(arr) - 1:
            # drop the flag's argument first (if the flag isn't the last token)
            del arr[i + 1]
        del arr[i]
    # also drop the "flag=value" single-token spelling of the same options
    idx = set(i for i, s in enumerate(arr) for item in item_options
              if s.startswith(item + "="))
    for i in sorted(idx, reverse=True):
        del arr[i]
def add_cuda_path(command, nvcc):
    """Append a ``--cuda-path`` flag (derived from the location of ``nvcc``)
    to ``command`` in place.

    Raises:
        Exception: when ``nvcc`` cannot be found on the PATH.
    """
    located = shutil.which(nvcc)
    if not located:
        raise Exception("Command %s has invalid compiler %s" % (command, nvcc))
    # nvcc lives at <cuda_root>/bin/nvcc -> strip two path components
    toolkit_root = os.path.dirname(os.path.dirname(located))
    command.append("--cuda-path=" + toolkit_root)
def get_tidy_args(cmd, args):
    """Rewrite one compilation-database entry into clang-tidy compiler args.

    Takes the original compile command (nvcc for .cu files, a host compiler
    otherwise) and massages it into flags clang understands.  Returns
    ``(command, is_cuda)`` where ``command`` is the rewritten argument list
    and ``is_cuda`` tells the caller whether separate device/host tidy
    passes are needed.
    """
    command, file = cmd["command"], cmd["file"]
    is_cuda = file.endswith(".cu")
    command = re.split(SPACES, command)
    # get original compiler
    cc_orig = command[0]
    # compiler is always clang++!
    command[0] = "clang++"
    # remove compilation and output targets from the original command
    remove_items_plus_one(command, ["--compile", "-c"])
    remove_items_plus_one(command, ["--output-file", "-o"])
    if is_cuda:
        # include our own cub before anything else
        # (left-most should have highest priority)
        command.insert(1, "-I%s" % args.thrust_dir)
        # replace nvcc's "-gencode ..." with clang's "--cuda-gpu-arch ..."
        archs = get_gpu_archs(command)
        command.extend(archs)
        # provide proper cuda path to clang
        add_cuda_path(command, cc_orig)
        # remove all kinds of nvcc flags clang doesn't know about
        remove_items_plus_one(command, [
            "--generate-code",
            "-gencode",
            "--x",
            "-x",
            "--compiler-bindir",
            "-ccbin",
            "--diag_suppress",
            "-diag-suppress",
            "--default-stream",
            "-default-stream",
        ])
        remove_items(command, [
            "-extended-lambda",
            "--extended-lambda",
            "-expt-extended-lambda",
            "--expt-extended-lambda",
            "-expt-relaxed-constexpr",
            "--expt-relaxed-constexpr",
            "--device-debug",
            "-G",
            "--generate-line-info",
            "-lineinfo",
        ])
        # "-x cuda" is the right usage in clang
        command.extend(["-x", "cuda"])
        # we remove -Xcompiler flags: here we basically have to hope for the
        # best that clang++ will accept any flags which nvcc passed to gcc
        for i, c in reversed(list(enumerate(command))):
            new_c = XCOMPILER_FLAG.sub('', c)
            if new_c == c:
                continue
            # expand comma-separated host-compiler flags into separate tokens
            command[i:i + 1] = new_c.split(',')
        # we also change -Xptxas to -Xcuda-ptxas, always adding space here
        for i, c in reversed(list(enumerate(command))):
            if XPTXAS_FLAG.search(c):
                if not c.endswith("=") and i < len(command) - 1:
                    del command[i + 1]
                command[i] = '-Xcuda-ptxas'
                command.insert(i + 1, XPTXAS_FLAG.sub('', c))
        # several options like isystem don't expect `=`
        for opt in OPTIONS_NO_EQUAL_SIGN:
            opt_eq = opt + '='
            # make sure that we iterate from back to front here for insert
            for i, c in reversed(list(enumerate(command))):
                if not c.startswith(opt_eq):
                    continue
                x = c.split('=')
                # we only care about the first `=`
                command[i] = x[0]
                command.insert(i + 1, '='.join(x[1:]))
        # use extensible whole program, to avoid ptx resolution/linking
        command.extend(["-Xcuda-ptxas", "-ewp"])
        # for libcudacxx, we need to allow variadic functions
        command.extend(["-Xclang", "-fcuda-allow-variadic-functions"])
        # add some additional CUDA intrinsics
        cuda_intrinsics_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "__clang_cuda_additional_intrinsics.h")
        command.extend(["-include", cuda_intrinsics_file])
    # somehow this option gets onto the commandline, it is unrecognized by tidy
    remove_items(command, [
        "--forward-unknown-to-host-compiler",
        "-forward-unknown-to-host-compiler"
    ])
    # do not treat warnings as errors here !
    for i, x in reversed(list(enumerate(command))):
        if x.startswith("-Werror"):
            del command[i]
    # try to figure out which GCC CMAKE used, and tell clang all about it
    command.append("--gcc-toolchain=%s" % get_gcc_root(args))
    return command, is_cuda
def check_output_for_errors(output):
    """Scan clang-tidy output and return ``(warning_count, error_lines)``.

    A line may contribute to both counts if it contains both markers.
    """
    lines = output.splitlines()
    errors = [line for line in lines if "error:" in line]
    warnings_found = sum(1 for line in lines if "warning:" in line)
    return warnings_found, errors
def run_clang_tidy_command(tidy_cmd, cwd):
    """Run a single clang-tidy invocation and classify its output.

    Returns ``(passed, combined_output, error_lines)`` where ``passed`` is
    True only when the run produced neither warnings nor errors.
    """
    cmd = " ".join(tidy_cmd)
    # stderr is folded into stdout so warnings/errors are scanned together
    result = subprocess.run(cmd, check=False, shell=True, cwd=cwd,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    result.stdout = result.stdout.decode("utf-8").strip()
    out = "CMD: " + cmd + "\n"
    out += "EXIT-CODE: %d\n" % result.returncode
    n_warnings, errors = check_output_for_errors(result.stdout)
    # note: the process exit code is reported but not used for pass/fail
    status = n_warnings == 0 and not errors
    out += result.stdout
    return status, out, errors
class LockContext(object):
    """Context manager wrapping an optional multiprocessing lock.

    With ``lock=None`` this degenerates to a no-op context manager, letting
    the sequential and parallel code paths share identical ``with`` syntax.
    """

    def __init__(self, lock=None) -> None:
        self._lock = lock

    def __enter__(self):
        if self._lock is not None:
            self._lock.acquire()
        return self

    def __exit__(self, _, __, ___):
        if self._lock is not None:
            self._lock.release()
        # never swallow exceptions
        return False
def print_result(passed, stdout, file, errors):
    """Print a per-file PASSED/FAILED banner and return output lines.

    Raises immediately when hard errors were seen; for plain warning
    failures the full clang-tidy output is echoed and returned (for later
    per-check accounting in ``main``).  Passing files return ``[]``.
    """
    if any(errors):
        raise Exception(
            "File %s: got %d errors:\n%s" % (file, len(errors), stdout))
    status_str = "PASSED" if passed else "FAILED"
    print("%s File:%s %s %s" % (SEPARATOR, file, status_str, SEPARATOR))
    if not passed and stdout:
        print(stdout)
        print("%s\n" % END_SEPARATOR)
        return stdout.splitlines()
    return []
def run_clang_tidy(cmd, args):
    """Tidy a single compilation-database entry.

    CUDA files are checked twice (``--cuda-device-only`` then
    ``--cuda-host-only``); other files once.  Results are printed under the
    module-level ``lock`` (set up by run_sequential/run_parallel) so output
    from parallel workers does not interleave.  Returns
    ``(passed, output_lines)``.
    """
    command, is_cuda = get_tidy_args(cmd, args)
    # only report findings from headers inside this repo's cpp/ tree
    header_path_any = os.path.join(os.path.basename(args.root), "cpp", ".*")
    header_filter = "-header-filter='.*%s[.](cuh|h|hpp)$'" % header_path_any
    tidy_cmd = [args.exe, header_filter, cmd["file"], "--"]
    tidy_cmd.extend(command)
    status = True
    out = ""
    if is_cuda:
        tidy_cmd.append("--cuda-device-only")
        tidy_cmd.append(cmd["file"])
        ret, out1, errors1 = run_clang_tidy_command(tidy_cmd, args.build_dir)
        out += out1
        out += "\n%s\n" % SEPARATOR
        status = status and ret
        # swap the device-only flag (second-to-last token) for the host pass
        tidy_cmd[-2] = "--cuda-host-only"
        ret, out1, errors2 = run_clang_tidy_command(tidy_cmd, args.build_dir)
        status = status and ret
        out += out1
        errors = errors1 + errors2
    else:
        tidy_cmd.append(cmd["file"])
        ret, out1, errors = run_clang_tidy_command(tidy_cmd, args.build_dir)
        status = status and ret
        out += out1
    # we immediately print the result since this is more interactive for user
    with lock:
        lines = print_result(status, out, cmd["file"], errors)
    return status, lines
def parse_results(results):
    """Collapse per-file ``(passed, lines)`` results into a single
    ``(overall_passed, all_lines)`` pair."""
    overall = True
    collected = []
    for passed, file_lines in results:
        overall = overall and passed
        collected.extend(file_lines)
    return overall, collected
# mostly used for debugging purposes
def run_sequential(args, all_files):
    """Run clang-tidy over all entries one at a time (no worker pool).

    Returns the same ``(passed, lines)`` aggregate as ``run_parallel``.
    """
    # lock must be defined as in `run_parallel`; a bare LockContext is a no-op
    global lock
    lock = LockContext()
    results = []
    # actual tidy checker
    for cmd in all_files:
        # skip files that we don't want to look at
        if args.ignore_compiled is not None and \
           re.search(args.ignore_compiled, cmd["file"]) is not None:
            continue
        if args.select_compiled is not None and \
           re.search(args.select_compiled, cmd["file"]) is None:
            continue
        results.append(run_clang_tidy(cmd, args))
    return parse_results(results)
def copy_lock(init_lock):
    """Pool initializer: publish the shared lock as a module-level global
    inside each worker process."""
    # this is required to pass locks to pool workers
    # see https://stackoverflow.com/questions/25557686/
    # python-sharing-a-lock-between-processes
    global lock
    lock = init_lock
def run_parallel(args, all_files):
    """Run clang-tidy over all entries with a pool of ``args.j`` workers.

    Each worker receives the shared print lock via ``copy_lock`` so that
    per-file result banners do not interleave.  Returns ``(passed, lines)``.
    """
    init_lock = LockContext(mp.Lock())
    pool = mp.Pool(args.j, initializer=copy_lock, initargs=(init_lock,))
    results = []
    # actual tidy checker
    for cmd in all_files:
        # skip files that we don't want to look at
        if args.ignore_compiled is not None and \
           re.search(args.ignore_compiled, cmd["file"]) is not None:
            continue
        if args.select_compiled is not None and \
           re.search(args.select_compiled, cmd["file"]) is None:
            continue
        results.append(pool.apply_async(run_clang_tidy, args=(cmd, args)))
    # block until every file has been processed
    results_final = [r.get() for r in results]
    pool.close()
    pool.join()
    return parse_results(results_final)
def main():
    """Entry point: tidy every file in the compilation database.

    On failure, prints a per-check summary of how often each clang-tidy
    check name appears in the collected output, then raises.
    """
    args = parse_args()
    # attempt to make sure that we always run this script from the repo root
    if not os.path.exists(".git"):
        raise Exception("This needs to always be run from the root of repo")
    all_files = list_all_cmds(args.cdb)
    # ensure that we use only the real paths
    for cmd in all_files:
        cmd["file"] = os.path.realpath(os.path.expanduser(cmd["file"]))
    if args.j == 1:
        status, lines = run_sequential(args, all_files)
    else:
        status, lines = run_parallel(args, all_files)
    if not status:
        # first get a list of all checks that were run
        ret = subprocess.check_output(args.exe + " --list-checks", shell=True)
        ret = ret.decode("utf-8")
        # check names are listed indented by four spaces
        checks = [line.strip() for line in ret.splitlines()
                  if line.startswith(' ' * 4)]
        max_check_len = max(len(c) for c in checks)
        check_counts = dict()
        content = os.linesep.join(lines)
        # count how often each check name shows up in the failure output
        for check in checks:
            check_counts[check] = content.count(check)
        sorted_counts = sorted(
            check_counts.items(), key=lambda x: x[1], reverse=True)
        print("Failed {} check(s) in total. Counts as per below:".format(
            sum(1 for _, count in sorted_counts if count > 0)))
        for check, count in sorted_counts:
            if count <= 0:
                break
            n_space = max_check_len - len(check) + 4
            print("{}:{}{}".format(check, ' ' * n_space, count))
        raise Exception("clang-tidy failed! Refer to the errors above.")
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/raft/cpp/scripts/heuristics | rapidsai_public_repos/raft/cpp/scripts/heuristics/select_k/generate_plots.ipynb | from collections import defaultdict
import pandas as pd
import json
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme()from select_k_dataset import load_dataframe, get_dataset
df = load_dataframe("select_k_times.json")
df = df[(df.use_memory_pool == True)]
df = df[(df.index_type == 'int64_t') & (df.key_type == 'float')]
dfdef generate_plot(df, x_axis="col", title=""):
fig, ax = plt.subplots()
for algo in sorted(set(df.algo)):
current = df[(df.algo == algo) & (df.time < np.inf)]
ax.plot(current[x_axis], current["time"], label=algo)
ax.set_xscale('log', base=2)
ax.set_yscale('log', base=2)
ax.set_xlabel(x_axis)
ax.set_ylabel("time(s)")
ax.set_title(title)
fig.set_dpi(200)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=4)
# fig.legend()
plt.show()
def generate_k_plot(df, col, row):
return generate_plot(df[(df.col == col) & (df.row == row)], "k", f"#cols={col}, #rows={row}")
def generate_col_plot(df, row, k):
return generate_plot(df[(df.row == row) & (df.k == k)], "col", f"#rows={row}, k={k}")
def generate_row_plot(df, col, k):
return generate_plot(df[(df.col == col) & (df.k == k)], "row", f"#cols={col}, k={k}")generate_col_plot(df, 256, 32)generate_k_plot(df, 2**20, 256) | 0 |
rapidsai_public_repos/raft/cpp/scripts/heuristics | rapidsai_public_repos/raft/cpp/scripts/heuristics/select_k/generate_heuristic.ipynb | from select_k_dataset import load_dataframe, get_dataset
import sklearn.tree
import numpy as np# load up the timings from the MATRIX_BENCH script into a pandas dataframe
df = load_dataframe("select_k_times.json")
# we're limiting down to 3 different select_k methods - chosen by
# the 'algorithm_selection.ipynb' script here
df = df[df.algo.isin(['kWarpImmediate', 'kRadix11bitsExtraPass', 'kRadix11bits', 'kWarpDistributedShm'])]
# we're also assuming we have a memory pool for now
df = df[(df.use_memory_pool == True)]
# df = df[(df.index_type == 'int64_t') & (df.key_type == 'float')]
df# break down into a train/set set
X, y, weights = get_dataset(df)
train_test_sets = sklearn.model_selection.train_test_split(X, y, weights, test_size=0.15, random_state=1)
X_train, X_test, y_train, y_test, weights_train, weights_test = train_test_sets
X_train.shape, X_test.shapemodel = sklearn.tree.DecisionTreeClassifier(max_depth=4, max_leaf_nodes=8)
model.fit(X_train, y_train) #, weights_train)model.score(X_train, y_train, weights_train)model.score(X_test, y_test, weights_test)# print(sklearn.tree.export_text(model, feature_names=["k", "rows", "cols"]))import matplotlib.pyplot as plt
plt.figure(figsize=(12,12))
viz = sklearn.tree.plot_tree(model, fontsize=8, class_names=list(model.classes_), feature_names=["k", "rows", "cols", "use_memory_pool"], impurity=True)def convert_model_to_code(model):
classes = model.classes_
tree = model.tree_
feature_names = ["k", "rows", "cols", "use_memory_pool"]
def _get_label(nodeid):
""" returns the most frequent class name for the node """
return classes[np.argsort(tree.value[nodeid, 0])[-1]]
def _is_leaf_node(nodeid):
""" returns whether or not the node is a leaf node in the tree"""
# negative values here indicate we're a leaf
if tree.feature[nodeid] < 0:
return True
# some nodes have both branches with the same label, combine those
left, right = tree.children_left[nodeid], tree.children_right[nodeid]
if (_is_leaf_node(left) and
_is_leaf_node(right) and
_get_label(left) == _get_label(right)):
return True
return False
code = []
def _convert_node(nodeid, indent):
if _is_leaf_node(nodeid):
# we're a leaf node, just output the label of the most frequent algorithm
class_name = _get_label(nodeid)
code.append(" " * indent + f"return Algo::{class_name};")
else:
feature = feature_names[tree.feature[nodeid]]
threshold = int(np.floor(tree.threshold[nodeid]))
code.append(" " * indent + f"if ({feature} > {threshold}) " + "{")
_convert_node(tree.children_right[nodeid], indent + 2)
code.append(" " * indent + "} else {")
_convert_node(tree.children_left[nodeid], indent + 2)
code.append(" " * indent + "}")
code.append("inline Algo choose_select_k_algorithm(size_t rows, size_t cols, int k)")
code.append("{")
_convert_node(0, indent=2)
code.append("}")
return "\n".join(code)
code = convert_model_to_code(model)
print(code)# also update the source code in raft/matrix/detail/select_k.cuh
import pathlib
select_k_path = pathlib.Path.cwd() / ".." / ".." / ".." / "include" / "raft" / "matrix" / "detail" / "select_k-inl.cuh"
source_lines = open(select_k_path.resolve()).read().split("\n")
# figure out the location of the code snippet in the file, and splice it in
code_lines = code.split("\n")
first_line = source_lines.index(code_lines[0])
last_line = source_lines.index(code_lines[-1], first_line)
new_source = source_lines[:first_line] + code_lines + source_lines[last_line+1:]
open(select_k_path.resolve(), "w").write("\n".join(new_source)) | 0 |
rapidsai_public_repos/raft/cpp/scripts/heuristics | rapidsai_public_repos/raft/cpp/scripts/heuristics/select_k/algorithm_selection.ipynb | from select_k_dataset import load_dataframe, get_dataset
df = load_dataframe("select_k_times.json")
dffrom collections import Counter
def rank_algos(df, use_relative_speedup=False):
_, y, weights = get_dataset(df)
times = Counter()
for algo, speedup in zip(y, weights):
times[algo] += speedup if use_relative_speedup else 1
return sorted(times.items(), key=lambda x:-x[-1])# show the number of times each algorithm is fastest for a given k/# of rows/# of cols / dtype / memory pool etc
rank_algos(df)# kRadix8bits seems to have a performance issue with 64 bit index types, it is one
# of the worst performing algorithms for 64bit indices, but one of the top 3 for 32 bit
rank_algos(df[df.index_type == "int64_t"])rank_algos(df[df.index_type == "uint32_t"])# do an algorithm selection pass, repeatedly remove the lowest performing algorithm
#
# The idea here is that we can simplify the decision logic, reduce the binary size
# and speed up the compilation time by only including a subset of selection algorithms.
# we're aiming to get algorithms that perform well in different situations, and complement
# each other - so to do this, we're iteratively removing the worst performing algorithm,
# after which algorithms are re-evaluated on their speedups relative to the remaining
# algorithms. This gets us a minimum spanning set of selection algorithms that performs
# well over diverse inputs.
#
# note: the lowest performing algorithm here might actually be pretty good, but
# just not provide much benefit over another similar algorithm.
# As an example, kWarpDistributed is an excellent selection algorithm, but in testing
# kWarpDistributedShm is slightly faster than it in situations where it does well,
# meaning that it gets removed early on in this loop
current = df[df.use_memory_pool == True]
algos = set(df.algo)
# we're arbitrarily getting this down to 3 selection algorithms
while len(algos) > 4:
times = rank_algos(current, use_relative_speedup=False)
algo, speedup = times[-1]
algos.remove(algo)
current = df[df.algo.isin(algos)]
print("selected", algos)
rank_algos(current)# experimenting with different subsets of index type / dtype / use memory seems
# to pretty consistently show that kRadix11bits / kWarpDistributedShm / kFaissBlockSelect
# all get selected here | 0 |
rapidsai_public_repos/raft/cpp/scripts/heuristics | rapidsai_public_repos/raft/cpp/scripts/heuristics/select_k/select_k_dataset.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
import json
import pandas as pd
import numpy as np
def load_dataframe(filename):
    """Loads up the select_k benchmark times as a pandas dataframe

    This loads up the timings from the MATRIX_BENCH script into a pandas dataframe

    This file is generated by running:

    ./cpp/build/MATRIX_BENCH --benchmark_filter=SelectKDataset \
        --benchmark_out_format=json \
        --benchmark_out=select_k_times.json \
        --select_k_dataset

    Note running these MATRIX_BENCH tests takes over 24 hours right now
    """
    benchmarks = json.load(open(filename))["benchmarks"]
    df = pd.DataFrame(benchmarks, columns=["real_time", "run_name"])
    # run_name is slash-separated; components 1-3 are the string fields
    # (key_type/index_type/algo), components 4-8 are integer parameters
    run_info = [
        run[1:4] + list(map(int, run[4:9]))
        for run in df.run_name.str.split("/").tolist()
    ]
    df[
        [
            "key_type",
            "index_type",
            "algo",
            "row",
            "col",
            "k",
            "use_index_input",
            "use_memory_pool",
        ]
    ] = pd.DataFrame(run_info, index=df.index)
    # NOTE(review): presumably real_time is in milliseconds, making "time"
    # seconds -- confirm against the benchmark's time_unit setting
    df["time"] = df["real_time"] / 1000
    df = df.drop(["run_name", "real_time"], axis=1)
    # stable ordering so downstream grouping/plotting is deterministic
    df = df.sort_values(
        by=[
            "k",
            "row",
            "col",
            "key_type",
            "index_type",
            "use_index_input",
            "use_memory_pool",
        ]
    )
    df = df.reset_index(drop=True)
    return df
def get_dataset(df):
    """Returns the training features, labels and sample weights from a dataframe"""
    # bucket (algo, time) pairs by their input-feature tuple
    grouped = defaultdict(list)
    for row in df.itertuples():
        key = (
            row.k,
            row.row,
            row.col,
            row.use_memory_pool,
            row.key_type,
            row.index_type,
        )
        grouped[key].append((row.algo, row.time))

    features, labels, sample_weights = [], [], []
    for key, timings in grouped.items():
        # we can't yet handle the dtype values (key_type/index_type) in
        # training, so drop the last two fields from the feature tuple
        key = key[:-2]

        # fastest algorithm first
        timings.sort(key=lambda pair: pair[1])
        best_algo, best_time = timings[0]

        # weight each sample by the absolute speedup over the runner-up,
        # capped at 10, so big wins dominate and ~1% wins (likely noise)
        # barely count
        if len(timings) > 1:
            runner_up_time = timings[1][1]
        else:
            # no other algorithm handles this K value
            runner_up_time = np.inf
        weight = min(runner_up_time - best_time, 10)

        features.append(key)
        labels.append(best_algo)
        sample_weights.append(weight)

    return np.array(features), np.array(labels), np.array(sample_weights)
| 0 |
rapidsai_public_repos/raft/cpp | rapidsai_public_repos/raft/cpp/template/CMakeLists.txt | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
# ------------- configure rapids-cmake --------------#
include(cmake/thirdparty/fetch_rapids.cmake)
include(rapids-cmake)
include(rapids-cpm)
include(rapids-cuda)
include(rapids-export)
include(rapids-find)
# ------------- configure project --------------#
rapids_cuda_init_architectures(test_raft)
project(test_raft LANGUAGES CXX CUDA)
# ------------- configure raft -----------------#
rapids_cpm_init()
include(cmake/thirdparty/get_raft.cmake)
# -------------- compile tasks ----------------- #
add_executable(CAGRA_EXAMPLE src/cagra_example.cu)
target_link_libraries(CAGRA_EXAMPLE PRIVATE raft::raft raft::compiled)
add_executable(IVF_FLAT_EXAMPLE src/ivf_flat_example.cu)
target_link_libraries(IVF_FLAT_EXAMPLE PRIVATE raft::raft raft::compiled)
| 0 |
rapidsai_public_repos/raft/cpp | rapidsai_public_repos/raft/cpp/template/README.md | # Example RAFT Project Template
This template project provides a drop-in sample to either start building a new application with, or using RAFT in an existing CMake project.
First, please refer to our [installation docs](https://docs.rapids.ai/api/raft/stable/build.html#cuda-gpu-requirements) for the minimum requirements to use RAFT.
Once the minimum requirements are satisfied, this example template application can be built with the provided `build.sh` script. This is a bash script that calls the appropriate CMake commands, so you can look into it to see the typical CMake based build workflow.
This directory (`RAFT_SOURCE/cpp/template`) can be copied directly in order to build a new application with RAFT.
RAFT can be integrated into an existing CMake project by copying the contents in the `configure rapids-cmake` and `configure raft` sections of the provided `CMakeLists.txt` into your project, along with `cmake/thirdparty/get_raft.cmake`.
Make sure to link against the appropriate CMake targets. Use `raft::raft` to make the headers available and `raft::compiled` when utilizing the shared library.
```cmake
target_link_libraries(your_app_target PRIVATE raft::raft raft::compiled)
```
| 0 |
rapidsai_public_repos/raft/cpp | rapidsai_public_repos/raft/cpp/template/build.sh | #!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.

# raft empty project template build script

# Abort script on first error
set -e

# Number of parallel build jobs; defaults to all available cores.
PARALLEL_LEVEL=${PARALLEL_LEVEL:=`nproc`}

BUILD_TYPE=Release
BUILD_DIR=build/

# Optionally point RAFT_REPO_REL at a local raft checkout to build against it
# instead of letting CPM fetch raft from GitHub.
RAFT_REPO_REL=""
EXTRA_CMAKE_ARGS=""

if [[ ${RAFT_REPO_REL} != "" ]]; then
  RAFT_REPO_PATH="`readlink -f \"${RAFT_REPO_REL}\"`"
  EXTRA_CMAKE_ARGS="${EXTRA_CMAKE_ARGS} -DCPM_raft_SOURCE=${RAFT_REPO_PATH}"
fi

# "./build.sh clean" wipes the build tree and exits.
if [ "$1" == "clean" ]; then
  rm -rf build
  exit 0
fi

mkdir -p $BUILD_DIR
cd $BUILD_DIR

cmake \
 -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
 -DRAFT_NVTX=OFF \
 -DCMAKE_CUDA_ARCHITECTURES="NATIVE" \
 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
 ${EXTRA_CMAKE_ARGS} \
 ../

cmake --build . -j${PARALLEL_LEVEL}
| 0 |
rapidsai_public_repos/raft/cpp/template/cmake | rapidsai_public_repos/raft/cpp/template/cmake/thirdparty/fetch_rapids.cmake | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# Use this variable to update RAPIDS and RAFT versions
set(RAPIDS_VERSION "24.02")
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/RAFT_RAPIDS.cmake)
file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-${RAPIDS_VERSION}/RAPIDS.cmake
${CMAKE_CURRENT_BINARY_DIR}/RAFT_RAPIDS.cmake)
endif()
include(${CMAKE_CURRENT_BINARY_DIR}/RAFT_RAPIDS.cmake)
| 0 |
rapidsai_public_repos/raft/cpp/template/cmake | rapidsai_public_repos/raft/cpp/template/cmake/thirdparty/get_raft.cmake | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# Use RAPIDS_VERSION from cmake/thirdparty/fetch_rapids.cmake
set(RAFT_VERSION "${RAPIDS_VERSION}")
set(RAFT_FORK "rapidsai")
set(RAFT_PINNED_TAG "branch-${RAPIDS_VERSION}")
# Fetch raft via CPM and configure its optional components and build flags.
# Keyword arguments: VERSION FORK PINNED_TAG COMPILE_LIBRARY ENABLE_NVTX
# ENABLE_MNMG_DEPENDENCIES (all single-value; parsed with the PKG_ prefix).
function(find_and_configure_raft)
  set(oneValueArgs VERSION FORK PINNED_TAG COMPILE_LIBRARY ENABLE_NVTX ENABLE_MNMG_DEPENDENCIES)
  cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
                        "${multiValueArgs}" ${ARGN} )

  set(RAFT_COMPONENTS "")
  if(PKG_COMPILE_LIBRARY)
    string(APPEND RAFT_COMPONENTS " compiled")
  endif()

  if(PKG_ENABLE_MNMG_DEPENDENCIES)
    string(APPEND RAFT_COMPONENTS " distributed")
  endif()

  #-----------------------------------------------------
  # Invoke CPM find_package()
  #-----------------------------------------------------
  # NOTE: RAFT_NVTX previously referenced the undefined ${ENABLE_NVTX};
  # cmake_parse_arguments stores the keyword as PKG_ENABLE_NVTX.
  rapids_cpm_find(raft ${PKG_VERSION}
          GLOBAL_TARGETS      raft::raft
          BUILD_EXPORT_SET    raft-template-exports
          INSTALL_EXPORT_SET  raft-template-exports
          COMPONENTS ${RAFT_COMPONENTS}
          CPM_ARGS
          GIT_REPOSITORY https://github.com/${PKG_FORK}/raft.git
          GIT_TAG        ${PKG_PINNED_TAG}
          SOURCE_SUBDIR  cpp
          OPTIONS
          "BUILD_TESTS OFF"
          "BUILD_PRIMS_BENCH OFF"
          "BUILD_ANN_BENCH OFF"
          "RAFT_NVTX ${PKG_ENABLE_NVTX}"
          "RAFT_COMPILE_LIBRARY ${PKG_COMPILE_LIBRARY}"
  )
endfunction()
# Change pinned tag here to test a commit in CI
# To use a different RAFT locally, set the CMake variable
# CPM_raft_SOURCE=/path/to/local/raft
find_and_configure_raft(VERSION ${RAFT_VERSION}.00
FORK ${RAFT_FORK}
PINNED_TAG ${RAFT_PINNED_TAG}
COMPILE_LIBRARY ON
ENABLE_MNMG_DEPENDENCIES OFF
ENABLE_NVTX OFF
)
| 0 |
rapidsai_public_repos/raft/cpp/template | rapidsai_public_repos/raft/cpp/template/src/ivf_flat_example.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <optional>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/neighbors/ivf_flat.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include "common.cuh"
// Build an IVF-Flat index over `dataset` and run a single k-NN search for `queries`,
// printing a few sample results via print_results().
//
// dev_resources: RAFT handle supplying the CUDA stream and workspace resources.
// dataset:       [n_rows, dim] device matrix of vectors to index (row-major view).
// queries:       [n_queries, dim] device matrix of query vectors.
void ivf_flat_build_search_simple(raft::device_resources const& dev_resources,
raft::device_matrix_view<const float, int64_t> dataset,
raft::device_matrix_view<const float, int64_t> queries)
{
using namespace raft::neighbors;
// Index build parameters: 1024 inverted lists (k-means clusters), trained on a
// 10% subsample of the dataset, using expanded L2 distance.
ivf_flat::index_params index_params;
index_params.n_lists = 1024;
index_params.kmeans_trainset_fraction = 0.1;
index_params.metric = raft::distance::DistanceType::L2Expanded;
std::cout << "Building IVF-Flat index" << std::endl;
auto index = ivf_flat::build(dev_resources, index_params, dataset);
std::cout << "Number of clusters " << index.n_lists() << ", number of vectors added to index "
<< index.size() << std::endl;
// Create output arrays.
// One row of `topk` neighbor ids / distances per query.
int64_t topk = 10;
int64_t n_queries = queries.extent(0);
auto neighbors = raft::make_device_matrix<int64_t>(dev_resources, n_queries, topk);
auto distances = raft::make_device_matrix<float>(dev_resources, n_queries, topk);
// Set search parameters.
// n_probes controls how many of the 1024 clusters are scanned per query
// (recall/speed trade-off).
ivf_flat::search_params search_params;
search_params.n_probes = 50;
// Search K nearest neighbors for each of the queries.
ivf_flat::search(
dev_resources, search_params, index, queries, neighbors.view(), distances.view());
// The call to ivf_flat::search is asynchronous. Before accessing the data, sync by calling
// raft::resource::sync_stream(dev_resources);
// (print_results performs its own stream sync before reading the results.)
print_results(dev_resources, neighbors.view(), distances.view());
}
// Build an IVF-Flat index from a training subsample only, then extend it with the
// full dataset (attaching explicit user-supplied indices) and run a k-NN search.
//
// dev_resources: RAFT handle supplying the CUDA stream and workspace resources.
// dataset:       [n_rows, dim] device matrix of vectors to index.
// queries:       [n_queries, dim] device matrix of query vectors.
void ivf_flat_build_extend_search(raft::device_resources const& dev_resources,
raft::device_matrix_view<const float, int64_t> dataset,
raft::device_matrix_view<const float, int64_t> queries)
{
using namespace raft::neighbors;
// Define dataset indices (0 .. n_rows-1); these become the ids returned by search.
auto data_indices = raft::make_device_vector<int64_t, int64_t>(dev_resources, dataset.extent(0));
thrust::counting_iterator<int64_t> first(0);
thrust::device_ptr<int64_t> ptr(data_indices.data_handle());
thrust::copy(
raft::resource::get_thrust_policy(dev_resources), first, first + dataset.extent(0), ptr);
// Sub-sample the dataset (10%) to create a training set for k-means.
auto trainset =
subsample(dev_resources, dataset, raft::make_const_mdspan(data_indices.view()), 0.1);
ivf_flat::index_params index_params;
index_params.n_lists = 100;
index_params.metric = raft::distance::DistanceType::L2Expanded;
// Do not store the training vectors in the index at build time; the full
// dataset is inserted afterwards via ivf_flat::extend().
index_params.add_data_on_build = false;
std::cout << "\nRun k-means clustering using the training set" << std::endl;
auto index =
ivf_flat::build(dev_resources, index_params, raft::make_const_mdspan(trainset.view()));
std::cout << "Number of clusters " << index.n_lists() << ", number of vectors added to index "
<< index.size() << std::endl;
std::cout << "Filling index with the dataset vectors" << std::endl;
index = ivf_flat::extend(dev_resources,
dataset,
std::make_optional(raft::make_const_mdspan(data_indices.view())),
index);
std::cout << "Index size after adding dataset vectors " << index.size() << std::endl;
// Set search parameters.
ivf_flat::search_params search_params;
search_params.n_probes = 10;
// Create output arrays: one row of `topk` neighbor ids / distances per query.
int64_t topk = 10;
int64_t n_queries = queries.extent(0);
auto neighbors = raft::make_device_matrix<int64_t, int64_t>(dev_resources, n_queries, topk);
auto distances = raft::make_device_matrix<float, int64_t>(dev_resources, n_queries, topk);
// Search K nearest neighbors for each query.
ivf_flat::search(
dev_resources, search_params, index, queries, neighbors.view(), distances.view());
// The call to ivf_flat::search is asynchronous. Before accessing the data, sync using:
// raft::resource::sync_stream(dev_resources);
// (print_results performs its own stream sync before reading the results.)
print_results(dev_resources, neighbors.view(), distances.view());
}
// Entry point for the IVF-Flat example: sets up an RMM pool allocator,
// generates a synthetic dataset/query set, and runs both the simple
// build+search flow and the build+extend flow.
int main()
{
raft::device_resources dev_resources;
// Set pool memory resource with 1 GiB initial pool size. All allocations use the same pool.
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> pool_mr(
rmm::mr::get_current_device_resource(), 1024 * 1024 * 1024ull);
rmm::mr::set_current_device_resource(&pool_mr);
// Alternatively, one could define a pool allocator for temporary arrays (used within RAFT
// algorithms). In that case only the internal arrays would use the pool, any other allocation
// uses the default RMM memory resource. Here is how to change the workspace memory resource to
// a pool with 2 GiB upper limit.
// raft::resource::set_workspace_to_pool_resource(dev_resources, 2 * 1024 * 1024 * 1024ull);
// Create input arrays.
int64_t n_samples = 10000;
int64_t n_dim = 3;
int64_t n_queries = 10;
auto dataset = raft::make_device_matrix<float, int64_t>(dev_resources, n_samples, n_dim);
auto queries = raft::make_device_matrix<float, int64_t>(dev_resources, n_queries, n_dim);
generate_dataset(dev_resources, dataset.view(), queries.view());
// Simple build and search example.
ivf_flat_build_search_simple(dev_resources,
raft::make_const_mdspan(dataset.view()),
raft::make_const_mdspan(queries.view()));
// Build and extend example.
ivf_flat_build_extend_search(dev_resources,
raft::make_const_mdspan(dataset.view()),
raft::make_const_mdspan(queries.view()));
}
| 0 |
rapidsai_public_repos/raft/cpp/template | rapidsai_public_repos/raft/cpp/template/src/common.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/matrix/copy.cuh>
#include <raft/random/make_blobs.cuh>
#include <raft/random/sample_without_replacement.cuh>
#include <raft/util/cudart_utils.hpp>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
// Fill `dataset` with clustered synthetic points (make_blobs) and `queries`
// with values drawn uniformly from [-1, 1]. Both matrices are written in place.
void generate_dataset(raft::device_resources const& dev_resources,
raft::device_matrix_view<float, int64_t> dataset,
raft::device_matrix_view<float, int64_t> queries)
{
// make_blobs also produces a per-row cluster label; the labels are discarded.
auto blob_labels = raft::make_device_vector<int64_t, int64_t>(dev_resources, dataset.extent(0));
raft::random::make_blobs(dev_resources, dataset, blob_labels.view());
// Fixed seed so the example is reproducible run to run.
raft::random::RngState rng_state(1234ULL);
auto queries_flat = raft::make_device_vector_view(queries.data_handle(), queries.size());
raft::random::uniform(dev_resources, rng_state, queries_flat, -1.0f, 1.0f);
}
// Copy the results to host and print a few samples
//
// Copies the `neighbors` (ids) and `distances` result matrices to host memory,
// syncs the stream, and prints the top-k row for the first two queries.
// NOTE(review): the loop below hard-codes 2 queries — assumes
// neighbors.extent(0) >= 2; confirm against callers.
template <typename IdxT>
void print_results(raft::device_resources const& dev_resources,
raft::device_matrix_view<IdxT, int64_t> neighbors,
raft::device_matrix_view<float, int64_t> distances)
{
int64_t topk = neighbors.extent(1);
auto neighbors_host = raft::make_host_matrix<IdxT, int64_t>(neighbors.extent(0), topk);
auto distances_host = raft::make_host_matrix<float, int64_t>(distances.extent(0), topk);
cudaStream_t stream = raft::resource::get_cuda_stream(dev_resources);
raft::copy(neighbors_host.data_handle(), neighbors.data_handle(), neighbors.size(), stream);
raft::copy(distances_host.data_handle(), distances.data_handle(), distances.size(), stream);
// The calls to RAFT algorithms and raft::copy is asynchronous.
// We need to sync the stream before accessing the data.
raft::resource::sync_stream(dev_resources, stream);
for (int query_id = 0; query_id < 2; query_id++) {
std::cout << "Query " << query_id << " neighbor indices: ";
raft::print_host_vector("", &neighbors_host(query_id, 0), topk, std::cout);
std::cout << "Query " << query_id << " neighbor distances: ";
raft::print_host_vector("", &distances_host(query_id, 0), topk, std::cout);
}
}
/** Subsample the dataset to create a training set*/
// Draws floor(n_samples * fraction) row indices from `data_indices` without
// replacement (fixed seed 137, so the sample is deterministic) and gathers the
// corresponding rows of `dataset` into a newly allocated matrix.
//
// dev_resources: RAFT handle supplying stream / workspace resources.
// dataset:       [n_samples, n_dim] source matrix.
// data_indices:  candidate row indices to sample from (one per dataset row).
// fraction:      fraction of rows to keep, in (0, 1]; the count is truncated.
raft::device_matrix<float, int64_t> subsample(
raft::device_resources const& dev_resources,
raft::device_matrix_view<const float, int64_t> dataset,
raft::device_vector_view<const int64_t, int64_t> data_indices,
float fraction)
{
int64_t n_samples = dataset.extent(0);
int64_t n_dim = dataset.extent(1);
// Truncating conversion: n_train = floor(n_samples * fraction).
int64_t n_train = n_samples * fraction;
auto trainset = raft::make_device_matrix<float, int64_t>(dev_resources, n_train, n_dim);
int seed = 137;
raft::random::RngState rng(seed);
auto train_indices = raft::make_device_vector<int64_t>(dev_resources, n_train);
raft::random::sample_without_replacement(
dev_resources, rng, data_indices, std::nullopt, train_indices.view(), std::nullopt);
// Gather the sampled rows into the training matrix.
raft::matrix::copy_rows(
dev_resources, dataset, trainset.view(), raft::make_const_mdspan(train_indices.view()));
return trainset;
}
| 0 |
rapidsai_public_repos/raft/cpp/template | rapidsai_public_repos/raft/cpp/template/src/cagra_example.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/neighbors/cagra.cuh>
#include <raft/random/make_blobs.cuh>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include "common.cuh"
// Build a CAGRA graph index over `dataset` and run a k-NN search for `queries`,
// printing a few sample results via print_results().
//
// dev_resources: RAFT handle supplying the CUDA stream and workspace resources.
// dataset:       [n_rows, dim] device matrix of vectors to index.
// queries:       [n_queries, dim] device matrix of query vectors.
void cagra_build_search_simple(raft::device_resources const& dev_resources,
raft::device_matrix_view<const float, int64_t> dataset,
raft::device_matrix_view<const float, int64_t> queries)
{
using namespace raft::neighbors;
int64_t topk = 12;
int64_t n_queries = queries.extent(0);
// create output arrays
// CAGRA returns uint32_t neighbor ids (not int64_t as in the IVF examples).
auto neighbors = raft::make_device_matrix<uint32_t>(dev_resources, n_queries, topk);
auto distances = raft::make_device_matrix<float>(dev_resources, n_queries, topk);
// use default index parameters
cagra::index_params index_params;
std::cout << "Building CAGRA index (search graph)" << std::endl;
auto index = cagra::build<float, uint32_t>(dev_resources, index_params, dataset);
std::cout << "CAGRA index has " << index.size() << " vectors" << std::endl;
std::cout << "CAGRA graph has degree " << index.graph_degree() << ", graph size ["
<< index.graph().extent(0) << ", " << index.graph().extent(1) << "]" << std::endl;
// use default search parameters
cagra::search_params search_params;
// search K nearest neighbors
cagra::search<float, uint32_t>(
dev_resources, search_params, index, queries, neighbors.view(), distances.view());
// The call to cagra::search is asynchronous. Before accessing the data, sync by calling
// raft::resource::sync_stream(dev_resources);
// (print_results performs its own stream sync before reading the results.)
print_results(dev_resources, neighbors.view(), distances.view());
}
// Entry point for the CAGRA example: sets up an RMM pool allocator, generates
// a synthetic dataset/query set, and runs the CAGRA build+search flow.
int main()
{
raft::device_resources dev_resources;
// Set pool memory resource with 1 GiB initial pool size. All allocations use the same pool.
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> pool_mr(
rmm::mr::get_current_device_resource(), 1024 * 1024 * 1024ull);
rmm::mr::set_current_device_resource(&pool_mr);
// Alternatively, one could define a pool allocator for temporary arrays (used within RAFT
// algorithms). In that case only the internal arrays would use the pool, any other allocation
// uses the default RMM memory resource. Here is how to change the workspace memory resource to
// a pool with 2 GiB upper limit.
// raft::resource::set_workspace_to_pool_resource(dev_resources, 2 * 1024 * 1024 * 1024ull);
// Create input arrays.
int64_t n_samples = 10000;
int64_t n_dim = 90;
int64_t n_queries = 10;
auto dataset = raft::make_device_matrix<float, int64_t>(dev_resources, n_samples, n_dim);
auto queries = raft::make_device_matrix<float, int64_t>(dev_resources, n_queries, n_dim);
generate_dataset(dev_resources, dataset.view(), queries.view());
// Simple build and search example.
cagra_build_search_simple(dev_resources,
raft::make_const_mdspan(dataset.view()),
raft::make_const_mdspan(queries.view()));
}
| 0 |
rapidsai_public_repos/raft/cpp | rapidsai_public_repos/raft/cpp/cmake/config.json | {
"parse": {
"additional_commands": {
"CPMFindPackage": {
"kwargs": {
"NAME": 1,
"GITHUB_REPOSITORY": "?",
"GIT_TAG": "?",
"VERSION": "?",
"GIT_SHALLOW": "?",
"OPTIONS": "*",
"FIND_PACKAGE_ARGUMENTS": "*"
}
},
"ConfigureTest": {
"flags": ["TEST_NAME", "TEST_SRC"]
},
"ConfigureBench": {
"flags": ["BENCH_NAME", "BENCH_SRC"]
}
}
},
"format": {
"line_width": 100,
"tab_size": 2,
"command_case": "unchanged",
"max_lines_hwrap": 1,
"max_pargs_hwrap": 999,
"dangle_parens": true
},
"lint": {
"disabled_codes": ["C0301", "C0111", "C0113"],
"function_pattern": "[0-9A-z_]+",
"macro_pattern": "[0-9A-z_]+",
"global_var_pattern": "[A-z][0-9A-z_]+",
"internal_var_pattern": "_[A-z][0-9A-z_]+",
"local_var_pattern": "[A-z][A-z0-9_]+",
"private_var_pattern": "_[0-9A-z_]+",
"public_var_pattern": "[A-z][0-9A-z_]+",
"argument_var_pattern": "[A-z][A-z0-9_]+",
"keyword_pattern": "[A-z][0-9A-z_]+"
}
}
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/patches/nlohmann_json.patch | --- nlohmann/json.hpp 2021-05-06 11:40:39.770669693 +0800
+++ nlohmann/json_patched.hpp 2021-06-02 18:46:43.849334466 +0800
@@ -16607,6 +16607,21 @@
}
}
+
+ template <typename NumberType,
+ enable_if_t<std::is_signed<NumberType>::value, int> = 0>
+ bool is_negative_number(NumberType x)
+ {
+ return x < 0;
+ }
+
+ template < typename NumberType,
+ enable_if_t < std::is_unsigned<NumberType>::value, int > = 0 >
+ bool is_negative_number(NumberType /*unused*/)
+ {
+ return false;
+ }
+
/*!
@brief dump an integer
@@ -16649,12 +16664,11 @@
// use a pointer to fill the buffer
auto buffer_ptr = number_buffer.begin(); // NOLINT(llvm-qualified-auto,readability-qualified-auto,cppcoreguidelines-pro-type-vararg,hicpp-vararg)
- const bool is_negative = std::is_same<NumberType, number_integer_t>::value && !(x >= 0); // see issue #755
number_unsigned_t abs_value;
unsigned int n_chars{};
- if (is_negative)
+ if (is_negative_number(x))
{
*buffer_ptr = '-';
abs_value = remove_sign(static_cast<number_integer_t>(x));
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/patches/ggnn.patch | diff --git a/include/ggnn/cache/cuda_simple_knn_sym_cache.cuh b/include/ggnn/cache/cuda_simple_knn_sym_cache.cuh
index 890420e..d792903 100644
--- a/include/ggnn/cache/cuda_simple_knn_sym_cache.cuh
+++ b/include/ggnn/cache/cuda_simple_knn_sym_cache.cuh
@@ -62,7 +62,7 @@ struct SimpleKNNSymCache {
const ValueT dist_half)
: dist_query(dist_query), dist_half(dist_half) {}
- __device__ __forceinline__ DistQueryAndHalf() {}
+ DistQueryAndHalf() = default;
};
struct DistanceAndNorm {
@@ -98,8 +98,7 @@ struct SimpleKNNSymCache {
KeyT cache;
DistQueryAndHalf dist;
bool flag;
-
- __device__ __forceinline__ SyncTempStorage() {}
+ SyncTempStorage() = default;
};
public:
diff --git a/include/ggnn/cuda_knn_ggnn_gpu_instance.cuh b/include/ggnn/cuda_knn_ggnn_gpu_instance.cuh
index 8cbaf0d..6eb72ac 100644
--- a/include/ggnn/cuda_knn_ggnn_gpu_instance.cuh
+++ b/include/ggnn/cuda_knn_ggnn_gpu_instance.cuh
@@ -41,7 +41,6 @@ limitations under the License.
#include "ggnn/sym/cuda_knn_sym_query_layer.cuh"
#include "ggnn/utils/cuda_knn_utils.cuh"
#include "ggnn/utils/cuda_knn_constants.cuh"
-#include "ggnn/utils/cuda_knn_dataset.cuh"
template <typename ValueT>
__global__ void divide(ValueT* res, ValueT* input, ValueT N) {
@@ -98,9 +97,7 @@ struct GGNNGPUInstance {
typedef GGNNGraphDevice<KeyT, BaseT, ValueT> GGNNGraphDevice;
typedef GGNNGraphHost<KeyT, BaseT, ValueT> GGNNGraphHost;
- const Dataset<KeyT, BaseT, BAddrT>* dataset;
GGNNGraphBuffer<KeyT, ValueT>* ggnn_buffer {nullptr};
- GGNNQuery<KeyT, ValueT, BaseT> ggnn_query;
// Graph Shards resident on the GPU
std::vector<GGNNGraphDevice> ggnn_shards;
@@ -117,13 +114,12 @@ struct GGNNGPUInstance {
// number of shards that need to be processed by this instance
const int num_parts;
- GGNNGPUInstance(const int gpu_id, const Dataset<KeyT, BaseT, BAddrT>* dataset,
+ GGNNGPUInstance(const int gpu_id,
const int N_shard, const int L,
const bool enable_construction, const float tau_build,
const int num_parts=1, const int num_cpu_buffers=1) :
N_shard{N_shard}, L{L}, tau_build{tau_build},
- dataset{dataset}, gpu_id{gpu_id},
- ggnn_query{dataset->N_query, D, KQuery, num_parts},
+ gpu_id{gpu_id},
num_parts{num_parts}
{
CHECK_LE(L, MAX_LAYER);
@@ -135,7 +131,6 @@ struct GGNNGPUInstance {
CHECK_EQ(current_gpu_id, gpu_id) << "cudaSetDevice() needs to be called in advance!";
}
- ggnn_query.loadQueriesAsync(dataset->h_query, 0);
computeGraphParameters();
@@ -186,7 +181,7 @@ struct GGNNGPUInstance {
}
GGNNGPUInstance(const GGNNGPUInstance& other)
- : dataset{nullptr}, ggnn_query{0, D, KQuery},
+ :
gpu_id{0}, N_shard{0}, num_parts{0} {
// this exists to allow using vector::emplace_back
// when it triggers a reallocation, this code will be called.
@@ -305,6 +300,7 @@ struct GGNNGPUInstance {
// io
+ /*
void waitForDiskIO(const int shard_id) {
auto& cpu_buffer = ggnn_cpu_buffers[shard_id%ggnn_cpu_buffers.size()];
if (cpu_buffer.disk_io_thread.joinable())
@@ -468,11 +464,12 @@ struct GGNNGPUInstance {
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaPeekAtLastError());
}
+ */
// graph operations
template <int BLOCK_DIM_X = 32, int MAX_ITERATIONS = 400, int CACHE_SIZE = 512, int SORTED_SIZE = 256, bool DIST_STATS = false>
- void queryLayer(const int shard_id = 0) const {
+ void queryLayer(const BaseT* d_query, int batch_size, KeyT* d_query_result_ids, ValueT* d_query_result_dists, const int shard_id = 0) const {
CHECK_CUDA(cudaSetDevice(gpu_id));
const auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size());
@@ -482,21 +479,21 @@ struct GGNNGPUInstance {
int* m_dist_statistics = nullptr;
if (DIST_STATS)
- cudaMallocManaged(&m_dist_statistics, dataset->N_query * sizeof(int));
+ cudaMallocManaged(&m_dist_statistics, batch_size * sizeof(int));
QueryKernel query_kernel;
query_kernel.d_base = shard.d_base;
- query_kernel.d_query = ggnn_query.d_query;
+ query_kernel.d_query = d_query;
query_kernel.d_graph = shard.d_graph;
- query_kernel.d_query_results = ggnn_query.d_query_result_ids;
- query_kernel.d_query_results_dists = ggnn_query.d_query_result_dists;
+ query_kernel.d_query_results = d_query_result_ids;
+ query_kernel.d_query_results_dists = d_query_result_dists;
query_kernel.d_translation = shard.d_translation;
query_kernel.d_nn1_stats = shard.d_nn1_stats;
- query_kernel.N = dataset->N_query;
+ query_kernel.N = batch_size;
query_kernel.N_offset = 0;
query_kernel.d_dist_stats = m_dist_statistics;
@@ -771,6 +768,16 @@ struct GGNNGPUInstance {
sym(layer, shard_id);
}
}
+
+ void set_stream(cudaStream_t stream) {
+ assert(ggnn_shards.size() == 1);
+ ggnn_shards.at(0).stream = stream;
+ }
+
+ void set_base_data(const BaseT* dataset) {
+ assert(ggnn_shards.size() == 1);
+ ggnn_shards.at(0).d_base = dataset;
+ }
};
#endif // INCLUDE_GGNN_CUDA_KNN_GGNN_GPU_INSTANCE_CUH_
diff --git a/include/ggnn/graph/cuda_knn_ggnn_graph_device.cuh b/include/ggnn/graph/cuda_knn_ggnn_graph_device.cuh
index c94a8f1..781226d 100644
--- a/include/ggnn/graph/cuda_knn_ggnn_graph_device.cuh
+++ b/include/ggnn/graph/cuda_knn_ggnn_graph_device.cuh
@@ -50,7 +50,7 @@ struct GGNNGraphDevice {
ValueT* d_nn1_stats;
/// base data pointer for the shard.
- BaseT* d_base;
+ const BaseT* d_base;
/// combined memory pool
char* d_memory;
@@ -69,7 +69,9 @@ struct GGNNGraphDevice {
const size_t selection_translation_size = align8(ST_all * sizeof(KeyT));
const size_t nn1_stats_size = align8(2 * sizeof(ValueT));
total_graph_size = graph_size + 2 * selection_translation_size + nn1_stats_size;
- base_size = align8(static_cast<size_t>(N) * D * sizeof(BaseT));
+ // base_size = align8(static_cast<size_t>(N) * D * sizeof(BaseT));
+ (void) N;
+ (void) D;
const size_t total_size = base_size+total_graph_size;
@@ -86,8 +88,7 @@ struct GGNNGraphDevice {
CHECK_CUDA(cudaMalloc(&d_memory, total_size));
size_t pos = 0;
- d_base = reinterpret_cast<BaseT*>(d_memory+pos);
- pos += base_size;
+ d_base = nullptr;
d_graph = reinterpret_cast<KeyT*>(d_memory+pos);
pos += graph_size;
d_translation = reinterpret_cast<KeyT*>(d_memory+pos);
@@ -99,14 +100,14 @@ struct GGNNGraphDevice {
CHECK_EQ(pos, total_size);
- CHECK_CUDA(cudaStreamCreate(&stream));
+ // CHECK_CUDA(cudaStreamCreate(&stream));
CHECK_CUDA(cudaPeekAtLastError());
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaPeekAtLastError());
}
- GGNNGraphDevice(const GGNNGraphDevice& other) {
+ GGNNGraphDevice(const GGNNGraphDevice&) {
// this exists to allow using vector::emplace_back
// when it triggers a reallocation, this code will be called.
// always make sure that enough memory is reserved ahead of time.
@@ -116,7 +117,7 @@ struct GGNNGraphDevice {
~GGNNGraphDevice() {
cudaFree(d_memory);
- CHECK_CUDA(cudaStreamDestroy(stream));
+ // CHECK_CUDA(cudaStreamDestroy(stream));
}
};
diff --git a/include/ggnn/graph/cuda_knn_ggnn_graph_host.cuh b/include/ggnn/graph/cuda_knn_ggnn_graph_host.cuh
index 2055f9e..ef5843a 100644
--- a/include/ggnn/graph/cuda_knn_ggnn_graph_host.cuh
+++ b/include/ggnn/graph/cuda_knn_ggnn_graph_host.cuh
@@ -92,7 +92,7 @@ struct GGNNGraphHost {
CHECK_CUDA(cudaPeekAtLastError());
}
- GGNNGraphHost(const GGNNGraphHost& other) {
+ GGNNGraphHost(const GGNNGraphHost&) {
// this exists to allow using vector::emplace_back
// when it triggers a reallocation, this code will be called.
// always make sure that enough memory is reserved ahead of time.
diff --git a/include/ggnn/select/cuda_knn_wrs_select_layer.cuh b/include/ggnn/select/cuda_knn_wrs_select_layer.cuh
index 49d76a1..eef69e6 100644
--- a/include/ggnn/select/cuda_knn_wrs_select_layer.cuh
+++ b/include/ggnn/select/cuda_knn_wrs_select_layer.cuh
@@ -22,7 +22,6 @@ limitations under the License.
#include <cuda.h>
#include <cuda_runtime.h>
-#include <gflags/gflags.h>
#include <cub/cub.cuh>
#include "ggnn/utils/cuda_knn_constants.cuh"
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/patches/hnswlib.patch | diff --git a/hnswlib/hnswalg.h b/hnswlib/hnswalg.h
index e95e0b5..f0fe50a 100644
--- a/hnswlib/hnswalg.h
+++ b/hnswlib/hnswalg.h
@@ -3,6 +3,7 @@
#include "visited_list_pool.h"
#include "hnswlib.h"
#include <atomic>
+#include <limits>
#include <random>
#include <stdlib.h>
#include <assert.h>
@@ -16,6 +17,8 @@ namespace hnswlib {
template<typename dist_t>
class HierarchicalNSW : public AlgorithmInterface<dist_t> {
public:
+ bool base_layer_only{false};
+ int num_seeds=32;
static const tableint max_update_element_locks = 65536;
HierarchicalNSW(SpaceInterface<dist_t> *s) {
}
@@ -56,7 +59,7 @@ namespace hnswlib {
visited_list_pool_ = new VisitedListPool(1, max_elements);
//initializations for special treatment of the first node
- enterpoint_node_ = -1;
+ enterpoint_node_ = std::numeric_limits<tableint>::max();
maxlevel_ = -1;
linkLists_ = (char **) malloc(sizeof(void *) * max_elements_);
@@ -527,7 +530,7 @@ namespace hnswlib {
tableint *datal = (tableint *) (data + 1);
for (int i = 0; i < size; i++) {
tableint cand = datal[i];
- if (cand < 0 || cand > max_elements_)
+ if (cand > max_elements_)
throw std::runtime_error("cand error");
dist_t d = fstdistfunc_(query_data, getDataByInternalId(cand), dist_func_param_);
@@ -1067,7 +1070,7 @@ namespace hnswlib {
tableint *datal = (tableint *) (data + 1);
for (int i = 0; i < size; i++) {
tableint cand = datal[i];
- if (cand < 0 || cand > max_elements_)
+ if (cand > max_elements_)
throw std::runtime_error("cand error");
dist_t d = fstdistfunc_(data_point, getDataByInternalId(cand), dist_func_param_);
if (d < curdist) {
@@ -1119,28 +1122,41 @@ namespace hnswlib {
tableint currObj = enterpoint_node_;
dist_t curdist = fstdistfunc_(query_data, getDataByInternalId(enterpoint_node_), dist_func_param_);
- for (int level = maxlevel_; level > 0; level--) {
- bool changed = true;
- while (changed) {
- changed = false;
- unsigned int *data;
+ if (base_layer_only) {
+ // You can increase the number of seeds when testing large-scale dataset, num_seeds = 48 for 100M-scale
+ for (int i = 0; i < num_seeds; i++) {
+ tableint obj = i * (max_elements_ / num_seeds);
+ dist_t dist = fstdistfunc_(query_data, getDataByInternalId(obj), dist_func_param_);
+ if (dist < curdist) {
+ curdist = dist;
+ currObj = obj;
+ }
+ }
+ }
+ else{
+ for (int level = maxlevel_; level > 0; level--) {
+ bool changed = true;
+ while (changed) {
+ changed = false;
+ unsigned int *data;
- data = (unsigned int *) get_linklist(currObj, level);
- int size = getListCount(data);
- metric_hops++;
- metric_distance_computations+=size;
+ data = (unsigned int *) get_linklist(currObj, level);
+ int size = getListCount(data);
+ metric_hops++;
+ metric_distance_computations+=size;
- tableint *datal = (tableint *) (data + 1);
- for (int i = 0; i < size; i++) {
- tableint cand = datal[i];
- if (cand < 0 || cand > max_elements_)
- throw std::runtime_error("cand error");
- dist_t d = fstdistfunc_(query_data, getDataByInternalId(cand), dist_func_param_);
+ tableint *datal = (tableint *) (data + 1);
+ for (int i = 0; i < size; i++) {
+ tableint cand = datal[i];
+ if (cand > max_elements_)
+ throw std::runtime_error("cand error");
+ dist_t d = fstdistfunc_(query_data, getDataByInternalId(cand), dist_func_param_);
- if (d < curdist) {
- curdist = d;
- currObj = cand;
- changed = true;
+ if (d < curdist) {
+ curdist = d;
+ currObj = cand;
+ changed = true;
+ }
}
}
}
diff --git a/hnswlib/visited_list_pool.h b/hnswlib/visited_list_pool.h
index 5e1a4a5..4195ebd 100644
--- a/hnswlib/visited_list_pool.h
+++ b/hnswlib/visited_list_pool.h
@@ -3,6 +3,7 @@
#include <mutex>
#include <string.h>
#include <deque>
+#include <limits>
namespace hnswlib {
typedef unsigned short int vl_type;
@@ -14,7 +15,7 @@ namespace hnswlib {
unsigned int numelements;
VisitedList(int numelements1) {
- curV = -1;
+ curV = std::numeric_limits<vl_type>::max();
numelements = numelements1;
mass = new vl_type[numelements];
}
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/modules/FindAVX.cmake | # =============================================================================
# Copyright (c) 2016- Facebook, Inc (Adam Paszke)
# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
#
# Note: This file was copied from PyTorch and modified for use in the RAFT library.
# Refer to thirdparty/LICENSES/LICENSE.pytorch for license and additional
# copyright information.
# =============================================================================
INCLUDE(CheckCXXSourceRuns)

# Minimal test programs for the CHECK_SSE macro below. Each snippet must both
# COMPILE and RUN successfully for the corresponding instruction set to be
# reported as supported (i.e. the configure machine's CPU matters too).

# AVX: 256-bit float broadcast.
SET(AVX_CODE
  "
  #include <immintrin.h>
  int main()
  {
    __m256 a;
    a = _mm256_set1_ps(0);
    return 0;
  }
"
)

# AVX512: 512-bit integer init plus a byte-wise compare mask
# (exercises AVX512F and AVX512BW).
SET(AVX512_CODE
  "
  #include <immintrin.h>
  int main()
  {
    __m512i a = _mm512_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0);
    __m512i b = a;
    __mmask64 equality_mask = _mm512_cmp_epi8_mask(a, b, _MM_CMPINT_EQ);
    return 0;
  }
"
)

# AVX2: 256-bit integer abs and a 64-bit lane extract
# (the deliberately uninitialized `x` only feeds a compile/run probe).
SET(AVX2_CODE
  "
  #include <immintrin.h>
  int main()
  {
    __m256i a = {0};
    a = _mm256_abs_epi16(a);
    __m256i x;
    _mm256_extract_epi64(x, 0); // we rely on this in our AVX2 code
    return 0;
  }
"
)
# CHECK_SSE(<lang> <type> <flags>)
#
# Tries each candidate compiler flag in <flags> (a semicolon-separated list;
# the leading " " entry tests the compiler's default flags) until the test
# program stored in ${<type>_CODE} compiles and runs. On the first success it
# caches <lang>_<type>_FOUND=TRUE and the winning flag in <lang>_<type>_FLAGS;
# if nothing works it caches FOUND=FALSE with empty FLAGS.
MACRO(CHECK_SSE lang type flags)
  SET(__FLAG_I 1)
  # CHECK_CXX_SOURCE_RUNS reads CMAKE_REQUIRED_FLAGS; save and restore it so
  # the macro does not leak flags into the caller's subsequent checks.
  SET(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
  FOREACH(__FLAG ${flags})
    IF(NOT ${lang}_${type}_FOUND)
      SET(CMAKE_REQUIRED_FLAGS ${__FLAG})
      # Each flag's result is cached under a numbered variable so repeated
      # configure runs skip the compile-and-run probe.
      CHECK_CXX_SOURCE_RUNS("${${type}_CODE}" ${lang}_HAS_${type}_${__FLAG_I})
      IF(${lang}_HAS_${type}_${__FLAG_I})
        SET(${lang}_${type}_FOUND
            TRUE
            CACHE BOOL "${lang} ${type} support"
        )
        SET(${lang}_${type}_FLAGS
            "${__FLAG}"
            CACHE STRING "${lang} ${type} flags"
        )
      ENDIF()
      MATH(EXPR __FLAG_I "${__FLAG_I}+1")
    ENDIF()
  ENDFOREACH()
  SET(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})
  # Cache a negative result too, so the probes are not re-run every configure.
  IF(NOT ${lang}_${type}_FOUND)
    SET(${lang}_${type}_FOUND
        FALSE
        CACHE BOOL "${lang} ${type} support"
    )
    SET(${lang}_${type}_FLAGS
        ""
        CACHE STRING "${lang} ${type} flags"
    )
  ENDIF()
  MARK_AS_ADVANCED(${lang}_${type}_FOUND ${lang}_${type}_FLAGS)
ENDMACRO()
# The C-language variants are unused in RAFT and kept only for reference:
# CHECK_SSE(C "AVX" " ;-mavx;/arch:AVX") CHECK_SSE(C "AVX2" " ;-mavx2 -mfma;/arch:AVX2") CHECK_SSE(C
# "AVX512" " ;-mavx512f -mavx512dq -mavx512vl -mavx512bw -mfma;/arch:AVX512")
#
# Each flag list tries, in order: no extra flag, the GCC/Clang flag(s), and
# the MSVC /arch flag.
CHECK_SSE(CXX "AVX" " ;-mavx;/arch:AVX")
CHECK_SSE(CXX "AVX2" " ;-mavx2 -mfma;/arch:AVX2")
CHECK_SSE(CXX "AVX512" " ;-mavx512f -mavx512dq -mavx512vl -mavx512bw -mfma;/arch:AVX512")
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/modules/ConfigureCUDA.cmake | # =============================================================================
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Optionally silence deprecation warnings for both host-only (CXX) and CUDA
# translation units.
if(DISABLE_DEPRECATION_WARNINGS)
  list(APPEND RAFT_CXX_FLAGS -Wno-deprecated-declarations)
  list(APPEND RAFT_CUDA_FLAGS -Xcompiler=-Wno-deprecated-declarations)
endif()

# Be very strict when compiling with GCC as host compiler (and thus more lenient when compiling with
# clang)
if(CMAKE_COMPILER_IS_GNUCXX)
  list(APPEND RAFT_CXX_FLAGS -Wall -Werror -Wno-unknown-pragmas -Wno-error=deprecated-declarations)
  list(APPEND RAFT_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)
  # set warnings as errors (-Werror=all-warnings requires nvcc >= 11.2)
  if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 11.2.0)
    list(APPEND RAFT_CUDA_FLAGS -Werror=all-warnings)
  endif()
endif()

# Emit a per-file nvcc compile-time report when requested.
if(CUDA_LOG_COMPILE_TIME)
  list(APPEND RAFT_CUDA_FLAGS "--time=nvcc_compile_log.csv")
endif()

# Extended lambdas and relaxed constexpr are required by RAFT device code.
list(APPEND RAFT_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)

# Use a per-thread default CUDA stream in both host and device compilation.
list(APPEND RAFT_CXX_FLAGS "-DCUDA_API_PER_THREAD_DEFAULT_STREAM")
list(APPEND RAFT_CUDA_FLAGS "-DCUDA_API_PER_THREAD_DEFAULT_STREAM")

# make sure we produce smallest binary size
list(APPEND RAFT_CUDA_FLAGS -Xfatbin=-compress-all)

# Option to enable line info in CUDA device compilation to allow introspection when profiling /
# memchecking
if(CUDA_ENABLE_LINEINFO)
  list(APPEND RAFT_CUDA_FLAGS -lineinfo)
endif()

# Forward the host OpenMP flags through nvcc when OpenMP was found.
if(OpenMP_FOUND)
  list(APPEND RAFT_CUDA_FLAGS -Xcompiler=${OpenMP_CXX_FLAGS})
endif()

# Debug options: device debug symbols (-G) and rdynamic for better backtraces.
if(CMAKE_BUILD_TYPE MATCHES Debug)
  message(VERBOSE "RAFT: Building with debugging flags")
  list(APPEND RAFT_CUDA_FLAGS -G -Xcompiler=-rdynamic)
  list(APPEND RAFT_CUDA_FLAGS -Xptxas --suppress-stack-size-warning)
endif()
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/thirdparty/get_glog.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_glog)
    # Find or clone glog via CPM and register it with the raft export sets.
    # Parsed one-value args: VERSION FORK PINNED_TAG EXCLUDE_FROM_ALL.
    # (${options} / ${multiValueArgs} are intentionally unset and expand empty.)
    set(oneValueArgs VERSION FORK PINNED_TAG EXCLUDE_FROM_ALL)
    cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
                          "${multiValueArgs}" ${ARGN} )

    rapids_cpm_find(glog ${PKG_VERSION}
        GLOBAL_TARGETS glog::glog
        BUILD_EXPORT_SET raft-exports
        INSTALL_EXPORT_SET raft-exports
        CPM_ARGS
        GIT_REPOSITORY https://github.com/${PKG_FORK}/glog.git
        GIT_TAG ${PKG_PINNED_TAG}
        EXCLUDE_FROM_ALL ${PKG_EXCLUDE_FROM_ALL}
    )

    # glog_ADDED is set by CPM only when the package was downloaded/built here.
    if(glog_ADDED)
        message(VERBOSE "RAFT: Using glog located in ${glog_SOURCE_DIR}")
    else()
        message(VERBOSE "RAFT: Using glog located in ${glog_DIR}")
    endif()
endfunction()
# Change pinned tag here to test a commit in CI.
# To use a different glog locally, set the CMake variable
# CPM_glog_SOURCE=/path/to/local/glog
find_and_configure_glog(VERSION 0.6.0
        FORK google
        PINNED_TAG v0.6.0
        EXCLUDE_FROM_ALL ON
        )
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/thirdparty/get_thrust.cmake | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Use CPM to find or clone thrust
# Fetch Thrust through rapids-cmake's CPM wrapper and attach it to both the
# build-time and install-time raft export sets so consumers resolve the same
# Thrust version RAFT was built against.
function(find_and_configure_thrust)
  include(${rapids-cmake-dir}/cpm/thrust.cmake)
  rapids_cpm_thrust(NAMESPACE raft
                    BUILD_EXPORT_SET raft-exports
                    INSTALL_EXPORT_SET raft-exports)
endfunction()

find_and_configure_thrust()
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/thirdparty/get_spdlog.cmake | # =============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Use CPM to find or clone spdlog
function(find_and_configure_spdlog)
  # Fetch spdlog via rapids-cmake, configured to use an external header-only
  # fmt (EXTERNAL_FMT_HO) rather than spdlog's bundled copy.
  # NOTE(review): this file registers spdlog under the *rmm*-exports export
  # set (it mirrors RMM's own get_spdlog.cmake) — confirm raft-exports was
  # not intended here.
  include(${rapids-cmake-dir}/cpm/spdlog.cmake)
  rapids_cpm_spdlog(FMT_OPTION "EXTERNAL_FMT_HO" INSTALL_EXPORT_SET rmm-exports)
  rapids_export_package(BUILD spdlog rmm-exports)

  # When spdlog was downloaded/built here, generate its config files and tell
  # downstream consumers where to find them.
  if(spdlog_ADDED)
    rapids_export(
      BUILD spdlog
      EXPORT_SET spdlog
      GLOBAL_TARGETS spdlog spdlog_header_only
      NAMESPACE spdlog::)
    include("${rapids-cmake-dir}/export/find_package_root.cmake")
    rapids_export_find_package_root(BUILD spdlog [=[${CMAKE_CURRENT_LIST_DIR}]=] EXPORT_SET rmm-exports)
  endif()
endfunction()

find_and_configure_spdlog()
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/thirdparty/get_cutlass.cmake | # =============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
function(find_and_configure_cutlass)
  # Find or clone NVIDIA CUTLASS via CPM, header-only and under a
  # RAFT-private namespace so it cannot clash with a consumer's own CUTLASS,
  # and register it with the raft export sets.
  # Parsed one-value args: VERSION REPOSITORY PINNED_TAG.
  set(oneValueArgs VERSION REPOSITORY PINNED_TAG)
  cmake_parse_arguments(PKG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

  # if(RAFT_ENABLE_DIST_DEPENDENCIES OR RAFT_COMPILE_LIBRARIES)
  set(CUTLASS_ENABLE_HEADERS_ONLY
      ON
      CACHE BOOL "Enable only the header library"
  )
  # Private namespace: avoids ODR/symbol clashes with other CUTLASS copies.
  set(CUTLASS_NAMESPACE
      "raft_cutlass"
      CACHE STRING "Top level namespace of CUTLASS"
  )
  set(CUTLASS_ENABLE_CUBLAS
      OFF
      CACHE BOOL "Disable CUTLASS to build with cuBLAS library."
  )

  # Work around CUTLASS's cmake when linking the CUDA runtime statically.
  if (CUDA_STATIC_RUNTIME)
    set(CUDART_LIBRARY "${CUDA_cudart_static_LIBRARY}" CACHE FILEPATH "fixing cutlass cmake code" FORCE)
  endif()

  rapids_cpm_find(
    NvidiaCutlass ${PKG_VERSION}
    GLOBAL_TARGETS nvidia::cutlass::cutlass
    CPM_ARGS
    GIT_REPOSITORY ${PKG_REPOSITORY}
    GIT_TAG ${PKG_PINNED_TAG}
    GIT_SHALLOW TRUE
    OPTIONS "CUDAToolkit_ROOT ${CUDAToolkit_LIBRARY_DIR}"
  )

  # Provide the canonical namespaced alias when CPM built the raw target.
  if(TARGET CUTLASS AND NOT TARGET nvidia::cutlass::cutlass)
    add_library(nvidia::cutlass::cutlass ALIAS CUTLASS)
  endif()

  if(NvidiaCutlass_ADDED)
    rapids_export(
      BUILD NvidiaCutlass
      EXPORT_SET NvidiaCutlass
      GLOBAL_TARGETS nvidia::cutlass::cutlass
      NAMESPACE nvidia::cutlass::
    )
  endif()
  # endif()

  # We generate the cutlass-config files when we built cutlass locally, so always do
  # `find_dependency`
  rapids_export_package(
    BUILD NvidiaCutlass raft-exports GLOBAL_TARGETS nvidia::cutlass::cutlass
  )
  rapids_export_package(
    INSTALL NvidiaCutlass raft-exports GLOBAL_TARGETS nvidia::cutlass::cutlass
  )

  # Tell cmake where it can find the generated NvidiaCutlass-config.cmake we wrote.
  include("${rapids-cmake-dir}/export/find_package_root.cmake")
  rapids_export_find_package_root(
    INSTALL NvidiaCutlass [=[${CMAKE_CURRENT_LIST_DIR}/../]=]
    EXPORT_SET raft-exports
  )
  rapids_export_find_package_root(
    BUILD NvidiaCutlass [=[${CMAKE_CURRENT_LIST_DIR}]=]
    EXPORT_SET raft-exports
  )
endfunction()
# Callers/CI may pre-set RAFT_CUTLASS_GIT_TAG / RAFT_CUTLASS_GIT_REPOSITORY
# to test an alternate CUTLASS commit or fork.
if(NOT RAFT_CUTLASS_GIT_TAG)
  set(RAFT_CUTLASS_GIT_TAG v2.10.0)
endif()

if(NOT RAFT_CUTLASS_GIT_REPOSITORY)
  set(RAFT_CUTLASS_GIT_REPOSITORY https://github.com/NVIDIA/cutlass.git)
endif()

find_and_configure_cutlass(
  VERSION 2.10.0 REPOSITORY ${RAFT_CUTLASS_GIT_REPOSITORY} PINNED_TAG ${RAFT_CUTLASS_GIT_TAG}
)
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/thirdparty/get_hnswlib.cmake | #=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_hnswlib)
    # Clone (once) and patch hnswlib, then detect the best available SIMD
    # instruction set and publish the matching compile flags to the caller
    # in HNSW_CXX_FLAGS.
    # Parsed one-value args: VERSION FORK PINNED_TAG EXCLUDE_FROM_ALL.
    set(oneValueArgs VERSION FORK PINNED_TAG EXCLUDE_FROM_ALL)
    cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
                          "${multiValueArgs}" ${ARGN} )

    set(EXTERNAL_INCLUDES_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
    if(NOT EXISTS ${EXTERNAL_INCLUDES_DIRECTORY}/_deps/hnswlib-src)
      # BUGFIX: use the parsed FORK/PINNED_TAG instead of hard-coding the
      # nmslib repo and the v0.6.2 branch, so callers can actually pin a
      # different fork or version. (The current call site passes the same
      # values, so behavior for the default invocation is unchanged.)
      execute_process (
        COMMAND git clone --branch=${PKG_PINNED_TAG} https://github.com/${PKG_FORK}/hnswlib.git hnswlib-src
        WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/_deps )

      message("SOURCE ${CMAKE_CURRENT_SOURCE_DIR}")
      # Apply RAFT's local patch on top of the pristine clone.
      execute_process (
        COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/cmake/patches/hnswlib.patch
        WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/_deps/hnswlib-src
      )
    endif ()

    include(cmake/modules/FindAVX.cmake)

    # BUGFIX: prefer the widest supported instruction set, AVX512 > AVX2 >
    # AVX. The original chain tested plain AVX first, so AVX2/AVX512-capable
    # machines were silently limited to AVX flags.
    set(HNSW_CXX_FLAGS "")
    if(CXX_AVX512_FOUND)
      set(HNSW_CXX_FLAGS "${HNSW_CXX_FLAGS} ${CXX_AVX512_FLAGS}")
    elseif(CXX_AVX2_FOUND)
      set(HNSW_CXX_FLAGS "${HNSW_CXX_FLAGS} ${CXX_AVX2_FLAGS}")
    elseif(CXX_AVX_FOUND)
      set(HNSW_CXX_FLAGS "${HNSW_CXX_FLAGS} ${CXX_AVX_FLAGS}")
    endif()
    # BUGFIX: function() introduces a new variable scope; without
    # PARENT_SCOPE the computed flags were invisible to the caller.
    set(HNSW_CXX_FLAGS "${HNSW_CXX_FLAGS}" PARENT_SCOPE)
endfunction()
# Change pinned tag here to test a different hnswlib commit in CI.
# NOTE(review): find_and_configure_hnswlib clones sources directly (no CPM),
# so CPM_<pkg>_SOURCE overrides do not apply to this dependency.
find_and_configure_hnswlib(VERSION 0.6.2
                           FORK nmslib
                           PINNED_TAG v0.6.2
                           EXCLUDE_FROM_ALL YES)
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/thirdparty/get_gtest.cmake | #=============================================================================
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
# Pull in GoogleTest through the rapids-cmake CPM wrapper; all version and
# export handling is delegated to rapids_cpm_gtest().
function(find_and_configure_gtest)
  include(${rapids-cmake-dir}/cpm/gtest.cmake)
  rapids_cpm_gtest()
endfunction()

find_and_configure_gtest()
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/thirdparty/get_nlohmann_json.cmake | #=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_nlohmann_json)
    # Find or clone nlohmann/json via CPM and register it with the
    # raft-bench-ann export sets.
    # Parsed one-value args: VERSION FORK PINNED_TAG EXCLUDE_FROM_ALL.
    set(oneValueArgs VERSION FORK PINNED_TAG EXCLUDE_FROM_ALL)
    cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
                          "${multiValueArgs}" ${ARGN} )

    rapids_cpm_find(nlohmann_json ${PKG_VERSION}
        GLOBAL_TARGETS nlohmann_json::nlohmann_json
        BUILD_EXPORT_SET raft-bench-ann-exports
        INSTALL_EXPORT_SET raft-bench-ann-exports
        CPM_ARGS
        GIT_REPOSITORY https://github.com/${PKG_FORK}/json.git
        GIT_TAG ${PKG_PINNED_TAG}
        EXCLUDE_FROM_ALL ${PKG_EXCLUDE_FROM_ALL})
endfunction()
# Change pinned tag here to test a commit in CI.
# To use a different nlohmann/json locally, set the CMake variable
# CPM_nlohmann_json_SOURCE=/path/to/local/json
find_and_configure_nlohmann_json(VERSION 3.11.2
                                 FORK nlohmann
                                 PINNED_TAG v3.11.2
                                 EXCLUDE_FROM_ALL YES)
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/thirdparty/get_ggnn.cmake | #=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_ggnn)
    # Clone (once) and patch GGNN. GGNN is consumed as in-tree sources, so
    # this uses a raw `git clone` + `git apply` rather than CPM.
    # Parsed one-value args: VERSION FORK PINNED_TAG EXCLUDE_FROM_ALL
    # (VERSION and EXCLUDE_FROM_ALL are parsed but currently unused).
    set(oneValueArgs VERSION FORK PINNED_TAG EXCLUDE_FROM_ALL)
    cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
                          "${multiValueArgs}" ${ARGN} )

    set(EXTERNAL_INCLUDES_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/)
    # Skip the clone entirely when the sources already exist from a prior run.
    if (NOT EXISTS ${EXTERNAL_INCLUDES_DIRECTORY}/_deps/ggnn-src/)
      execute_process (
        COMMAND git clone "https://github.com/${PKG_FORK}/ggnn" --branch ${PKG_PINNED_TAG} ggnn-src
        WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/_deps/ )

      message("SOURCE ${CMAKE_CURRENT_SOURCE_DIR}")
      # Apply RAFT's local patch on top of the pristine clone.
      execute_process (
        COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/cmake/patches/ggnn.patch
        WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/_deps/ggnn-src
      )
    endif()
endfunction()
# Change pinned tag here to test a different GGNN commit in CI.
# NOTE(review): find_and_configure_ggnn clones sources directly (no CPM), so
# CPM_<pkg>_SOURCE overrides do not apply to this dependency.
find_and_configure_ggnn(VERSION 0.5
                        FORK cgtuebingen
                        PINNED_TAG release_0.5
                        EXCLUDE_FROM_ALL YES)
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/thirdparty/get_rmm.cmake | #=============================================================================
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
# Locate or download RMM through rapids-cmake and register it with both the
# build-time and install-time raft export sets.
function(find_and_configure_rmm)
  include(${rapids-cmake-dir}/cpm/rmm.cmake)
  rapids_cpm_rmm(BUILD_EXPORT_SET raft-exports INSTALL_EXPORT_SET raft-exports)
endfunction()

find_and_configure_rmm()
| 0 |
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/thirdparty/get_fmt.cmake | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Use CPM to find or clone fmt
# Fetch fmt through the rapids-cmake CPM wrapper. The rmm-exports sets are
# used here, mirroring RMM's own fmt configuration.
function(find_and_configure_fmt)
  include(${rapids-cmake-dir}/cpm/fmt.cmake)
  rapids_cpm_fmt(INSTALL_EXPORT_SET rmm-exports BUILD_EXPORT_SET rmm-exports)
endfunction()

find_and_configure_fmt()
rapidsai_public_repos/raft/cpp/cmake | rapidsai_public_repos/raft/cpp/cmake/thirdparty/get_faiss.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_faiss)
    # Find or build FAISS via CPM (optionally as a static library) and
    # register it with the raft-ann-bench export sets.
    # Parsed one-value args: VERSION REPOSITORY PINNED_TAG BUILD_STATIC_LIBS
    # EXCLUDE_FROM_ALL ENABLE_GPU.
    set(oneValueArgs VERSION REPOSITORY PINNED_TAG BUILD_STATIC_LIBS EXCLUDE_FROM_ALL ENABLE_GPU)
    cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
                          "${multiValueArgs}" ${ARGN} )

    rapids_find_generate_module(faiss
        HEADER_NAMES  faiss/IndexFlat.h
        LIBRARY_NAMES faiss
    )

    # Static builds force a CPM download so we control FAISS's build options.
    set(BUILD_SHARED_LIBS ON)
    if (PKG_BUILD_STATIC_LIBS)
      set(BUILD_SHARED_LIBS OFF)
      set(CPM_DOWNLOAD_faiss ON)
    endif()

    include(cmake/modules/FindAVX.cmake)

    # Link against AVX CPU lib if it exists
    set(RAFT_FAISS_GLOBAL_TARGETS faiss::faiss)
    set(RAFT_FAISS_EXPORT_GLOBAL_TARGETS faiss)
    set(RAFT_FAISS_OPT_LEVEL "generic")
    # BUGFIX: the "avx2" opt level and the faiss_avx2 target require AVX2
    # support. The original gated them on CXX_AVX_FOUND (plain AVX), which
    # would enable AVX2 code paths on AVX-only toolchains.
    if(CXX_AVX2_FOUND)
      set(RAFT_FAISS_OPT_LEVEL "avx2")
      list(APPEND RAFT_FAISS_GLOBAL_TARGETS faiss::faiss_avx2)
      list(APPEND RAFT_FAISS_EXPORT_GLOBAL_TARGETS faiss_avx2)
    endif()

    rapids_cpm_find(faiss ${PKG_VERSION}
        GLOBAL_TARGETS ${RAFT_FAISS_GLOBAL_TARGETS}
        CPM_ARGS
        GIT_REPOSITORY ${PKG_REPOSITORY}
        GIT_TAG ${PKG_PINNED_TAG}
        EXCLUDE_FROM_ALL ${PKG_EXCLUDE_FROM_ALL}
        OPTIONS
        "FAISS_ENABLE_GPU ${PKG_ENABLE_GPU}"
        "FAISS_ENABLE_PYTHON OFF"
        "FAISS_OPT_LEVEL ${RAFT_FAISS_OPT_LEVEL}"
        "FAISS_USE_CUDA_TOOLKIT_STATIC ${CUDA_STATIC_RUNTIME}"
        "BUILD_TESTING OFF"
        "CMAKE_MESSAGE_LOG_LEVEL VERBOSE"
    )

    # Provide namespaced aliases when CPM created the raw targets.
    if(TARGET faiss AND NOT TARGET faiss::faiss)
      add_library(faiss::faiss ALIAS faiss)
    endif()

    if(CXX_AVX2_FOUND)
      if(TARGET faiss_avx2 AND NOT TARGET faiss::faiss_avx2)
        add_library(faiss::faiss_avx2 ALIAS faiss_avx2)
      endif()
    endif()

    if(faiss_ADDED)
      rapids_export(BUILD faiss
          EXPORT_SET faiss-targets
          GLOBAL_TARGETS ${RAFT_FAISS_EXPORT_GLOBAL_TARGETS}
          NAMESPACE faiss::)
    endif()

    # We generate the faiss-config files when we built faiss locally, so always do `find_dependency`
    rapids_export_package(BUILD OpenMP raft-ann-bench-exports) # faiss uses openMP but doesn't export a need for it
    rapids_export_package(BUILD faiss raft-ann-bench-exports GLOBAL_TARGETS ${RAFT_FAISS_GLOBAL_TARGETS} ${RAFT_FAISS_EXPORT_GLOBAL_TARGETS})
    rapids_export_package(INSTALL faiss raft-ann-bench-exports GLOBAL_TARGETS ${RAFT_FAISS_GLOBAL_TARGETS} ${RAFT_FAISS_EXPORT_GLOBAL_TARGETS})

    # Tell cmake where it can find the generated faiss-config.cmake we wrote.
    include("${rapids-cmake-dir}/export/find_package_root.cmake")
    rapids_export_find_package_root(BUILD faiss [=[${CMAKE_CURRENT_LIST_DIR}]=]
        EXPORT_SET raft-ann-bench-exports)
endfunction()
# Callers/CI may pre-set RAFT_FAISS_GIT_TAG / RAFT_FAISS_GIT_REPOSITORY to
# test a different FAISS commit or fork.
if(NOT RAFT_FAISS_GIT_TAG)
  # TODO: Remove this once faiss supports FAISS_USE_CUDA_TOOLKIT_STATIC
  # (https://github.com/facebookresearch/faiss/pull/2446)
  set(RAFT_FAISS_GIT_TAG fea/statically-link-ctk)
  # set(RAFT_FAISS_GIT_TAG bde7c0027191f29c9dadafe4f6e68ca0ee31fb30)
endif()

if(NOT RAFT_FAISS_GIT_REPOSITORY)
  # TODO: Remove this once faiss supports FAISS_USE_CUDA_TOOLKIT_STATIC
  # (https://github.com/facebookresearch/faiss/pull/2446)
  set(RAFT_FAISS_GIT_REPOSITORY https://github.com/cjnolet/faiss.git)
  # set(RAFT_FAISS_GIT_REPOSITORY https://github.com/facebookresearch/faiss.git)
endif()

# RAFT_USE_FAISS_STATIC / RAFT_EXCLUDE_FAISS_FROM_ALL / RAFT_FAISS_ENABLE_GPU
# are expected to be defined by the including CMakeLists.
find_and_configure_faiss(VERSION 1.7.4
                         REPOSITORY ${RAFT_FAISS_GIT_REPOSITORY}
                         PINNED_TAG ${RAFT_FAISS_GIT_TAG}
                         BUILD_STATIC_LIBS ${RAFT_USE_FAISS_STATIC}
                         EXCLUDE_FROM_ALL ${RAFT_EXCLUDE_FAISS_FROM_ALL}
                         ENABLE_GPU ${RAFT_FAISS_ENABLE_GPU})
| 0 |
rapidsai_public_repos/raft/cpp/src | rapidsai_public_repos/raft/cpp/src/util/memory_pool.cpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/util/memory_pool-inl.hpp>
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/random/rmat_rectangular_generator_int_double.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common.cuh"
namespace raft::runtime::random {
// Out-of-line instantiation of rmat_rectangular_gen for IdxT=int,
// ProbT=double (FUNC_DEF is defined in common.cuh).
FUNC_DEF(int, double);
}  // namespace raft::runtime::random
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/random/common.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rmat_rectangular_generator.cuh>
#include <raft_runtime/random/rmat_rectangular_generator.hpp>
// FUNC_DEF(IdxT, ProbT) expands to an out-of-line definition of
// raft::runtime::random::rmat_rectangular_gen for the given index and
// probability types, forwarding to the header-only template in
// raft::random. Each rmat_rectangular_generator_<IdxT>_<ProbT>.cu
// translation unit invokes it exactly once, so each type combination is
// compiled in a single place.
#define FUNC_DEF(IdxT, ProbT)                                      \
  void rmat_rectangular_gen(raft::resources const& handle,         \
                            IdxT* out,                             \
                            IdxT* out_src,                         \
                            IdxT* out_dst,                         \
                            const ProbT* theta,                    \
                            IdxT r_scale,                          \
                            IdxT c_scale,                          \
                            IdxT n_edges,                          \
                            raft::random::RngState& r)             \
  {                                                                \
    raft::random::rmat_rectangular_gen<IdxT, ProbT>(out,           \
                                                    out_src,       \
                                                    out_dst,       \
                                                    theta,         \
                                                    r_scale,       \
                                                    c_scale,       \
                                                    n_edges,       \
                                                    resource::get_cuda_stream(handle), \
                                                    r);            \
  }
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/random/rmat_rectangular_generator_int64_float.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common.cuh"
namespace raft::runtime::random {
// Out-of-line instantiation of rmat_rectangular_gen for IdxT=int64_t,
// ProbT=float (FUNC_DEF is defined in common.cuh).
FUNC_DEF(int64_t, float);
}  // namespace raft::runtime::random
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/random/rmat_rectangular_generator_int_float.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common.cuh"
namespace raft::runtime::random {
// Out-of-line instantiation of rmat_rectangular_gen for IdxT=int,
// ProbT=float (FUNC_DEF is defined in common.cuh).
FUNC_DEF(int, float);
}  // namespace raft::runtime::random
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/random/rmat_rectangular_generator_int64_double.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common.cuh"
namespace raft::runtime::random {
// Out-of-line instantiation of rmat_rectangular_gen for IdxT=int64_t,
// ProbT=double (FUNC_DEF is defined in common.cuh).
FUNC_DEF(int64_t, double);
}  // namespace raft::runtime::random
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/distance/fused_l2_min_arg.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/device_mdarray.hpp>
#include <raft/core/kvp.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/distance/fused_l2_nn.cuh>
#include <raft/linalg/norm.cuh>
#include <thrust/for_each.h>
#include <thrust/tuple.h>
namespace raft::runtime::distance {
// Unary functor that extracts the key (= nearest-neighbor index) from a
// raft::KeyValuePair. Used below to strip the distances out of fusedL2NN's
// (index, distance) output.
template <typename IndexT, typename DataT>
struct KeyValueIndexOp {
  __host__ __device__ __forceinline__ IndexT
  operator()(const raft::KeyValuePair<IndexT, DataT>& a) const
  {
    return a.key;
  }
};
// For each of the m rows of x, computes the index of the nearest row of y
// under (squared) L2 distance and writes it to `min`.
//
// x:   [m, k] device matrix, y: [n, k] device matrix, min: [m] device output.
// If `sqrt` is true the true L2 distance is used instead of the squared
// distance (the argmin itself is unaffected).
// All work runs on the handle's stream; the stream is synchronized before
// returning, so `min` is ready on exit.
template <typename value_t, typename idx_t>
void compute_fused_l2_nn_min_arg(raft::resources const& handle,
                                 idx_t* min,
                                 const value_t* x,
                                 const value_t* y,
                                 idx_t m,
                                 idx_t n,
                                 idx_t k,
                                 bool sqrt)
{
  // Scratch buffer required by fusedL2NNMinReduce, one entry per row of x.
  // NOTE(review): element type is int while indices are idx_t — presumably
  // sufficient for the int instantiations below; confirm if idx_t widens.
  rmm::device_uvector<int> workspace(m, resource::get_cuda_stream(handle));
  // fusedL2NN emits (index, distance) pairs; distances are discarded below.
  auto kvp = raft::make_device_vector<raft::KeyValuePair<idx_t, value_t>>(handle, m);

  // Precompute squared row norms of x and y (rowNorm with L2Norm), which
  // fusedL2NN uses via ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y.
  rmm::device_uvector<value_t> x_norms(m, resource::get_cuda_stream(handle));
  rmm::device_uvector<value_t> y_norms(n, resource::get_cuda_stream(handle));
  raft::linalg::rowNorm(
    x_norms.data(), x, k, m, raft::linalg::L2Norm, true, resource::get_cuda_stream(handle));
  raft::linalg::rowNorm(
    y_norms.data(), y, k, n, raft::linalg::L2Norm, true, resource::get_cuda_stream(handle));

  raft::distance::fusedL2NNMinReduce(kvp.data_handle(),
                                     x,
                                     y,
                                     x_norms.data(),
                                     y_norms.data(),
                                     m,
                                     n,
                                     k,
                                     (void*)workspace.data(),
                                     sqrt,
                                     true,
                                     resource::get_cuda_stream(handle));

  // Project the (index, distance) pairs down to bare indices.
  KeyValueIndexOp<idx_t, value_t> conversion_op;
  thrust::transform(resource::get_thrust_policy(handle),
                    kvp.data_handle(),
                    kvp.data_handle() + m,
                    min,
                    conversion_op);
  // Block until `min` is fully written before the temporaries go out of scope.
  resource::sync_stream(handle);
}
// float/int instantiation of the fused L2 nearest-neighbor argmin; see
// compute_fused_l2_nn_min_arg above for parameter semantics.
void fused_l2_nn_min_arg(raft::resources const& handle,
                         int* min,
                         const float* x,
                         const float* y,
                         int m,
                         int n,
                         int k,
                         bool sqrt)
{
  compute_fused_l2_nn_min_arg<float, int>(handle, min, x, y, m, n, k, sqrt);
}
// double/int instantiation of the fused L2 nearest-neighbor argmin; see
// compute_fused_l2_nn_min_arg above for parameter semantics.
void fused_l2_nn_min_arg(raft::resources const& handle,
                         int* min,
                         const double* x,
                         const double* y,
                         int m,
                         int n,
                         int k,
                         bool sqrt)
{
  compute_fused_l2_nn_min_arg<double, int>(handle, min, x, y, m, n, k, sqrt);
}
} // end namespace raft::runtime::distance
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/distance/pairwise_distance.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/resources.hpp>
#include <raft/distance/distance.cuh>
#include <raft/distance/distance_types.hpp>
namespace raft::runtime::distance {
// Runtime entry point (float32): computes the m x n matrix of pairwise
// distances between the m x k matrix `x` and the n x k matrix `y` under
// `metric`, writing the result to `dists`. `metric_arg` parameterizes metrics
// that need it (e.g. Minkowski order).
//
// NOTE(review): `x` and `y` are declared non-const to match the runtime API
// signature; they appear to be pure inputs — confirm against
// raft::distance::pairwise_distance.
void pairwise_distance(raft::resources const& handle,
                       float* x,
                       float* y,
                       float* dists,
                       int m,
                       int n,
                       int k,
                       raft::distance::DistanceType metric,
                       bool isRowMajor,
                       float metric_arg)
{
  raft::distance::pairwise_distance<float, int>(
    handle, x, y, dists, m, n, k, metric, isRowMajor, metric_arg);
}
// Runtime entry point (float64): computes the m x n matrix of pairwise
// distances between the m x k matrix `x` and the n x k matrix `y` under
// `metric`, writing the result to `dists`. Note that `metric_arg` stays
// `float` even for double-precision data, matching the float overload.
void pairwise_distance(raft::resources const& handle,
                       double* x,
                       double* y,
                       double* dists,
                       int m,
                       int n,
                       int k,
                       raft::distance::DistanceType metric,
                       bool isRowMajor,
                       float metric_arg)
{
  raft::distance::pairwise_distance<double, int>(
    handle, x, y, dists, m, n, k, metric, isRowMajor, metric_arg);
}
} // namespace raft::runtime::distance | 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/neighbors/refine_d_int64_t_int8_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/neighbors/refine.cuh>
namespace raft::runtime::neighbors {
// Runtime entry point: refine approximate k-NN candidates for int8_t data held
// in device memory, writing the refined neighbor indices and their distances
// (under `metric`) to `indices` and `distances`. Delegates to
// raft::neighbors::refine.
void refine(raft::resources const& handle,
            raft::device_matrix_view<const int8_t, int64_t, row_major> dataset,
            raft::device_matrix_view<const int8_t, int64_t, row_major> queries,
            raft::device_matrix_view<const int64_t, int64_t, row_major> neighbor_candidates,
            raft::device_matrix_view<int64_t, int64_t, row_major> indices,
            raft::device_matrix_view<float, int64_t, row_major> distances,
            distance::DistanceType metric)
{
  raft::neighbors::refine<int64_t, int8_t, float, int64_t>(
    handle, dataset, queries, neighbor_candidates, indices, distances, metric);
}
} // namespace raft::runtime::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/neighbors/brute_force_knn_int64_t_float.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/neighbors/brute_force.cuh>
#include <raft_runtime/neighbors/brute_force.hpp>
#include <vector>
namespace raft::runtime::neighbors::brute_force {
// Instantiates the runtime brute-force k-NN entry point for one combination of
// (index dtype, data dtype, matrix index dtype, index layout, search layout).
// The generated knn() wraps the single index matrix in a one-element vector,
// as required by raft::neighbors::brute_force::knn, and forwards all other
// arguments unchanged.
#define RAFT_INST_BFKNN(IDX_T, DATA_T, MATRIX_IDX_T, INDEX_LAYOUT, SEARCH_LAYOUT)     \
  void knn(raft::resources const& handle,                                             \
           raft::device_matrix_view<const DATA_T, MATRIX_IDX_T, INDEX_LAYOUT> index,  \
           raft::device_matrix_view<const DATA_T, MATRIX_IDX_T, SEARCH_LAYOUT> search, \
           raft::device_matrix_view<IDX_T, MATRIX_IDX_T, row_major> indices,          \
           raft::device_matrix_view<DATA_T, MATRIX_IDX_T, row_major> distances,       \
           distance::DistanceType metric,                                             \
           std::optional<float> metric_arg,                                           \
           std::optional<IDX_T> global_id_offset)                                     \
  {                                                                                   \
    std::vector<raft::device_matrix_view<const DATA_T, MATRIX_IDX_T, INDEX_LAYOUT>> vec; \
    vec.push_back(index);                                                             \
    raft::neighbors::brute_force::knn(                                                \
      handle, vec, search, indices, distances, metric, metric_arg, global_id_offset); \
  }

RAFT_INST_BFKNN(int64_t, float, int64_t, raft::row_major, raft::row_major);

#undef RAFT_INST_BFKNN
} // namespace raft::runtime::neighbors::brute_force
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/neighbors/ivfpq_search_float_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/neighbors/ivf_pq.cuh>
#include <raft_runtime/neighbors/ivf_pq.hpp>
namespace raft::runtime::neighbors::ivf_pq {
/**
 * @brief Instantiates the runtime IVF-PQ search entry point for the given
 * data type T and index type IdxT. The generated search() forwards directly
 * to raft::neighbors::ivf_pq::search.
 */
#define RAFT_SEARCH_INST(T, IdxT)                                                                 \
  void search(raft::resources const& handle,                                                      \
              const raft::neighbors::ivf_pq::search_params& params,                               \
              const raft::neighbors::ivf_pq::index<IdxT>& idx,                                    \
              raft::device_matrix_view<const T, IdxT, row_major> queries,                         \
              raft::device_matrix_view<IdxT, IdxT, row_major> neighbors,                          \
              raft::device_matrix_view<float, IdxT, row_major> distances)                         \
  {                                                                                               \
    raft::neighbors::ivf_pq::search<T, IdxT>(handle, params, idx, queries, neighbors, distances); \
  }

RAFT_SEARCH_INST(float, int64_t);

// Fix: the previous #undef referenced a non-existent macro name
// (RAFT_INST_SEARCH), so RAFT_SEARCH_INST leaked past this point.
#undef RAFT_SEARCH_INST
} // namespace raft::runtime::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/neighbors/ivfpq_serialize.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/neighbors/ivf_pq.cuh>
#include <raft/neighbors/ivf_pq_serialize.cuh>
#include <raft_runtime/neighbors/ivf_pq.hpp>
namespace raft::runtime::neighbors::ivf_pq {
/**
 * @brief Write an IVF-PQ index (int64_t index type) to the file at `filename`.
 *
 * @param[in] handle   raft resources handle
 * @param[in] filename path of the output file
 * @param[in] index    the index to serialize
 */
void serialize(raft::resources const& handle,
               const std::string& filename,
               const raft::neighbors::ivf_pq::index<int64_t>& index)
{
  raft::neighbors::ivf_pq::serialize(handle, filename, index);
}  // fixed: removed stray ';' after the function body at namespace scope
} // namespace raft::runtime::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/neighbors/ivfpq_search_uint8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/neighbors/ivf_pq.cuh>
#include <raft_runtime/neighbors/ivf_pq.hpp>
namespace raft::runtime::neighbors::ivf_pq {
/**
 * @brief Instantiates the runtime IVF-PQ search entry point for the given
 * data type T and index type IdxT. The generated search() forwards directly
 * to raft::neighbors::ivf_pq::search.
 */
#define RAFT_SEARCH_INST(T, IdxT)                                                                 \
  void search(raft::resources const& handle,                                                      \
              const raft::neighbors::ivf_pq::search_params& params,                               \
              const raft::neighbors::ivf_pq::index<IdxT>& idx,                                    \
              raft::device_matrix_view<const T, IdxT, row_major> queries,                         \
              raft::device_matrix_view<IdxT, IdxT, row_major> neighbors,                          \
              raft::device_matrix_view<float, IdxT, row_major> distances)                         \
  {                                                                                               \
    raft::neighbors::ivf_pq::search<T, IdxT>(handle, params, idx, queries, neighbors, distances); \
  }

RAFT_SEARCH_INST(uint8_t, int64_t);

// Fix: the previous #undef referenced a non-existent macro name
// (RAFT_INST_SEARCH), so RAFT_SEARCH_INST leaked past this point.
#undef RAFT_SEARCH_INST
} // namespace raft::runtime::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/neighbors/refine_h_int64_t_uint8_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/neighbors/refine.cuh>
namespace raft::runtime::neighbors {
// Runtime entry point: refine approximate k-NN candidates for uint8_t data
// held in host memory, writing the refined neighbor indices and their
// distances (under `metric`) to `indices` and `distances`. Delegates to
// raft::neighbors::refine.
void refine(raft::resources const& handle,
            raft::host_matrix_view<const uint8_t, int64_t, row_major> dataset,
            raft::host_matrix_view<const uint8_t, int64_t, row_major> queries,
            raft::host_matrix_view<const int64_t, int64_t, row_major> neighbor_candidates,
            raft::host_matrix_view<int64_t, int64_t, row_major> indices,
            raft::host_matrix_view<float, int64_t, row_major> distances,
            distance::DistanceType metric)
{
  raft::neighbors::refine<int64_t, uint8_t, float, int64_t>(
    handle, dataset, queries, neighbor_candidates, indices, distances, metric);
}
} // namespace raft::runtime::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/neighbors/refine_h_int64_t_float.cu |
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/neighbors/refine.cuh>
namespace raft::runtime::neighbors {
// Runtime entry point: refine approximate k-NN candidates for float data held
// in host memory, writing the refined neighbor indices and their distances
// (under `metric`) to `indices` and `distances`. Delegates to
// raft::neighbors::refine.
void refine(raft::resources const& handle,
            raft::host_matrix_view<const float, int64_t, row_major> dataset,
            raft::host_matrix_view<const float, int64_t, row_major> queries,
            raft::host_matrix_view<const int64_t, int64_t, row_major> neighbor_candidates,
            raft::host_matrix_view<int64_t, int64_t, row_major> indices,
            raft::host_matrix_view<float, int64_t, row_major> distances,
            distance::DistanceType metric)
{
  raft::neighbors::refine<int64_t, float, float, int64_t>(
    handle, dataset, queries, neighbor_candidates, indices, distances, metric);
}
} // namespace raft::runtime::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/neighbors/ivf_flat_serialize.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sstream>
#include <string>
#include <raft/core/device_resources.hpp>
#include <raft/neighbors/ivf_flat_serialize.cuh>
#include <raft/neighbors/ivf_flat_types.hpp>
#include <raft_runtime/neighbors/ivf_flat.hpp>
namespace raft::runtime::neighbors::ivf_flat {
/**
 * @brief Instantiates the runtime (de)serialization entry points for an
 * IVF-Flat index with data type DTYPE and int64_t index type:
 *
 * - serialize_file / deserialize_file: write/read the index to/from the file
 *   named by `filename`.
 * - serialize / deserialize: write/read the index to/from an in-memory
 *   std::string buffer.
 *
 * All deserialization entry points validate the output index pointer before
 * doing any work and fail with RAFT_FAIL when it is null. (Fixes: removed
 * stray ';' after two generated function bodies; moved the null check in
 * deserialize() ahead of stream construction for consistency with
 * deserialize_file().)
 */
#define RAFT_IVF_FLAT_SERIALIZE_INST(DTYPE)                                              \
  void serialize_file(raft::resources const& handle,                                     \
                      const std::string& filename,                                       \
                      const raft::neighbors::ivf_flat::index<DTYPE, int64_t>& index)     \
  {                                                                                      \
    raft::neighbors::ivf_flat::serialize(handle, filename, index);                       \
  }                                                                                      \
                                                                                         \
  void deserialize_file(raft::resources const& handle,                                   \
                        const std::string& filename,                                     \
                        raft::neighbors::ivf_flat::index<DTYPE, int64_t>* index)         \
  {                                                                                      \
    if (!index) { RAFT_FAIL("Invalid index pointer"); }                                  \
    *index = raft::neighbors::ivf_flat::deserialize<DTYPE, int64_t>(handle, filename);   \
  }                                                                                      \
                                                                                         \
  void serialize(raft::resources const& handle,                                          \
                 std::string& str,                                                       \
                 const raft::neighbors::ivf_flat::index<DTYPE, int64_t>& index)          \
  {                                                                                      \
    std::stringstream os;                                                                \
    raft::neighbors::ivf_flat::serialize(handle, os, index);                             \
    str = os.str();                                                                      \
  }                                                                                      \
                                                                                         \
  void deserialize(raft::resources const& handle,                                        \
                   const std::string& str,                                               \
                   raft::neighbors::ivf_flat::index<DTYPE, int64_t>* index)              \
  {                                                                                      \
    if (!index) { RAFT_FAIL("Invalid index pointer"); }                                  \
    std::istringstream is(str);                                                          \
    *index = raft::neighbors::ivf_flat::deserialize<DTYPE, int64_t>(handle, is);         \
  }

RAFT_IVF_FLAT_SERIALIZE_INST(float);
RAFT_IVF_FLAT_SERIALIZE_INST(int8_t);
RAFT_IVF_FLAT_SERIALIZE_INST(uint8_t);

#undef RAFT_IVF_FLAT_SERIALIZE_INST
} // namespace raft::runtime::neighbors::ivf_flat
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/neighbors/refine_d_int64_t_float.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/neighbors/refine.cuh>
namespace raft::runtime::neighbors {
// Runtime entry point: refine approximate k-NN candidates for float data held
// in device memory, writing the refined neighbor indices and their distances
// (under `metric`) to `indices` and `distances`. Delegates to
// raft::neighbors::refine.
void refine(raft::resources const& handle,
            raft::device_matrix_view<const float, int64_t, row_major> dataset,
            raft::device_matrix_view<const float, int64_t, row_major> queries,
            raft::device_matrix_view<const int64_t, int64_t, row_major> neighbor_candidates,
            raft::device_matrix_view<int64_t, int64_t, row_major> indices,
            raft::device_matrix_view<float, int64_t, row_major> distances,
            distance::DistanceType metric)
{
  raft::neighbors::refine<int64_t, float, float, int64_t>(
    handle, dataset, queries, neighbor_candidates, indices, distances, metric);
}
} // namespace raft::runtime::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/neighbors/ivfpq_build.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/neighbors/ivf_pq.cuh>
#include <raft_runtime/neighbors/ivf_pq.hpp>
namespace raft::runtime::neighbors::ivf_pq {
// Instantiates the runtime IVF-PQ build/extend entry points for data type T
// and index type IdxT:
// - build (returning / out-parameter): trains a new index on `dataset`.
// - extend (returning / out-parameter): adds `new_vectors` (with optional
//   `new_indices`) to an existing index.
// All four simply forward to the corresponding raft::neighbors::ivf_pq calls.
#define RAFT_INST_BUILD_EXTEND(T, IdxT)                                                     \
  raft::neighbors::ivf_pq::index<IdxT> build(                                               \
    raft::resources const& handle,                                                          \
    const raft::neighbors::ivf_pq::index_params& params,                                    \
    raft::device_matrix_view<const T, IdxT, row_major> dataset)                             \
  {                                                                                         \
    return raft::neighbors::ivf_pq::build<T, IdxT>(handle, params, dataset);                \
  }                                                                                         \
  void build(raft::resources const& handle,                                                 \
             const raft::neighbors::ivf_pq::index_params& params,                           \
             raft::device_matrix_view<const T, IdxT, row_major> dataset,                    \
             raft::neighbors::ivf_pq::index<IdxT>* idx)                                     \
  {                                                                                         \
    *idx = raft::neighbors::ivf_pq::build<T, IdxT>(handle, params, dataset);                \
  }                                                                                         \
  raft::neighbors::ivf_pq::index<IdxT> extend(                                              \
    raft::resources const& handle,                                                          \
    raft::device_matrix_view<const T, IdxT, row_major> new_vectors,                         \
    std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices,                  \
    const raft::neighbors::ivf_pq::index<IdxT>& idx)                                        \
  {                                                                                         \
    return raft::neighbors::ivf_pq::extend<T, IdxT>(handle, new_vectors, new_indices, idx); \
  }                                                                                         \
  void extend(raft::resources const& handle,                                                \
              raft::device_matrix_view<const T, IdxT, row_major> new_vectors,               \
              std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices,        \
              raft::neighbors::ivf_pq::index<IdxT>* idx)                                    \
  {                                                                                         \
    raft::neighbors::ivf_pq::extend<T, IdxT>(handle, new_vectors, new_indices, idx);        \
  }

RAFT_INST_BUILD_EXTEND(float, int64_t);
RAFT_INST_BUILD_EXTEND(int8_t, int64_t);
RAFT_INST_BUILD_EXTEND(uint8_t, int64_t);

#undef RAFT_INST_BUILD_EXTEND
} // namespace raft::runtime::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/neighbors/cagra_build.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/neighbors/cagra.cuh>
#include <raft/neighbors/ivf_pq.cuh>
#include <raft/neighbors/ivf_pq_types.hpp>
#include <raft_runtime/neighbors/cagra.hpp>
namespace raft::runtime::neighbors::cagra {
// Instantiates the runtime CAGRA build entry points for data type T and graph
// index type IdxT. Two overload pairs are generated:
// - build(...) returning the index, for device-resident and host-resident
//   datasets respectively;
// - build_device / build_host, which assign the result into a caller-provided
//   index reference instead of returning it.
#define RAFT_INST_CAGRA_BUILD(T, IdxT)                                             \
  auto build(raft::resources const& handle,                                        \
             const raft::neighbors::cagra::index_params& params,                   \
             raft::device_matrix_view<const T, int64_t, row_major> dataset)        \
    ->raft::neighbors::cagra::index<T, IdxT>                                       \
  {                                                                                \
    return raft::neighbors::cagra::build<T, IdxT>(handle, params, dataset);        \
  }                                                                                \
                                                                                   \
  auto build(raft::resources const& handle,                                        \
             const raft::neighbors::cagra::index_params& params,                   \
             raft::host_matrix_view<const T, int64_t, row_major> dataset)          \
    ->raft::neighbors::cagra::index<T, IdxT>                                       \
  {                                                                                \
    return raft::neighbors::cagra::build<T, IdxT>(handle, params, dataset);        \
  }                                                                                \
                                                                                   \
  void build_device(raft::resources const& handle,                                 \
                    const raft::neighbors::cagra::index_params& params,            \
                    raft::device_matrix_view<const T, int64_t, row_major> dataset, \
                    raft::neighbors::cagra::index<T, IdxT>& idx)                   \
  {                                                                                \
    idx = build(handle, params, dataset);                                          \
  }                                                                                \
                                                                                   \
  void build_host(raft::resources const& handle,                                   \
                  const raft::neighbors::cagra::index_params& params,              \
                  raft::host_matrix_view<const T, int64_t, row_major> dataset,     \
                  raft::neighbors::cagra::index<T, IdxT>& idx)                     \
  {                                                                                \
    idx = build(handle, params, dataset);                                          \
  }

RAFT_INST_CAGRA_BUILD(float, uint32_t);
RAFT_INST_CAGRA_BUILD(int8_t, uint32_t);
RAFT_INST_CAGRA_BUILD(uint8_t, uint32_t);

#undef RAFT_INST_CAGRA_BUILD
// Instantiates the runtime CAGRA graph-optimization entry points for index
// type IdxT: optimize_device takes the input k-NN graph in device memory,
// optimize_host takes it in host memory; both write the pruned/optimized
// graph into the host-resident `new_graph` via raft::neighbors::cagra::optimize.
#define RAFT_INST_CAGRA_OPTIMIZE(IdxT)                                        \
  void optimize_device(raft::resources const& handle,                         \
                       raft::device_matrix_view<IdxT, int64_t, row_major> knn_graph, \
                       raft::host_matrix_view<IdxT, int64_t, row_major> new_graph) \
  {                                                                           \
    raft::neighbors::cagra::optimize(handle, knn_graph, new_graph);           \
  }                                                                           \
  void optimize_host(raft::resources const& handle,                           \
                     raft::host_matrix_view<IdxT, int64_t, row_major> knn_graph, \
                     raft::host_matrix_view<IdxT, int64_t, row_major> new_graph) \
  {                                                                           \
    raft::neighbors::cagra::optimize(handle, knn_graph, new_graph);           \
  }

RAFT_INST_CAGRA_OPTIMIZE(uint32_t);

#undef RAFT_INST_CAGRA_OPTIMIZE
} // namespace raft::runtime::neighbors::cagra
| 0 |
rapidsai_public_repos/raft/cpp/src/raft_runtime | rapidsai_public_repos/raft/cpp/src/raft_runtime/neighbors/ivf_flat_search.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/neighbors/ivf_flat.cuh>
#include <raft_runtime/neighbors/ivf_flat.hpp>
namespace raft::runtime::neighbors::ivf_flat {
// Instantiates the runtime IVF-Flat search entry point for data type T and
// index type IdxT. The generated search() forwards directly to
// raft::neighbors::ivf_flat::search.
#define RAFT_INST_SEARCH(T, IdxT)                                         \
  void search(raft::resources const& handle,                              \
              raft::neighbors::ivf_flat::search_params const& params,     \
              const raft::neighbors::ivf_flat::index<T, IdxT>& index,     \
              raft::device_matrix_view<const T, IdxT, row_major> queries, \
              raft::device_matrix_view<IdxT, IdxT, row_major> neighbors,  \
              raft::device_matrix_view<float, IdxT, row_major> distances) \
  {                                                                       \
    raft::neighbors::ivf_flat::search<T, IdxT>(                           \
      handle, params, index, queries, neighbors, distances);              \
  }

RAFT_INST_SEARCH(float, int64_t);
RAFT_INST_SEARCH(int8_t, int64_t);
RAFT_INST_SEARCH(uint8_t, int64_t);

#undef RAFT_INST_SEARCH
} // namespace raft::runtime::neighbors::ivf_flat
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.