repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/distance/distance_l1.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "distance_common.cuh"
namespace cuvs::bench::distance {
// Register L1 (Manhattan) distance benchmarks for float and double over the
// shared dist_input_vecs size sweep (see distance_common.cuh).
DIST_BENCH_REGISTER(DistanceL1, cuvs::distance::DistanceType::L1);
} // namespace cuvs::bench::distance
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/distance/masked_nn.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <common/benchmark.hpp>
#include <cuvs/distance/masked_nn.cuh>
#include <limits>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/handle.hpp>
#include <raft/linalg/norm.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace cuvs::bench::distance::masked_nn {

// Introduce various sparsity patterns for the adjacency mask. The pattern
// controls which (row, group) pairs of the masked L2 NN computation are
// enabled, and therefore how much work the kernel can skip.
enum AdjacencyPattern {
  checkerboard    = 0,  // alternate true/false per row
  checkerboard_4  = 1,  // alternate in bands of 4 rows
  checkerboard_64 = 2,  // alternate in bands of 64 rows
  all_true        = 3,  // nothing masked; comparable to fused L2 NN
  all_false       = 4   // everything masked; all work can be skipped
};

// Benchmark problem shape: x is (m x k), y is (n x k); the n rows of y are
// split into num_groups groups, masked per `pattern`.
struct Params {
  int m, n, k, num_groups;
  AdjacencyPattern pattern;
};  // struct Params
// Fills the (m x num_groups) adjacency mask according to `pattern` and writes
// the group boundary indices. Launched with a 2D grid/block; both loops are
// grid-strided so any launch configuration is valid.
RAFT_KERNEL init_adj(AdjacencyPattern pattern,
                     int n,
                     raft::device_matrix_view<bool, int, raft::layout_c_contiguous> adj,
                     raft::device_vector_view<int, int, raft::layout_c_contiguous> group_idxs)
{
  int m          = adj.extent(0);
  int num_groups = adj.extent(1);

  for (int idx_m = blockIdx.y * blockDim.y + threadIdx.y; idx_m < m;
       idx_m += blockDim.y * gridDim.y) {
    for (int idx_g = blockIdx.x * blockDim.x + threadIdx.x; idx_g < num_groups;
         idx_g += blockDim.x * gridDim.x) {
      switch (pattern) {
        case checkerboard: adj(idx_m, idx_g) = (idx_m + idx_g) % 2; break;
        case checkerboard_4: adj(idx_m, idx_g) = (idx_m / 4 + idx_g) % 2; break;
        case checkerboard_64: adj(idx_m, idx_g) = (idx_m / 64 + idx_g) % 2; break;
        case all_true: adj(idx_m, idx_g) = true; break;
        case all_false: adj(idx_m, idx_g) = false; break;
        default: assert(false && "unknown pattern");
      }
    }
  }

  // Each group is of size n / num_groups.
  //
  // - group_idxs[j] indicates the start of group j + 1 (i.e. is the inclusive
  //   scan of the group lengths)
  //
  // - The first group always starts at index zero, so we do not store it.
  //
  // - The group_idxs[num_groups - 1] should always equal n.
  if (blockIdx.y == 0 && threadIdx.y == 0) {
    const int g_stride = blockDim.x * gridDim.x;
    for (int idx_g = blockIdx.x * blockDim.x + threadIdx.x; idx_g < num_groups; idx_g += g_stride) {
      // The last group absorbs the remainder when num_groups does not divide
      // n evenly. Writing it here, by the single thread that owns this index,
      // fixes a data race in the previous version: an unconditional
      // `group_idxs(num_groups - 1) = n` after the loop was unsynchronized
      // with the loop's own store to the same element from another thread, so
      // the stale scan value could land last when n % num_groups != 0.
      group_idxs(idx_g) = (idx_g == num_groups - 1) ? n : (idx_g + 1) * (n / num_groups);
    }
  }
}
// Benchmark fixture for masked L2 nearest-neighbor reduction: for each of the
// m rows of x, find the nearest of the n rows of y, restricted to the groups
// enabled by the adjacency mask. Reports virtual FLOP rates so runs can be
// compared against fusedL2NN.
template <typename T>
struct masked_l2_nn : public fixture {
  using DataT      = T;
  using IdxT       = int;
  using OutT       = raft::KeyValuePair<IdxT, DataT>;
  using RedOpT     = cuvs::distance::MinAndDistanceReduceOp<int, DataT>;
  using PairRedOpT = cuvs::distance::KVPMinReduce<int, DataT>;
  using ParamT     = cuvs::distance::masked_l2_nn_params<RedOpT, PairRedOpT>;

  // Parameters
  Params params;
  // Data
  raft::device_vector<OutT, IdxT> out;         // per-row (index, distance) result
  raft::device_matrix<T, IdxT> x, y;           // (m x k) and (n x k) inputs
  raft::device_vector<DataT, IdxT> xn, yn;     // precomputed squared L2 row norms
  raft::device_matrix<bool, IdxT> adj;         // (m x num_groups) adjacency mask
  raft::device_vector<IdxT, IdxT> group_idxs;  // inclusive-scan group boundaries

  // Allocates all buffers, fills x/y with uniform random data, precomputes
  // row norms, initializes the output, and launches init_adj to build the mask.
  masked_l2_nn(const Params& p)
    : params(p),
      out{raft::make_device_vector<OutT, IdxT>(handle, p.m)},
      x{raft::make_device_matrix<DataT, IdxT>(handle, p.m, p.k)},
      y{raft::make_device_matrix<DataT, IdxT>(handle, p.n, p.k)},
      xn{raft::make_device_vector<DataT, IdxT>(handle, p.m)},
      yn{raft::make_device_vector<DataT, IdxT>(handle, p.n)},
      adj{raft::make_device_matrix<bool, IdxT>(handle, p.m, p.num_groups)},
      group_idxs{raft::make_device_vector<IdxT, IdxT>(handle, p.num_groups)}
  {
    raft::random::RngState r(123456ULL);

    uniform(handle, r, x.data_handle(), p.m * p.k, T(-1.0), T(1.0));
    uniform(handle, r, y.data_handle(), p.n * p.k, T(-1.0), T(1.0));
    raft::linalg::rowNorm(
      xn.data_handle(), x.data_handle(), p.k, p.m, raft::linalg::L2Norm, true, stream);
    raft::linalg::rowNorm(
      yn.data_handle(), y.data_handle(), p.k, p.n, raft::linalg::L2Norm, true, stream);
    // Seed the output with max distance so the min-reduction starts neutral.
    cuvs::distance::initialize<T, raft::KeyValuePair<int, T>, int>(
      handle, out.data_handle(), p.m, std::numeric_limits<T>::max(), RedOpT{});

    dim3 block(32, 32);
    dim3 grid(10, 10);
    init_adj<<<grid, block, 0, stream>>>(p.pattern, p.n, adj.view(), group_idxs.view());
    RAFT_CUDA_TRY(cudaGetLastError());
  }

  void run_benchmark(::benchmark::State& state) override
  {
    bool init_out = true;
    bool sqrt     = false;
    ParamT masked_l2_params{RedOpT{}, PairRedOpT{}, sqrt, init_out};

    loop_on_state(state, [this, masked_l2_params]() {
      // It is sufficient to only benchmark the L2-squared metric
      cuvs::distance::masked_l2_nn<DataT, OutT, IdxT>(handle,
                                                      masked_l2_params,
                                                      x.view(),
                                                      y.view(),
                                                      xn.view(),
                                                      yn.view(),
                                                      adj.view(),
                                                      group_idxs.view(),
                                                      out.view());
    });

    // Virtual flop count if no skipping had occurred.
    size_t virtual_flops = size_t(2) * size_t(params.m) * size_t(params.n) * size_t(params.k);

    int64_t read_elts  = params.n * params.k + params.m * params.k;
    int64_t write_elts = params.m;

    // Virtual min flops is the number of flops that would have been executed if
    // the algorithm had actually skipped each computation that it could have
    // skipped.
    size_t virtual_min_flops = 0;
    switch (params.pattern) {
      // Checkerboard patterns enable exactly half of the (row, group) pairs.
      case checkerboard:
      case checkerboard_4:
      case checkerboard_64: virtual_min_flops = virtual_flops / 2; break;
      case all_true: virtual_min_flops = virtual_flops; break;
      case all_false: virtual_min_flops = 0; break;
      default: assert(false && "unknown pattern");
    }

    // VFLOP/s is the "virtual" flop count that would have executed if there was
    // no adjacency pattern. This is useful for comparing to fusedL2NN
    state.counters["VFLOP/s"] = benchmark::Counter(virtual_flops,
                                                   benchmark::Counter::kIsIterationInvariantRate,
                                                   benchmark::Counter::OneK::kIs1000);
    // Virtual min flops is the number of flops that would have been executed if
    // the algorithm had actually skipped each computation that it could have
    // skipped.
    state.counters["VminFLOP/s"] = benchmark::Counter(virtual_min_flops,
                                                      benchmark::Counter::kIsIterationInvariantRate,
                                                      benchmark::Counter::OneK::kIs1000);

    state.counters["BW Wr"] = benchmark::Counter(write_elts * sizeof(OutT),
                                                 benchmark::Counter::kIsIterationInvariantRate,
                                                 benchmark::Counter::OneK::kIs1000);
    state.counters["BW Rd"] = benchmark::Counter(read_elts * sizeof(DataT),
                                                 benchmark::Counter::kIsIterationInvariantRate,
                                                 benchmark::Counter::OneK::kIs1000);

    state.counters["m"]          = benchmark::Counter(params.m);
    state.counters["n"]          = benchmark::Counter(params.n);
    state.counters["k"]          = benchmark::Counter(params.k);
    state.counters["num_groups"] = benchmark::Counter(params.num_groups);
    state.counters["group size"] = benchmark::Counter(params.n / params.num_groups);
    state.counters["Pat"]        = benchmark::Counter(static_cast<int>(params.pattern));

    state.counters["SM count"] = raft::getMultiProcessorCount();
  }
};
// Benchmark cases: {m, n, k, num_groups, pattern}. Covers skinny/fat matrices
// plus a sweep of k and every adjacency pattern at a representative size.
const std::vector<Params> masked_l2_nn_input_vecs = {
  // Very fat matrices...
  {32, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {64, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {128, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {256, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {512, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {1024, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 32, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 64, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 128, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 256, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 512, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 1024, 16384, 32, AdjacencyPattern::checkerboard},

  // Representative matrices...
  {16384, 16384, 32, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 64, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 128, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 256, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 512, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 1024, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 16384, 32, AdjacencyPattern::checkerboard},

  {16384, 16384, 32, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 64, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 128, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 256, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 512, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 1024, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 16384, 32, AdjacencyPattern::checkerboard_4},

  {16384, 16384, 32, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 64, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 128, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 256, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 512, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 1024, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 16384, 32, AdjacencyPattern::checkerboard_64},

  {16384, 16384, 32, 32, AdjacencyPattern::all_true},
  {16384, 16384, 64, 32, AdjacencyPattern::all_true},
  {16384, 16384, 128, 32, AdjacencyPattern::all_true},
  {16384, 16384, 256, 32, AdjacencyPattern::all_true},
  {16384, 16384, 512, 32, AdjacencyPattern::all_true},
  {16384, 16384, 1024, 32, AdjacencyPattern::all_true},
  {16384, 16384, 16384, 32, AdjacencyPattern::all_true},

  {16384, 16384, 32, 32, AdjacencyPattern::all_false},
  {16384, 16384, 64, 32, AdjacencyPattern::all_false},
  {16384, 16384, 128, 32, AdjacencyPattern::all_false},
  {16384, 16384, 256, 32, AdjacencyPattern::all_false},
  {16384, 16384, 512, 32, AdjacencyPattern::all_false},
  {16384, 16384, 1024, 32, AdjacencyPattern::all_false},
  {16384, 16384, 16384, 32, AdjacencyPattern::all_false},
};

RAFT_BENCH_REGISTER(masked_l2_nn<float>, "", masked_l2_nn_input_vecs);
// We don't benchmark double to keep compile times in check when not using the
// distance library.

} // namespace cuvs::bench::distance::masked_nn
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/distance/distance_exp_l2.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "distance_common.cuh"
namespace cuvs::bench::distance {
// Register benchmarks for the expanded-form L2 metrics: squared L2 and the
// square-root variant, each for float and double.
DIST_BENCH_REGISTER(DistanceL2Sq, cuvs::distance::DistanceType::L2Expanded);
DIST_BENCH_REGISTER(DistanceL2Sqrt, cuvs::distance::DistanceType::L2SqrtExpanded);
} // namespace cuvs::bench::distance
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/distance/kernels.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/distance/kernels.cuh>
#include <memory>
#include <raft/core/device_resources.hpp>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/random/rng.cuh>
#include <sstream>
#include <string>
#include <vector>
namespace cuvs::bench::distance::kernels {

using namespace cuvs::distance::kernels;

// Benchmark configuration for one Gram (kernel) matrix evaluation
// C (m x n) = kernel(A, B), with the kernel function chosen by kernel_params.
struct GramTestParams {
  int m;  // m parameter of the GEMM
  int k;  // k parameter of the GEMM
  int n;  // n parameter of the GEMM
  KernelParams kernel_params;
  bool is_row_major;
};  // struct GramTestParams
// Benchmark fixture computing the Gram matrix C = kernel(A, B) for the kernel
// function selected by the test parameters (linear / polynomial / tanh / RBF).
template <typename T>
struct GramMatrix : public fixture {
  // Creates the kernel object via KernelFactory, sizes the device buffers,
  // and fills A and B with uniform random values in [-1, 1].
  GramMatrix(const GramTestParams& p)
    : params(p), handle(stream), A(0, stream), B(0, stream), C(0, stream)
  {
    kernel = std::unique_ptr<GramMatrixBase<T>>(
      KernelFactory<T>::create(p.kernel_params, resource::get_cublas_handle(handle)));

    A.resize(params.m * params.k, stream);
    B.resize(params.k * params.n, stream);
    C.resize(params.m * params.n, stream);
    raft::random::RngState rng(123456ULL);
    raft::random::uniform(handle, rng, A.data(), params.m * params.k, T(-1.0), T(1.0));
    raft::random::uniform(handle, rng, B.data(), params.k * params.n, T(-1.0), T(1.0));
  }

  ~GramMatrix()
  {
    A.release();
    B.release();
    C.release();
  }

  void run_benchmark(::benchmark::State& state) override
  {
    if (!this->kernel) {
      state.SkipWithError("Kernel matrix is not initialized");
      // Bug fix: SkipWithError() only marks the run as skipped -- it does not
      // return from this function. Without this early return, the loop below
      // would dereference a null kernel pointer.
      return;
    }
    loop_on_state(state, [this]() {
      (*this->kernel)(A.data(),
                      this->params.m,
                      this->params.k,
                      B.data(),
                      this->params.n,
                      C.data(),
                      this->params.is_row_major,
                      this->stream);
    });
  }

 private:
  const raft::device_resources handle;
  std::unique_ptr<GramMatrixBase<T>> kernel;  // built by KernelFactory in the ctor
  GramTestParams params;

  rmm::device_uvector<T> A;  // input matrix A, size [m * k]
  rmm::device_uvector<T> B;  // input matrix B, size [n * k]
  rmm::device_uvector<T> C;  // output matrix C, size [m*n]
};
// Builds the full set of benchmark cases: the cartesian product of kernel
// types, problem sizes, and (column-major, row-major) layouts, in that
// nesting order.
static std::vector<GramTestParams> getInputs()
{
  struct TestSize {
    int m;
    int k;
    int n;
  };

  const std::vector<KernelParams> kernels{KernelParams{LINEAR, 3, 1, 0},
                                          KernelParams{POLYNOMIAL, 2, 1.3, 1},
                                          KernelParams{TANH, 2, 0.5, 2.4},
                                          KernelParams{RBF, 2, 0.5, 0}};
  const std::vector<TestSize> sizes{{4096, 10, 1024},
                                    {4096, 100, 1024},
                                    {4096, 1000, 1024},
                                    {4096, 10000, 1024},
                                    {100000, 10, 1024},
                                    {100000, 100, 1024},
                                    {100000, 1000, 1024}};

  std::vector<GramTestParams> cases;
  cases.reserve(kernels.size() * sizes.size());
  for (const auto& sz : sizes) {
    for (const auto& kp : kernels) {
      // Column-major first, then row-major, matching the original ordering.
      cases.push_back(GramTestParams{sz.m, sz.k, sz.n, kp, false});
      cases.push_back(GramTestParams{sz.m, sz.k, sz.n, kp, true});
    }
  }
  return cases;
}
// Register both precisions over the full input sweep.
RAFT_BENCH_REGISTER(GramMatrix<float>, "", getInputs());
RAFT_BENCH_REGISTER(GramMatrix<double>, "", getInputs());

} // namespace cuvs::bench::distance::kernels
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/distance/distance_common.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <cuvs/distance/distance.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace cuvs::bench::distance {

// Benchmark problem shape for pairwise distance: x is (m x k), y is (n x k),
// the output is (m x n); isRowMajor selects the layout of all three.
struct distance_params {
  int m, n, k;
  bool isRowMajor;
};  // struct distance_params
// Benchmark fixture for the pairwise distance primitive with metric DType
// between an (m x k) matrix x and an (n x k) matrix y.
template <typename T, cuvs::distance::DistanceType DType>
struct distance : public fixture {
  distance(const distance_params& p)
    : params(p),
      x(p.m * p.k, stream),
      y(p.n * p.k, stream),
      out(p.m * p.n, stream),
      workspace(0, stream)
  {
    // Inputs are zero-initialized; this benchmark measures throughput only.
    RAFT_CUDA_TRY(cudaMemsetAsync(x.data(), 0, x.size() * sizeof(T), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(y.data(), 0, y.size() * sizeof(T), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(out.data(), 0, out.size() * sizeof(T), stream));
    // Query and allocate the scratch space the metric needs up front, so the
    // timed loop performs no allocations.
    worksize = cuvs::distance::getWorkspaceSize<DType, T, T, T>(
      x.data(), y.data(), params.m, params.n, params.k);
    workspace.resize(worksize, stream);
  }

  void run_benchmark(::benchmark::State& state) override
  {
    loop_on_state(state, [this]() {
      cuvs::distance::distance<DType, T, T, T>(handle,
                                               x.data(),
                                               y.data(),
                                               out.data(),
                                               params.m,
                                               params.n,
                                               params.k,
                                               (void*)workspace.data(),
                                               worksize,
                                               params.isRowMajor);
    });
  }

 private:
  distance_params params;
  rmm::device_uvector<T> x, y, out;
  rmm::device_uvector<char> workspace;  // metric-specific scratch buffer
  size_t worksize;                      // size of `workspace` in bytes
};  // struct Distance
// Size sweep shared by all metric benchmarks: skinny, fat, and square shapes,
// each in row-major (true) and column-major (false) layout.
const std::vector<distance_params> dist_input_vecs{
  {32, 16384, 16384, true}, {64, 16384, 16384, true}, {128, 16384, 16384, true},
  {256, 16384, 16384, true}, {512, 16384, 16384, true}, {1024, 16384, 16384, true},
  {16384, 32, 16384, true}, {16384, 64, 16384, true}, {16384, 128, 16384, true},
  {16384, 256, 16384, true}, {16384, 512, 16384, true}, {16384, 1024, 16384, true},
  {16384, 16384, 32, true}, {16384, 16384, 64, true}, {16384, 16384, 128, true},
  {16384, 16384, 256, true}, {16384, 16384, 512, true}, {16384, 16384, 1024, true},
  {16384, 16384, 16384, true}, {32, 16384, 16384, false}, {64, 16384, 16384, false},
  {128, 16384, 16384, false}, {256, 16384, 16384, false}, {512, 16384, 16384, false},
  {1024, 16384, 16384, false}, {16384, 32, 16384, false}, {16384, 64, 16384, false},
  {16384, 128, 16384, false}, {16384, 256, 16384, false}, {16384, 512, 16384, false},
  {16384, 1024, 16384, false}, {16384, 16384, 32, false}, {16384, 16384, 64, false},
  {16384, 16384, 128, false}, {16384, 16384, 256, false}, {16384, 16384, 512, false},
  {16384, 16384, 1024, false}, {16384, 16384, 16384, false}
};

// Instantiates and registers float and double benchmarks for a given metric
// over the shared dist_input_vecs sweep.
#define DIST_BENCH_REGISTER(Name, Metric)            \
  using Name##F = distance<float, Metric>;           \
  RAFT_BENCH_REGISTER(Name##F, "", dist_input_vecs); \
  using Name##D = distance<double, Metric>;          \
  RAFT_BENCH_REGISTER(Name##D, "", dist_input_vecs);

} // namespace cuvs::bench::distance
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/distance/distance_cosine.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "distance_common.cuh"
namespace cuvs::bench::distance {
// Register expanded cosine distance benchmarks for float and double.
DIST_BENCH_REGISTER(DistanceCosine, cuvs::distance::DistanceType::CosineExpanded);
} // namespace cuvs::bench::distance
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/distance/fused_l2_nn.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <cuvs/distance/fused_l2_nn.cuh>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/norm.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace cuvs::bench::distance {
// Problem shape for the fused L2 NN benchmark: m queries, n database rows,
// k features.
struct fusedl2nn_inputs {
  int64_t m, n, k;
};  // struct fusedl2nn_inputs

// Streams the shape as "m#n#k"; used to build the benchmark label.
inline auto operator<<(std::ostream& os, const fusedl2nn_inputs& p) -> std::ostream&
{
  return os << p.m << "#" << p.n << "#" << p.k;
}
// Benchmark fixture for fusedL2NNMinReduce: for each of the m rows of x, find
// the minimum L2 distance (and, depending on OutT, its index) over the n rows
// of y, using precomputed row norms.
template <typename DataT, typename IdxT, typename OutT>
struct fusedl2nn : public fixture {
  fusedl2nn(const fusedl2nn_inputs& p)
    : params(p),
      workspace(this->handle),
      x(this->handle),
      y(this->handle),
      x_norm(this->handle),
      y_norm(this->handle),
      out(this->handle)
  {
  }

  // Allocates inputs/outputs, fills x and y with uniform random values in
  // [-1, 1], and precomputes the squared L2 row norms on `stream`.
  void allocate_data(const ::benchmark::State& state) override
  {
    x      = raft::make_device_matrix<DataT, IdxT>(handle, params.m, params.k);
    y      = raft::make_device_matrix<DataT, IdxT>(handle, params.n, params.k);
    x_norm = raft::make_device_vector<DataT, IdxT>(handle, params.m);
    y_norm = raft::make_device_vector<DataT, IdxT>(handle, params.n);
    out    = raft::make_device_vector<OutT, IdxT>(handle, params.m);

    raft::random::RngState rng{1234};
    raft::random::uniform(
      handle, rng, x.data_handle(), params.m * params.k, (DataT)-1.0, (DataT)1.0);
    raft::random::uniform(
      handle, rng, y.data_handle(), params.n * params.k, (DataT)-1.0, (DataT)1.0);

    // Pre-compute norms
    raft::linalg::rowNorm(x_norm.data_handle(),
                          x.data_handle(),
                          params.k,
                          params.m,
                          raft::linalg::L2Norm,
                          true,
                          stream);
    raft::linalg::rowNorm(y_norm.data_handle(),
                          y.data_handle(),
                          params.k,
                          params.n,
                          raft::linalg::L2Norm,
                          true,
                          stream);
    resource::sync_stream(handle, stream);
  }

  // One IdxT-sized scratch slot per output row for the reduction.
  void allocate_temp_buffers(const ::benchmark::State& state) override
  {
    workspace = raft::make_device_vector<char, IdxT>(handle, params.m * sizeof(IdxT));
  }

  void run_benchmark(::benchmark::State& state) override
  {
    std::ostringstream label_stream;
    label_stream << params;
    state.SetLabel(label_stream.str());

    loop_on_state(state, [this]() {
      cuvs::distance::fusedL2NNMinReduce<DataT, OutT, IdxT>(out.data_handle(),
                                                            x.data_handle(),
                                                            y.data_handle(),
                                                            x_norm.data_handle(),
                                                            y_norm.data_handle(),
                                                            static_cast<IdxT>(params.m),
                                                            static_cast<IdxT>(params.n),
                                                            static_cast<IdxT>(params.k),
                                                            (void*)workspace.data_handle(),
                                                            false,
                                                            true,
                                                            stream);
    });

    // Report FLOP rate (2 ops per multiply-accumulate) and read/write
    // bandwidth as iteration-invariant counters.
    int64_t num_flops = 2 * params.m * params.n * params.k;

    int64_t read_elts  = params.n * params.k + params.m * params.k;
    int64_t write_elts = params.m;

    state.counters["FLOP/s"] = benchmark::Counter(
      num_flops, benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1000);

    state.counters["BW Wr"] = benchmark::Counter(write_elts * sizeof(OutT),
                                                 benchmark::Counter::kIsIterationInvariantRate,
                                                 benchmark::Counter::OneK::kIs1000);
    state.counters["BW Rd"] = benchmark::Counter(read_elts * sizeof(DataT),
                                                 benchmark::Counter::kIsIterationInvariantRate,
                                                 benchmark::Counter::OneK::kIs1000);
  }

 private:
  fusedl2nn_inputs params;
  raft::device_matrix<DataT, IdxT> x, y;
  raft::device_vector<DataT, IdxT> x_norm, y_norm;
  raft::device_vector<OutT, IdxT> out;
  raft::device_vector<char, IdxT> workspace;
};  // struct fusedl2nn
// Builds the (m, n, k) sweep for the fused L2 NN benchmarks. The largest m is
// only included for 64-bit index types, where the element count fits.
template <typename IdxT>
std::vector<fusedl2nn_inputs> getFusedL2NNInputs()
{
  std::vector<int64_t> ms = {100000, 1000000};
  if constexpr (sizeof(IdxT) == 8) { ms.push_back(10000000); }
  const std::vector<int64_t> ns = {100, 1000, 10000};
  const std::vector<int64_t> ks = {64, 128, 256};

  std::vector<fusedl2nn_inputs> cases;
  cases.reserve(ms.size() * ns.size() * ks.size());
  for (auto m : ms) {
    for (auto n : ns) {
      for (auto k : ks) {
        cases.push_back({m, n, k});
      }
    }
  }
  return cases;
}
// Registers one fusedl2nn benchmark instantiation; RAFT_DEPAREN unwraps
// parenthesized (comma-containing) OutT types so they survive macro expansion.
#define FUSEDL2NN_BENCH(DataT, IdxT, OutT) \
  RAFT_BENCH_REGISTER((fusedl2nn<DataT, IdxT, RAFT_DEPAREN(OutT)>), "", getFusedL2NNInputs<IdxT>())

// Scalar outputs report only the min distance; KeyValuePair outputs also
// report the index of the nearest neighbor.
FUSEDL2NN_BENCH(float, int, float);
FUSEDL2NN_BENCH(double, int, double);
FUSEDL2NN_BENCH(float, int, (raft::KeyValuePair<int, float>));
FUSEDL2NN_BENCH(double, int, (raft::KeyValuePair<int, double>));
FUSEDL2NN_BENCH(float, int64_t, float);
FUSEDL2NN_BENCH(double, int64_t, double);
FUSEDL2NN_BENCH(float, int64_t, (raft::KeyValuePair<int64_t, float>));
FUSEDL2NN_BENCH(double, int64_t, (raft::KeyValuePair<int64_t, double>));

} // namespace cuvs::bench::distance
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/distance/distance_unexp_l2.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "distance_common.cuh"
namespace cuvs::bench::distance {
// Register benchmarks for the unexpanded (direct elementwise) L2 metrics:
// squared L2 and the square-root variant, each for float and double.
DIST_BENCH_REGISTER(DistanceUnexpL2Sq, cuvs::distance::DistanceType::L2Unexpanded);
DIST_BENCH_REGISTER(DistanceUnexpL2Sqrt, cuvs::distance::DistanceType::L2SqrtUnexpanded);
} // namespace cuvs::bench::distance
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/distance | rapidsai_public_repos/cuvs/cpp/bench/micro/distance/tune_pairwise/kernel.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.cuh"
#include <cuvs/distance/detail/pairwise_matrix/kernel_sm60.cuh> // pairwise_matrix_sm60_wrapper
#include <raft/linalg/contractions.cuh> // raft::linalg::Policy4x4
#include <raft/util/arch.cuh> // raft::util::arch::SM_compute_arch
namespace cuvs::bench::distance::tune {

// Distance op
// A single operation (unexpanded Lp with p = 2) is instantiated to keep
// compile times low; DataT, AccT, IdxT come from kernel.cuh.
using OpT                  = cuvs::distance::detail::ops::lp_unexp_distance_op<DataT, AccT, IdxT>;
constexpr float metric_arg = 2.0;
OpT distance_op{metric_arg};

// Kernel policy
constexpr int vec_len = 1;
using Policy          = typename raft::linalg::Policy4x4<DataT, vec_len>::Policy;

// Architecture
// Compile for the full range of supported architectures.
namespace arch                 = raft::util::arch;
constexpr auto sm_compat_range = arch::SM_range(arch::SM_min(), arch::SM_future());
// Launches the instantiated pairwise-distance kernel over `grid` on `stream`.
// Block size and shared-memory footprint are fixed by the compile-time Policy
// and distance op; launch-configuration errors surface via cudaGetLastError().
void launch_kernel(pairwise_matrix_params params, dim3 grid, cudaStream_t stream)
{
  dim3 block(Policy::Nthreads);
  int smem_size = OpT::shared_mem_size<Policy>();

  // Obtain function pointer to kernel
  auto kernel = cuvs::distance::detail::pairwise_matrix_kernel<Policy,
                                                               row_major,
                                                               decltype(sm_compat_range),
                                                               OpT,
                                                               IdxT,
                                                               DataT,
                                                               OutT,
                                                               FinOpT>;

  kernel<<<grid, block, smem_size, stream>>>(distance_op, params);
  RAFT_CUDA_TRY(cudaGetLastError());
}
// Returns the kernel's block tile sizes (Mblk x Nblk x Kblk) so the benchmark
// can size its inputs to a whole number of tiles.
void get_block_size(int& m, int& n, int& k)
{
  m = Policy::Mblk;
  n = Policy::Nblk;
  k = Policy::Kblk;
}
// Returns a type-erased pointer to the instantiated kernel, for use with the
// CUDA occupancy API (must match the instantiation used in launch_kernel).
void* get_kernel_ptr()
{
  auto kernel = cuvs::distance::detail::pairwise_matrix_kernel<Policy,
                                                               row_major,
                                                               decltype(sm_compat_range),
                                                               OpT,
                                                               IdxT,
                                                               DataT,
                                                               OutT,
                                                               FinOpT>;
  return reinterpret_cast<void*>(kernel);
}
// Queries the maximum number of resident blocks per SM for the instantiated
// kernel, given its thread count and shared-memory requirement.
int get_max_occupancy()
{
  void* kernel_ptr = get_kernel_ptr();
  int max_occupancy;
  int smem_size = OpT::shared_mem_size<Policy>();

  RAFT_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
    &max_occupancy, kernel_ptr, Policy::Nthreads, smem_size));

  return max_occupancy;
}

} // namespace cuvs::bench::distance::tune
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/distance | rapidsai_public_repos/cuvs/cpp/bench/micro/distance/tune_pairwise/bench.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Tuning benchmarks.
//
// Goals:
//
// 1. Fast compile times to maintain iteration speed.
// 2. Create benchmarks that can inform the design of the kernels.
//
// Non-goals:
//
// 1. Measure every distance operation. Instead measures just one distance
// operation at the same time.
// 2. Be useful for finding performance regressions. This is handled by the
// normal benchmarks.
//
// So far, both goals are partly achieved.
//
// RE (1), COMPILE TIMES: kernel.cu is fast to compile. This file is not.
// When the internals of a pairwise distance kernel is changed, this file is not
// recompiled.
//
// RE 2, benchmarks with intent: this file contains a benchmark to check the
// maximal throughput of a kernel. Measuring other things, like performance on
// skinny or wide matrices is not yet implemented.
#include "kernel.cuh" // launch_kernel
#include <algorithm> // std::min
#include <common/benchmark.hpp> // RAFT_BENCH_REGISTER
#include <cuvs/distance/detail/pairwise_matrix/params.cuh> // pairwise_matrix_params
#include <rmm/device_uvector.hpp> // rmm::device_uvector
#include <vector> // std::vector
namespace cuvs::bench::distance::tune {

// Max throughput benchmark.
//
// Goal: Measure the maximum distances/sec that can be computed.
//
// To achieve this, we make sure that:
//
// - Input data size is a multiple of the block tile size.
//
// - Perfect distribution of work between SMs, i.e. the number of block tiles is
//   a large multiple (num_waves) of the number of blocks (#SMs * occupancy).
//
// - Multiple iterations over Kblk are executed (num_k_iters).
struct throughput_param {
  int num_waves;    // block tiles per launched block
  int occupancy;    // requested blocks per SM (clamped to the achievable max)
  int num_k_iters;  // k-tile iterations per block tile
};

const std::vector<throughput_param> throughput_params{
  // 32 waves, requested occupancy of 4, and 32 k iterations typically achieves
  // maximum throughput. No need to pick higher values.
  {32, 4, 32},
};
// Benchmark fixture that sizes the problem to saturate the GPU (see the
// file-level comment above) and reports core-op and bandwidth rates.
struct throughput_bench : public fixture {
  const throughput_param p;
  throughput_bench(const throughput_param& p_) : p(p_) {}

  void run_benchmark(::benchmark::State& state) override
  {
    // Get block size:
    int block_m, block_n, block_k;
    get_block_size(block_m, block_n, block_k);

    // Determine number of blocks that will be launched. This informs the size
    // of the inputs as well as the grid size.
    const int num_sms       = raft::getMultiProcessorCount();
    const int max_occupancy = get_max_occupancy();
    const int occupancy     = std::min(p.occupancy, max_occupancy);
    const int num_blocks    = occupancy * num_sms;
    dim3 grid(num_blocks);

    // Create input sizes that are a multiple of the block tile size.
    size_t m = block_m;
    size_t n = block_n * p.num_waves * num_blocks;
    size_t k = block_k * p.num_k_iters;

    // DataT, OutT, IdxT, etc, are defined in tuned_kernel.cuh
    rmm::device_uvector<DataT> x_vec(m * k, stream);
    rmm::device_uvector<DataT> y_vec(n * k, stream);
    rmm::device_uvector<DataT> x_norm_vec(m, stream);
    rmm::device_uvector<DataT> y_norm_vec(n, stream);
    rmm::device_uvector<OutT> out_vec(m * n, stream);

    auto x      = x_vec.data();
    auto y      = y_vec.data();
    auto x_norm = x_norm_vec.data();
    auto y_norm = y_norm_vec.data();
    auto out    = out_vec.data();

    FinOpT fin_op{};

    // Create kernel parameter struct. Flip x and y if column major.
    IdxT ldx    = row_major ? k : m;
    IdxT ldy    = row_major ? k : n;
    IdxT ld_out = row_major ? n : m;

    // Template parameters of pairwise_matrix_params are defined in kernel.cuh
    pairwise_matrix_params kparams{
      IdxT(m), IdxT(n), IdxT(k), ldx, ldy, ld_out, x, y, x_norm, y_norm, out, fin_op, row_major};

    // Run benchmark
    loop_on_state(state, [&]() { launch_kernel(kparams, grid, stream); });

    // Report metrics. We don't report flop/s because we do not know for each
    // distance operation how many flops it costs. For L2_unexp and l1, we can
    // double this number to get the flop/s. For l2 expanded, core_ops/s should
    // equal flop/s (modulo the sqrt and subtracting from the norm).
    size_t num_core_ops = m * n * k;

    // Written to and read from global memory, respectively.
    size_t read_elts  = n * k + m * k;
    size_t write_elts = m * n;

    state.counters["m"] = benchmark::Counter(m);
    state.counters["n"] = benchmark::Counter(n);
    state.counters["k"] = benchmark::Counter(k);

    state.counters["occupancy"] = benchmark::Counter(occupancy);
    state.counters["# waves"]   = benchmark::Counter(p.num_waves);
    state.counters["# k iters"] = benchmark::Counter(p.num_k_iters);

    state.counters["core_ops/s"] = benchmark::Counter(num_core_ops,
                                                      benchmark::Counter::kIsIterationInvariantRate,
                                                      benchmark::Counter::OneK::kIs1000);

    state.counters["BW"] = benchmark::Counter(write_elts * sizeof(OutT) + read_elts * sizeof(DataT),
                                              benchmark::Counter::kIsIterationInvariantRate,
                                              benchmark::Counter::OneK::kIs1000);
  }
};

RAFT_BENCH_REGISTER(throughput_bench, "", throughput_params);

} // namespace cuvs::bench::distance::tune
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/distance | rapidsai_public_repos/cuvs/cpp/bench/micro/distance/tune_pairwise/kernel.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/detail/distance_ops/all_ops.cuh> // lp_unexp_distance_op
#include <cuvs/distance/detail/pairwise_matrix/params.cuh> // pairwise_matrix_params
namespace cuvs::bench::distance::tune {

// Launch one specific kernel with the following template parameters
constexpr bool row_major = true;  // layout of the benchmarked inputs/outputs
using DataT = float;              // element type of x and y
using AccT = float;               // accumulator type
using OutT = DataT;               // output element type
using IdxT = int;                 // index type
using FinOpT = raft::identity_op;  // epilogue applied to each output element

using pairwise_matrix_params =
  cuvs::distance::detail::pairwise_matrix_params<IdxT, DataT, OutT, FinOpT>;

// Launches kernel
void launch_kernel(pairwise_matrix_params, dim3, cudaStream_t);

// Describes the block size that is decided by the policy
void get_block_size(int& m, int& n, int& k);

// Upper bound on resident blocks per SM; used to clamp the requested occupancy.
int get_max_occupancy();

}  // namespace cuvs::bench::distance::tune
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/knn.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <common/benchmark.hpp>
#include <raft/core/resource/device_id.hpp>
#include <raft/random/rng.cuh>
#include <cuvs/neighbors/ivf_flat.cuh>
#include <cuvs/neighbors/ivf_pq.cuh>
#include <cuvs/neighbors/sample_filter.cuh>
#include <cuvs/spatial/knn/knn.cuh>
#include <raft/core/bitset.cuh>
#include <raft/util/itertools.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/host/new_delete_resource.hpp>
#include <rmm/mr/host/pinned_memory_resource.hpp>
#include <thrust/sequence.h>
#include <optional>
namespace cuvs::bench::spatial {
/** Benchmark case description shared by all KNN benchmark variants below. */
struct params {
  /** Size of the dataset. */
  size_t n_samples;
  /** Number of dimensions in the dataset. */
  size_t n_dims;
  /** The batch size -- number of KNN searches. */
  size_t n_queries;
  /** Number of nearest neighbours to find for every probe. */
  size_t k;
  /** Ratio of removed indices (only used by the *_filter_knn variants). */
  double removed_ratio;
};
/** Serializes a params value as "#"-separated fields for benchmark labels. */
inline auto operator<<(std::ostream& os, const params& p) -> std::ostream&
{
  constexpr char sep = '#';
  os << p.n_samples << sep << p.n_dims << sep << p.n_queries << sep << p.k << sep
     << p.removed_ratio;
  return os;
}
// How the dataset is moved from host to device inside the benchmark loop.
enum class TransferStrategy { NO_COPY, COPY_PLAIN, COPY_PINNED, MAP_PINNED, MANAGED }; // NOLINT
// Which phases (index build / search) the benchmark times.
enum class Scope { BUILD, SEARCH, BUILD_SEARCH }; // NOLINT
/** Prints the symbolic name of a TransferStrategy (or "UNKNOWN"). */
inline auto operator<<(std::ostream& os, const TransferStrategy& ts) -> std::ostream&
{
  const char* name = "UNKNOWN";
  switch (ts) {
    case TransferStrategy::NO_COPY: name = "NO_COPY"; break;
    case TransferStrategy::COPY_PLAIN: name = "COPY_PLAIN"; break;
    case TransferStrategy::COPY_PINNED: name = "COPY_PINNED"; break;
    case TransferStrategy::MAP_PINNED: name = "MAP_PINNED"; break;
    case TransferStrategy::MANAGED: name = "MANAGED"; break;
    default: break;
  }
  os << name;
  return os;
}
/** Prints the symbolic name of a Scope (or "UNKNOWN"). */
inline auto operator<<(std::ostream& os, const Scope& s) -> std::ostream&
{
  const char* name = "UNKNOWN";
  switch (s) {
    case Scope::BUILD: name = "BUILD"; break;
    case Scope::SEARCH: name = "SEARCH"; break;
    case Scope::BUILD_SEARCH: name = "BUILD_SEARCH"; break;
    default: break;
  }
  os << name;
  return os;
}
/**
 * Provides the device memory resource used for dataset allocations.
 *
 * When `managed` is true, a fresh `rmm::mr::managed_memory_resource` is
 * created and owned (destroyed with this object); otherwise the current
 * per-device resource is borrowed and never freed here.
 */
struct device_resource {
 public:
  explicit device_resource(bool managed) : managed_(managed)
  {
    if (managed_) {
      res_ = new rmm::mr::managed_memory_resource();
    } else {
      res_ = rmm::mr::get_current_device_resource();
    }
  }

  // Non-copyable/non-movable: a copy would double-delete the owned resource
  // in the managed case (rule of three).
  device_resource(const device_resource&)            = delete;
  device_resource& operator=(const device_resource&) = delete;

  ~device_resource()
  {
    if (managed_) { delete res_; }
  }

  /** The resource to pass to rmm allocations; ownership stays with this object. */
  [[nodiscard]] auto get() const -> rmm::mr::device_memory_resource* { return res_; }

 private:
  const bool managed_;                       // whether res_ is owned
  rmm::mr::device_memory_resource* res_;     // owned iff managed_
};
/**
 * A simple uninitialized host-side array of `n` elements of `T`, backed by
 * either pinned (page-locked) or ordinary pageable memory.
 *
 * @tparam T element type; elements are not constructed/destructed, so this is
 *   intended for trivial types only.
 */
template <typename T>
struct host_uvector {
  /**
   * @param n number of elements to allocate
   * @param pinned if true, use `rmm::mr::pinned_memory_resource`; otherwise
   *   `rmm::mr::new_delete_resource`.
   */
  host_uvector(size_t n, bool pinned) : n_(n)
  {
    if (pinned) {
      res_ = new rmm::mr::pinned_memory_resource();
    } else {
      res_ = new rmm::mr::new_delete_resource();
    }
    try {
      arr_ = static_cast<T*>(res_->allocate(n_ * sizeof(T)));
    } catch (...) {
      // Don't leak the memory resource if the allocation itself fails.
      delete res_;
      throw;
    }
  }

  // Non-copyable/non-movable: copies would double-free the allocation and the
  // memory resource (rule of three).
  host_uvector(const host_uvector&)            = delete;
  host_uvector& operator=(const host_uvector&) = delete;

  ~host_uvector() noexcept
  {
    res_->deallocate(arr_, n_ * sizeof(T));
    delete res_;
  }

  auto data() -> T* { return arr_; }
  [[nodiscard]] auto size() const -> size_t { return n_; }

 private:
  rmm::mr::host_memory_resource* res_;  // owned
  size_t n_;                            // element count
  T* arr_;                              // owned allocation of n_ * sizeof(T) bytes
};
/** Benchmark wrapper around the IVF-Flat index: builds on construction, searches on demand. */
template <typename ValT, typename IdxT>
struct ivf_flat_knn {
  using dist_t = float;

  std::optional<const cuvs::neighbors::ivf_flat::index<ValT, IdxT>> index;
  cuvs::neighbors::ivf_flat::index_params index_params;
  cuvs::neighbors::ivf_flat::search_params search_params;
  params ps;

  /** Builds the index from `data` (ps.n_samples x ps.n_dims). */
  ivf_flat_knn(const raft::device_resources& handle, const params& ps, const ValT* data) : ps(ps)
  {
    index_params.n_lists = 4096;
    index_params.metric = cuvs::distance::DistanceType::L2Expanded;
    index.emplace(cuvs::neighbors::ivf_flat::build(
      handle, index_params, data, IdxT(ps.n_samples), uint32_t(ps.n_dims)));
  }

  /** Finds ps.k neighbors for ps.n_queries queries; writes to out_dists / out_idxs. */
  void search(const raft::device_resources& handle,
              const ValT* search_items,
              dist_t* out_dists,
              IdxT* out_idxs)
  {
    search_params.n_probes = 20;
    cuvs::neighbors::ivf_flat::search(
      handle, search_params, *index, search_items, ps.n_queries, ps.k, out_idxs, out_dists);
  }
};
/** Benchmark wrapper around the IVF-PQ index: builds on construction, searches on demand. */
template <typename ValT, typename IdxT>
struct ivf_pq_knn {
  using dist_t = float;

  std::optional<const cuvs::neighbors::ivf_pq::index<IdxT>> index;
  cuvs::neighbors::ivf_pq::index_params index_params;
  cuvs::neighbors::ivf_pq::search_params search_params;
  params ps;

  /** Builds the index from `data` (ps.n_samples x ps.n_dims). */
  ivf_pq_knn(const raft::device_resources& handle, const params& ps, const ValT* data) : ps(ps)
  {
    index_params.n_lists = 4096;
    index_params.metric = cuvs::distance::DistanceType::L2Expanded;
    auto data_view = raft::make_device_matrix_view<const ValT, IdxT>(data, ps.n_samples, ps.n_dims);
    index.emplace(cuvs::neighbors::ivf_pq::build(handle, index_params, data_view));
  }

  /** Finds ps.k neighbors for ps.n_queries queries; writes to out_dists / out_idxs. */
  void search(const raft::device_resources& handle,
              const ValT* search_items,
              dist_t* out_dists,
              IdxT* out_idxs)
  {
    search_params.n_probes = 20;
    auto queries_view =
      raft::make_device_matrix_view<const ValT, uint32_t>(search_items, ps.n_queries, ps.n_dims);
    auto idxs_view = raft::make_device_matrix_view<IdxT, uint32_t>(out_idxs, ps.n_queries, ps.k);
    auto dists_view =
      raft::make_device_matrix_view<dist_t, uint32_t>(out_dists, ps.n_queries, ps.k);
    cuvs::neighbors::ivf_pq::search(
      handle, search_params, *index, queries_view, idxs_view, dists_view);
  }
};
/**
 * Benchmark wrapper around brute-force KNN. There is no index structure to
 * build; `index` is just a non-owning pointer to the dataset itself.
 */
template <typename ValT, typename IdxT>
struct brute_force_knn {
  using dist_t = ValT;

  ValT* index;  // non-owning pointer to the dataset
  params ps;

  brute_force_knn(const raft::device_resources& handle, const params& ps, const ValT* data)
    : index(const_cast<ValT*>(data)), ps(ps)
  {
  }

  /** Finds ps.k neighbors for ps.n_queries queries; writes to out_dists / out_idxs. */
  void search(const raft::device_resources& handle,
              const ValT* search_items,
              dist_t* out_dists,
              IdxT* out_idxs)
  {
    // NOTE(review): the brute_force_knn API takes non-const pointers, hence
    // the const_casts; presumably it does not modify the inputs -- confirm
    // against the cuvs::spatial::knn::brute_force_knn documentation.
    std::vector<ValT*> input{index};
    std::vector<size_t> sizes{ps.n_samples};
    cuvs::spatial::knn::brute_force_knn<IdxT, ValT, size_t>(handle,
                                                            input,
                                                            sizes,
                                                            ps.n_dims,
                                                            const_cast<ValT*>(search_items),
                                                            ps.n_queries,
                                                            out_idxs,
                                                            out_dists,
                                                            ps.k);
  }
};
/**
 * IVF-Flat benchmark wrapper with filtered search: a bitset filter is built
 * over the first `removed_ratio * n_samples` sample indices.
 */
template <typename ValT, typename IdxT>
struct ivf_flat_filter_knn {
  using dist_t = float;

  std::optional<const cuvs::neighbors::ivf_flat::index<ValT, IdxT>> index;
  cuvs::neighbors::ivf_flat::index_params index_params;
  cuvs::neighbors::ivf_flat::search_params search_params;
  raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset_;
  params ps;

  ivf_flat_filter_knn(const raft::device_resources& handle, const params& ps, const ValT* data)
    : ps(ps), removed_indices_bitset_(handle, ps.n_samples)
  {
    index_params.n_lists = 4096;
    index_params.metric = cuvs::distance::DistanceType::L2Expanded;
    index.emplace(cuvs::neighbors::ivf_flat::build(
      handle, index_params, data, IdxT(ps.n_samples), uint32_t(ps.n_dims)));
    // Enumerate indices [0, removed_ratio * n_samples) and feed them into the bitset.
    auto removed_indices =
      raft::make_device_vector<IdxT, int64_t>(handle, ps.removed_ratio * ps.n_samples);
    thrust::sequence(
      resource::get_thrust_policy(handle),
      thrust::device_pointer_cast(removed_indices.data_handle()),
      thrust::device_pointer_cast(removed_indices.data_handle() + removed_indices.extent(0)));
    removed_indices_bitset_.set(handle, removed_indices.view());
  }

  /** Searches with the bitset filter when removed_ratio > 0; plain search otherwise. */
  void search(const raft::device_resources& handle,
              const ValT* search_items,
              dist_t* out_dists,
              IdxT* out_idxs)
  {
    search_params.n_probes = 20;
    auto queries_view =
      raft::make_device_matrix_view<const ValT, IdxT>(search_items, ps.n_queries, ps.n_dims);
    auto neighbors_view = raft::make_device_matrix_view<IdxT, IdxT>(out_idxs, ps.n_queries, ps.k);
    auto distance_view = raft::make_device_matrix_view<dist_t, IdxT>(out_dists, ps.n_queries, ps.k);
    auto filter = cuvs::neighbors::filtering::bitset_filter(removed_indices_bitset_.view());
    if (ps.removed_ratio > 0) {
      cuvs::neighbors::ivf_flat::search_with_filtering(
        handle, search_params, *index, queries_view, neighbors_view, distance_view, filter);
    } else {
      cuvs::neighbors::ivf_flat::search(
        handle, search_params, *index, queries_view, neighbors_view, distance_view);
    }
  }
};
/**
 * IVF-PQ benchmark wrapper with filtered search: a bitset filter is built
 * over the first `removed_ratio * n_samples` sample indices.
 */
template <typename ValT, typename IdxT>
struct ivf_pq_filter_knn {
  using dist_t = float;

  std::optional<const cuvs::neighbors::ivf_pq::index<IdxT>> index;
  cuvs::neighbors::ivf_pq::index_params index_params;
  cuvs::neighbors::ivf_pq::search_params search_params;
  raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset_;
  params ps;

  ivf_pq_filter_knn(const raft::device_resources& handle, const params& ps, const ValT* data)
    : ps(ps), removed_indices_bitset_(handle, ps.n_samples)
  {
    index_params.n_lists = 4096;
    index_params.metric = cuvs::distance::DistanceType::L2Expanded;
    auto data_view = raft::make_device_matrix_view<const ValT, IdxT>(data, ps.n_samples, ps.n_dims);
    index.emplace(cuvs::neighbors::ivf_pq::build(handle, index_params, data_view));
    // Enumerate indices [0, removed_ratio * n_samples) and feed them into the bitset.
    auto removed_indices =
      raft::make_device_vector<IdxT, int64_t>(handle, ps.removed_ratio * ps.n_samples);
    thrust::sequence(
      resource::get_thrust_policy(handle),
      thrust::device_pointer_cast(removed_indices.data_handle()),
      thrust::device_pointer_cast(removed_indices.data_handle() + removed_indices.extent(0)));
    removed_indices_bitset_.set(handle, removed_indices.view());
  }

  /** Searches with the bitset filter when removed_ratio > 0; plain search otherwise. */
  void search(const raft::device_resources& handle,
              const ValT* search_items,
              dist_t* out_dists,
              IdxT* out_idxs)
  {
    search_params.n_probes = 20;
    auto queries_view =
      raft::make_device_matrix_view<const ValT, uint32_t>(search_items, ps.n_queries, ps.n_dims);
    auto neighbors_view =
      raft::make_device_matrix_view<IdxT, uint32_t>(out_idxs, ps.n_queries, ps.k);
    auto distance_view =
      raft::make_device_matrix_view<dist_t, uint32_t>(out_dists, ps.n_queries, ps.k);
    auto filter = cuvs::neighbors::filtering::bitset_filter(removed_indices_bitset_.view());
    if (ps.removed_ratio > 0) {
      cuvs::neighbors::ivf_pq::search_with_filtering(
        handle, search_params, *index, queries_view, neighbors_view, distance_view, filter);
    } else {
      cuvs::neighbors::ivf_pq::search(
        handle, search_params, *index, queries_view, neighbors_view, distance_view);
    }
  }
};
/**
 * Benchmark fixture for the KNN implementations above (ImplT is one of
 * brute_force_knn, ivf_flat_knn, ivf_pq_knn, or their filtered variants).
 *
 * `scope` selects whether index build, search, or both are timed; `strategy`
 * selects how the dataset gets to the device (explicit copies, pinned or
 * mapped host memory, or managed memory).
 */
template <typename ValT, typename IdxT, typename ImplT>
struct knn : public fixture {
  explicit knn(const params& p, const TransferStrategy& strategy, const Scope& scope)
    : fixture(true),
      params_(p),
      strategy_(strategy),
      scope_(scope),
      dev_mem_res_(strategy == TransferStrategy::MANAGED),
      data_host_(0),
      search_items_(p.n_queries * p.n_dims, stream),
      out_dists_(p.n_queries * p.k, stream),
      out_idxs_(p.n_queries * p.k, stream)
  {
    raft::random::RngState state{42};
    gen_data(state, search_items_, search_items_.size(), stream);
    try {
      // Generate the dataset on the device in minibatches and stage it in
      // host memory; this bounds the peak device memory used for generation.
      size_t total_size = p.n_samples * p.n_dims;
      data_host_.resize(total_size);
      constexpr size_t kGenMinibatchSize = 1024 * 1024 * 1024;
      rmm::device_uvector<ValT> d(std::min(kGenMinibatchSize, total_size), stream);
      for (size_t offset = 0; offset < total_size; offset += kGenMinibatchSize) {
        size_t actual_size = std::min(total_size - offset, kGenMinibatchSize);
        gen_data(state, d, actual_size, stream);
        copy(data_host_.data() + offset, d.data(), actual_size, stream);
      }
    } catch (std::bad_alloc& e) {
      // Remember the failure; run_benchmark reports it as a skipped benchmark.
      data_does_not_fit_ = true;
    }
  }

  /**
   * Fills `vec` with `n` uniformly distributed random values: the full value
   * range for integral types, [-1, 1] for floating-point types.
   */
  template <typename T>
  void gen_data(raft::random::RngState& state,  // NOLINT
                rmm::device_uvector<T>& vec,
                size_t n,
                rmm::cuda_stream_view stream)
  {
    constexpr T kRangeMax = std::is_integral_v<T> ? std::numeric_limits<T>::max() : T(1);
    constexpr T kRangeMin = std::is_integral_v<T> ? std::numeric_limits<T>::min() : T(-1);
    if constexpr (std::is_integral_v<T>) {
      raft::random::uniformInt(handle, state, vec.data(), n, kRangeMin, kRangeMax);
    } else {
      raft::random::uniform(handle, state, vec.data(), n, kRangeMin, kRangeMax);
    }
  }

  void run_benchmark(::benchmark::State& state) override
  {
    if (data_does_not_fit_) {
      state.SkipWithError("The data size is too big to fit into the host memory.");
      // Fix: return after skipping; otherwise the function would continue and
      // attempt to build/search with an incomplete dataset.
      return;
    }
    if (scope_ == Scope::SEARCH && strategy_ != TransferStrategy::NO_COPY) {
      state.SkipWithError(
        "When benchmarking without index building (Scope::SEARCH), the data must be already on the "
        "device (TransferStrategy::NO_COPY)");
      // Fix: return after skipping (see above).
      return;
    }
    try {
      std::ostringstream label_stream;
      label_stream << params_ << "#" << strategy_ << "#" << scope_;
      state.SetLabel(label_stream.str());
      raft::device_resources handle(stream);
      std::optional<ImplT> index;

      if (scope_ == Scope::SEARCH) {  // also implies TransferStrategy::NO_COPY
        // Build the index once before the benchmark loop: only search is timed.
        rmm::device_uvector<ValT> data(data_host_.size(), stream);
        copy(data.data(), data_host_.data(), data_host_.size(), stream);
        index.emplace(handle, params_, data.data());
        stream.synchronize();
      }

      // benchmark loop
      for (auto _ : state) {
        // managed or plain device memory initialized anew every time
        rmm::device_uvector<ValT> data(data_host_.size(), stream, dev_mem_res_.get());
        ValT* data_ptr         = data.data();
        size_t allocation_size = data_host_.size() * sizeof(ValT);

        // Non-benchmarked part: using different methods to copy the data if necessary
        switch (strategy_) {
          case TransferStrategy::NO_COPY:  // copy data to GPU before starting the timer.
            copy(data_ptr, data_host_.data(), data_host_.size(), stream);
            break;
          case TransferStrategy::COPY_PINNED:
            RAFT_CUDA_TRY(
              cudaHostRegister(data_host_.data(), allocation_size, cudaHostRegisterDefault));
            break;
          case TransferStrategy::MAP_PINNED:
            RAFT_CUDA_TRY(
              cudaHostRegister(data_host_.data(), allocation_size, cudaHostRegisterMapped));
            RAFT_CUDA_TRY(cudaHostGetDevicePointer(&data_ptr, data_host_.data(), 0));
            break;
          case TransferStrategy::MANAGED:  // sic! using std::memcpy rather than cuda copy
            RAFT_CUDA_TRY(cudaMemAdvise(data_ptr,
                                        allocation_size,
                                        cudaMemAdviseSetPreferredLocation,
                                        resource::get_device_id(handle)));
            RAFT_CUDA_TRY(cudaMemAdvise(data_ptr,
                                        allocation_size,
                                        cudaMemAdviseSetAccessedBy,
                                        resource::get_device_id(handle)));
            RAFT_CUDA_TRY(cudaMemAdvise(data_ptr,
                                        allocation_size,
                                        cudaMemAdviseSetReadMostly,
                                        resource::get_device_id(handle)));
            std::memcpy(data_ptr, data_host_.data(), allocation_size);
            break;
          default: break;
        }
        flush_L2_cache();
        {
          // Timer synchronizes the stream, so all prior gpu work should be done before it sets off.
          cuda_event_timer timer(state, stream);
          switch (strategy_) {
            case TransferStrategy::COPY_PLAIN:
            case TransferStrategy::COPY_PINNED:
              // For these strategies the host-to-device copy is part of the
              // measured time.
              copy(data_ptr, data_host_.data(), data_host_.size(), stream);
            default: break;
          }
          if (scope_ != Scope::SEARCH) { index.emplace(handle, params_, data_ptr); }
          if (scope_ != Scope::BUILD) {
            index->search(handle, search_items_.data(), out_dists_.data(), out_idxs_.data());
          }
        }
        if (scope_ != Scope::SEARCH) { index.reset(); }

        // Undo the pinning/mapping done before the timed section.
        switch (strategy_) {
          case TransferStrategy::COPY_PINNED:
          case TransferStrategy::MAP_PINNED:
            RAFT_CUDA_TRY(cudaHostUnregister(data_host_.data()));
            break;
          default: break;
        }
      }
    } catch (raft::exception& e) {
      state.SkipWithError(e.what());
    } catch (std::bad_alloc& e) {
      state.SkipWithError(e.what());
    }
  }

 private:
  const params params_;
  const TransferStrategy strategy_;
  const Scope scope_;
  device_resource dev_mem_res_;     // managed or current-device memory resource
  bool data_does_not_fit_ = false;  // set when host staging of the dataset failed

  std::vector<ValT> data_host_;                            // host-staged dataset
  rmm::device_uvector<ValT> search_items_;                 // query batch
  rmm::device_uvector<typename ImplT::dist_t> out_dists_;  // search results (distances)
  rmm::device_uvector<IdxT> out_idxs_;                     // search results (indices)
};
// Default cases: {n_samples, n_dims, n_queries, k, removed_ratio}.
inline const std::vector<params> kInputs{
  {2000000, 128, 1000, 32, 0}, {10000000, 128, 1000, 32, 0}, {10000, 8192, 1000, 32, 0}};

// Cases for the filtered-search benchmarks: sweep over the removed ratio.
const std::vector<params> kInputsFilter =
  raft::util::itertools::product<params>({size_t(10000000)},  // n_samples
                                         {size_t(128)},       // n_dim
                                         {size_t(1000)},      // n_queries
                                         {size_t(255)},       // k
                                         {0.0, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64}  // removed_ratio
  );

// Transfer-strategy / scope combinations used by the KNN_REGISTER calls.
inline const std::vector<TransferStrategy> kAllStrategies{
  TransferStrategy::NO_COPY, TransferStrategy::MAP_PINNED, TransferStrategy::MANAGED};
inline const std::vector<TransferStrategy> kNoCopyOnly{TransferStrategy::NO_COPY};

inline const std::vector<Scope> kScopeFull{Scope::BUILD_SEARCH};
inline const std::vector<Scope> kAllScopes{Scope::BUILD_SEARCH, Scope::SEARCH, Scope::BUILD};
// Instantiates the knn fixture for the given value/index types and
// implementation, and registers it over the given inputs, transfer
// strategies, and scopes. Wrapped in a unique namespace so multiple
// registrations in one binary don't collide.
#define KNN_REGISTER(ValT, IdxT, ImplT, inputs, strats, scope)                \
  namespace BENCHMARK_PRIVATE_NAME(knn) {                                     \
  using KNN = knn<ValT, IdxT, ImplT<ValT, IdxT>>;                             \
  RAFT_BENCH_REGISTER(KNN, #ValT "/" #IdxT "/" #ImplT, inputs, strats, scope); \
  }
} // namespace cuvs::bench::spatial
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/refine.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs_internal/neighbors/refine_helper.cuh>
#include <common/benchmark.hpp>
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/neighbors/detail/refine.cuh>
#include <cuvs/neighbors/refine.cuh>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/random/rng.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <iostream>
#include <sstream>
using namespace cuvs::neighbors;
namespace cuvs::bench::neighbors {
/** Serializes RefineInputs as a "#"-separated benchmark label. */
template <typename IdxT>
inline auto operator<<(std::ostream& os, const RefineInputs<IdxT>& p) -> std::ostream&
{
  const char* location = p.host_data ? "host" : "device";
  os << p.n_rows << '#' << p.dim << '#' << p.n_queries << '#' << p.k0 << '#' << p.k << '#'
     << location;
  return os;
}
/**
 * Benchmark fixture for cuvs::neighbors::refine, operating on either
 * host-resident or device-resident data (selected by RefineInputs::host_data).
 */
template <typename DataT, typename DistanceT, typename IdxT>
class RefineAnn : public fixture {
 public:
  RefineAnn(RefineInputs<IdxT> p) : data(handle_, p) {}

  void run_benchmark(::benchmark::State& state) override
  {
    std::ostringstream label_stream;
    label_stream << data.p;
    state.SetLabel(label_stream.str());

    // Use a pool allocator for the duration of the benchmark so allocation
    // cost is not dominated by raw device malloc/free; restored afterwards.
    auto old_mr = rmm::mr::get_current_device_resource();
    rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> pool_mr(old_mr);
    rmm::mr::set_current_device_resource(&pool_mr);

    if (data.p.host_data) {
      loop_on_state(state, [this]() {
        cuvs::neighbors::refine<IdxT, DataT, DistanceT, IdxT>(handle_,
                                                              data.dataset_host.view(),
                                                              data.queries_host.view(),
                                                              data.candidates_host.view(),
                                                              data.refined_indices_host.view(),
                                                              data.refined_distances_host.view(),
                                                              data.p.metric);
      });
    } else {
      loop_on_state(state, [&]() {
        cuvs::neighbors::refine<IdxT, DataT, DistanceT, IdxT>(handle_,
                                                              data.dataset.view(),
                                                              data.queries.view(),
                                                              data.candidates.view(),
                                                              data.refined_indices.view(),
                                                              data.refined_distances.view(),
                                                              data.p.metric);
      });
    }
    rmm::mr::set_current_device_resource(old_mr);
  }

 private:
  raft::device_resources handle_;
  // Owns the dataset, queries, candidate lists, and result buffers (host and device copies).
  RefineHelper<DataT, DistanceT, IdxT> data;
};
/**
 * Produces the benchmark cases: the cartesian product of data placement
 * (host/device), query batch size, and dimensionality, with two (k0, k)
 * candidate/refine configurations per combination.
 */
template <typename T>
std::vector<RefineInputs<T>> getInputs()
{
  std::vector<RefineInputs<T>> out;
  const cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2Expanded;
  for (auto host_data : {true, false}) {
    for (auto n_queries : {T(1000), T(10000)}) {
      for (auto dim : {T(128), T(512)}) {
        out.push_back(RefineInputs<T>{n_queries, 2000000, dim, 32, 128, metric, host_data});
        out.push_back(RefineInputs<T>{n_queries, 2000000, dim, 10, 40, metric, host_data});
      }
    }
  }
  return out;
}
} // namespace cuvs::bench::neighbors
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/cagra_bench.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <common/benchmark.hpp>
#include <cuvs/neighbors/cagra.cuh>
#include <cuvs/neighbors/sample_filter.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/itertools.hpp>
#include <thrust/sequence.h>
#include <optional>
namespace cuvs::bench::neighbors {
/** Benchmark case description for the CAGRA search benchmarks. */
struct params {
  /** Size of the dataset. */
  size_t n_samples;
  /** Number of dimensions in the dataset. */
  int n_dims;
  /** The batch size -- number of KNN searches. */
  int n_queries;
  /** Number of nearest neighbours to find for every probe. */
  int k;
  /** kNN graph degree*/
  int degree;
  /** Forwarded to cagra::search_params::itopk_size. */
  int itopk_size;
  /** Forwarded to cagra::search_params::thread_block_size. */
  int block_size;
  /** Forwarded to cagra::search_params::search_width. */
  int search_width;
  /** Only used to report the "iterations" counter; 0 means estimate it from
   * itopk_size and search_width (see run_benchmark). */
  int max_iterations;
  /** Ratio of removed indices. */
  double removed_ratio;
};
/**
 * Benchmark fixture for CAGRA search: constructs an index from a random
 * dataset and a random kNN graph, then times plain or filtered search
 * depending on params::removed_ratio.
 */
template <typename T, typename IdxT>
struct CagraBench : public fixture {
  explicit CagraBench(const params& ps)
    : fixture(true),
      params_(ps),
      queries_(make_device_matrix<T, int64_t>(handle, ps.n_queries, ps.n_dims)),
      dataset_(make_device_matrix<T, int64_t>(handle, ps.n_samples, ps.n_dims)),
      knn_graph_(make_device_matrix<IdxT, int64_t>(handle, ps.n_samples, ps.degree)),
      removed_indices_bitset_(handle, ps.n_samples)
  {
    // Generate random dataset and queries: full value range for integral T,
    // [-1, 1] for floating-point T.
    raft::random::RngState state{42};
    constexpr T kRangeMax = std::is_integral_v<T> ? std::numeric_limits<T>::max() : T(1);
    constexpr T kRangeMin = std::is_integral_v<T> ? std::numeric_limits<T>::min() : T(-1);
    if constexpr (std::is_integral_v<T>) {
      raft::random::uniformInt(
        handle, state, dataset_.data_handle(), dataset_.size(), kRangeMin, kRangeMax);
      raft::random::uniformInt(
        handle, state, queries_.data_handle(), queries_.size(), kRangeMin, kRangeMax);
    } else {
      raft::random::uniform(
        handle, state, dataset_.data_handle(), dataset_.size(), kRangeMin, kRangeMax);
      raft::random::uniform(
        handle, state, queries_.data_handle(), queries_.size(), kRangeMin, kRangeMax);
    }

    // Generate random knn graph
    raft::random::uniformInt<IdxT>(
      handle, state, knn_graph_.data_handle(), knn_graph_.size(), 0, ps.n_samples - 1);
    auto metric = cuvs::distance::DistanceType::L2Expanded;

    // Enumerate indices [0, removed_ratio * n_samples) and feed them into the
    // filter bitset used by the filtered-search variant.
    auto removed_indices =
      raft::make_device_vector<IdxT, int64_t>(handle, ps.removed_ratio * ps.n_samples);
    thrust::sequence(
      resource::get_thrust_policy(handle),
      thrust::device_pointer_cast(removed_indices.data_handle()),
      thrust::device_pointer_cast(removed_indices.data_handle() + removed_indices.extent(0)));

    removed_indices_bitset_.set(handle, removed_indices.view());
    index_.emplace(cuvs::neighbors::cagra::index<T, IdxT>(
      handle, metric, make_const_mdspan(dataset_.view()), make_const_mdspan(knn_graph_.view())));
  }

  void run_benchmark(::benchmark::State& state) override
  {
    cuvs::neighbors::cagra::search_params search_params;
    search_params.max_queries       = 1024;
    search_params.itopk_size        = params_.itopk_size;
    search_params.team_size         = 0;
    search_params.thread_block_size = params_.block_size;
    search_params.search_width      = params_.search_width;

    auto indices   = make_device_matrix<IdxT, int64_t>(handle, params_.n_queries, params_.k);
    auto distances = make_device_matrix<float, int64_t>(handle, params_.n_queries, params_.k);
    auto ind_v     = make_device_matrix_view<IdxT, int64_t, row_major>(
      indices.data_handle(), params_.n_queries, params_.k);
    auto dist_v = make_device_matrix_view<float, int64_t, row_major>(
      distances.data_handle(), params_.n_queries, params_.k);

    auto queries_v = make_const_mdspan(queries_.view());
    if (params_.removed_ratio > 0) {
      auto filter = cuvs::neighbors::filtering::bitset_filter(removed_indices_bitset_.view());
      loop_on_state(state, [&]() {
        cuvs::neighbors::cagra::search_with_filtering(
          this->handle, search_params, *this->index_, queries_v, ind_v, dist_v, filter);
      });
    } else {
      loop_on_state(state, [&]() {
        cuvs::neighbors::cagra::search(
          this->handle, search_params, *this->index_, queries_v, ind_v, dist_v);
      });
    }

    double data_size  = params_.n_samples * params_.n_dims * sizeof(T);
    double graph_size = params_.n_samples * params_.degree * sizeof(IdxT);

    // Estimate the number of search iterations when not given explicitly.
    int iterations = params_.max_iterations;
    if (iterations == 0) {
      // see search_plan_impl::adjust_search_params()
      double r = params_.itopk_size / static_cast<float>(params_.search_width);
      iterations = 1 + std::min(r * 1.1, r + 10);
    }
    state.counters["dataset (GiB)"] = data_size / (1 << 30);
    state.counters["graph (GiB)"]   = graph_size / (1 << 30);
    state.counters["n_rows"]        = params_.n_samples;
    state.counters["n_cols"]        = params_.n_dims;
    state.counters["degree"]        = params_.degree;
    state.counters["n_queries"]     = params_.n_queries;
    state.counters["k"]             = params_.k;
    state.counters["itopk_size"]    = params_.itopk_size;
    state.counters["block_size"]    = params_.block_size;
    state.counters["search_width"]  = params_.search_width;
    state.counters["iterations"]    = iterations;
    state.counters["removed_ratio"] = params_.removed_ratio;
  }

 private:
  const params params_;
  std::optional<const cuvs::neighbors::cagra::index<T, IdxT>> index_;
  raft::device_matrix<T, int64_t, row_major> queries_;
  raft::device_matrix<T, int64_t, row_major> dataset_;
  raft::device_matrix<IdxT, int64_t, row_major> knn_graph_;
  raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset_;
};
/**
 * Builds the full list of benchmark cases: a dimensionality sweep, a
 * thread-block-size sweep, and a filtered-search (removed_ratio) sweep.
 */
inline const std::vector<params> generate_inputs()
{
  std::vector<params> inputs =
    raft::util::itertools::product<params>({2000000ull},            // n_samples
                                           {128, 256, 512, 1024},   // dataset dim
                                           {1000},                  // n_queries
                                           {32},                    // k
                                           {64},                    // knn graph degree
                                           {64},                    // itopk_size
                                           {0},                     // block_size
                                           {1},                     // search_width
                                           {0},                     // max_iterations
                                           {0.0}                    // removed_ratio
    );
  auto inputs2 = raft::util::itertools::product<params>({2000000ull, 10000000ull},  // n_samples
                                                        {128},                      // dataset dim
                                                        {1000},                     // n_queries
                                                        {32},                       // k
                                                        {64},  // knn graph degree
                                                        {64},  // itopk_size
                                                        {64, 128, 256, 512, 1024},  // block_size
                                                        {1},                        // search_width
                                                        {0},    // max_iterations
                                                        {0.0}   // removed_ratio
  );
  inputs.insert(inputs.end(), inputs2.begin(), inputs2.end());

  inputs2 = raft::util::itertools::product<params>(
    {2000000ull, 10000000ull},                 // n_samples
    {128},                                     // dataset dim
    {1, 10, 10000},                            // n_queries
    {255},                                     // k
    {64},                                      // knn graph degree
    {300},                                     // itopk_size
    {256},                                     // block_size
    {2},                                       // search_width
    {0},                                       // max_iterations
    {0.0, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64}  // removed_ratio
  );
  inputs.insert(inputs.end(), inputs2.begin(), inputs2.end());
  return inputs;
}
// All benchmark cases: dim sweep, block-size sweep, and filtered-search sweep.
const std::vector<params> kCagraInputs = generate_inputs();

// Instantiates and registers CagraBench for the given value/index types.
// Wrapped in a unique namespace so multiple registrations don't collide.
#define CAGRA_REGISTER(ValT, IdxT, inputs)                \
  namespace BENCHMARK_PRIVATE_NAME(knn) {                 \
  using AnnCagra = CagraBench<ValT, IdxT>;                \
  RAFT_BENCH_REGISTER(AnnCagra, #ValT "/" #IdxT, inputs); \
  }
} // namespace cuvs::bench::neighbors
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/refine_uint8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "refine.cuh"
#include <common/benchmark.hpp>
using namespace cuvs::neighbors;
namespace cuvs::bench::neighbors {

// Register the refine benchmark for uint8_t data with int64_t indices.
using refine_uint8_int64 = RefineAnn<uint8_t, float, int64_t>;
RAFT_BENCH_REGISTER(refine_uint8_int64, "", getInputs<int64_t>());

}  // namespace cuvs::bench::neighbors
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/refine_float_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "refine.cuh"
#include <common/benchmark.hpp>
using namespace cuvs::neighbors;
namespace cuvs::bench::neighbors {

// Register the refine benchmark for float data with int64_t indices.
using refine_float_int64 = RefineAnn<float, float, int64_t>;
RAFT_BENCH_REGISTER(refine_float_int64, "", getInputs<int64_t>());

}  // namespace cuvs::bench::neighbors
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/knn/brute_force_float_uint32_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace cuvs::bench::spatial {
// Register brute-force k-NN benchmarks for float data / uint32_t indices
// over all transfer strategies and the full scope.
KNN_REGISTER(float, uint32_t, brute_force_knn, kInputs, kAllStrategies, kScopeFull);
}  // namespace cuvs::bench::spatial
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/knn/ivf_pq_float_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace cuvs::bench::spatial {
// Register IVF-PQ k-NN benchmarks for float data / int64_t indices (no-copy strategy, all scopes).
KNN_REGISTER(float, int64_t, ivf_pq_knn, kInputs, kNoCopyOnly, kAllScopes);
}  // namespace cuvs::bench::spatial
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/knn/ivf_pq_filter_float_int64_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#undef RAFT_EXPLICIT_INSTANTIATE_ONLY // Enable instantiation of search with filter
#include "../knn.cuh"
namespace cuvs::bench::spatial {
// Register filtered IVF-PQ k-NN benchmarks for float data / int64_t indices
// (uses the filter-specific input set; see the #undef above enabling filtered search).
KNN_REGISTER(float, int64_t, ivf_pq_filter_knn, kInputsFilter, kNoCopyOnly, kScopeFull);
}  // namespace cuvs::bench::spatial
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/knn/ivf_pq_uint8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace cuvs::bench::spatial {
// Register IVF-PQ k-NN benchmarks for uint8_t data / int64_t indices (no-copy strategy, all scopes).
KNN_REGISTER(uint8_t, int64_t, ivf_pq_knn, kInputs, kNoCopyOnly, kAllScopes);
}  // namespace cuvs::bench::spatial
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/knn/brute_force_float_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace cuvs::bench::spatial {
// Register brute-force k-NN benchmarks for float data / int64_t indices
// over all transfer strategies and the full scope.
KNN_REGISTER(float, int64_t, brute_force_knn, kInputs, kAllStrategies, kScopeFull);
}  // namespace cuvs::bench::spatial
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/knn/cagra_float_uint32_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../cagra_bench.cuh"
namespace cuvs::bench::neighbors {
// Register CAGRA benchmarks for float data / uint32_t indices over the CAGRA input set.
CAGRA_REGISTER(float, uint32_t, kCagraInputs);
}  // namespace cuvs::bench::neighbors
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/knn/ivf_flat_float_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace cuvs::bench::spatial {
// Register IVF-Flat k-NN benchmarks for float data / int64_t indices (no-copy strategy, all scopes).
KNN_REGISTER(float, int64_t, ivf_flat_knn, kInputs, kNoCopyOnly, kAllScopes);
}  // namespace cuvs::bench::spatial
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/knn/ivf_flat_filter_float_int64_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#undef RAFT_EXPLICIT_INSTANTIATE_ONLY // Enable instantiation of search with filter
#include "../knn.cuh"
namespace cuvs::bench::spatial {
// Register filtered IVF-Flat k-NN benchmarks for float data / int64_t indices
// (uses the filter-specific input set; see the #undef above enabling filtered search).
KNN_REGISTER(float, int64_t, ivf_flat_filter_knn, kInputsFilter, kNoCopyOnly, kScopeFull);
}  // namespace cuvs::bench::spatial
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/knn/ivf_flat_uint8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace cuvs::bench::spatial {
// Register IVF-Flat k-NN benchmarks for uint8_t data / int64_t indices (no-copy, all scopes).
KNN_REGISTER(uint8_t, int64_t, ivf_flat_knn, kInputs, kNoCopyOnly, kAllScopes);
}  // namespace cuvs::bench::spatial
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/knn/ivf_pq_int8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace cuvs::bench::spatial {
// Register IVF-PQ k-NN benchmarks for int8_t data / int64_t indices (no-copy strategy, all scopes).
KNN_REGISTER(int8_t, int64_t, ivf_pq_knn, kInputs, kNoCopyOnly, kAllScopes);
}  // namespace cuvs::bench::spatial
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors | rapidsai_public_repos/cuvs/cpp/bench/micro/neighbors/knn/ivf_flat_int8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace cuvs::bench::spatial {
// Register IVF-Flat k-NN benchmarks for int8_t data / int64_t indices (no-copy, all scopes).
KNN_REGISTER(int8_t, int64_t, ivf_flat_knn, kInputs, kNoCopyOnly, kAllScopes);
}  // namespace cuvs::bench::spatial
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/matrix/select_k.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs_internal/matrix/select_k.cuh>
#include <common/benchmark.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/nvtx.hpp>
#include <raft/random/rng.cuh>
#include <raft/sparse/detail/utils.h>
#include <raft/util/cudart_utils.hpp>
#include <raft/matrix/detail/select_radix.cuh>
#include <raft/matrix/detail/select_warpsort.cuh>
#include <raft/matrix/select_k.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <cstdint>
#include <cstring>
#include <type_traits>
namespace raft::matrix {
using namespace cuvs::bench; // NOLINT
/**
 * Functor (for raft::linalg::map_offset) that overwrites masked inputs with a
 * sentinel value.
 *
 * Treating the flat input as rows of `line_length` elements, an element is
 * replaced by `replacement` when its mask byte is non-zero — except for the
 * first `spared_inputs` positions of every row, which are always left intact
 * so the input is never replaced wholesale.
 */
template <typename KeyT>
struct replace_with_mask {
  KeyT replacement;
  int64_t line_length;
  int64_t spared_inputs;
  constexpr auto inline operator()(int64_t offset, KeyT x, uint8_t mask) -> KeyT
  {
    // Position of this element within its row of the flattened input.
    int64_t pos_in_line = offset % line_length;
    if (mask == 0 || pos_in_line < spared_inputs) { return x; }
    return replacement;
  }
};
/**
 * Benchmark fixture for one select-k algorithm (chosen via the `Algo` template
 * parameter) over keys of type `KeyT` and indices of type `IdxT`.
 *
 * The constructor prepares the input on the GPU: `batch_size * len` random
 * distances (uniform in [-1, 1], or a tiny interval just above 1.0 when
 * `use_same_leading_bits` is set), iota-filled input indices, and — when
 * `frac_infinities > 0` — a Bernoulli-sampled subset of inputs replaced with
 * the worst-possible bound value.
 */
template <typename KeyT, typename IdxT, select::Algo Algo>
struct selection : public fixture {
  explicit selection(const select::params& p)
    : fixture(p.use_memory_pool),
      params_(p),
      in_dists_(p.batch_size * p.len, stream),
      in_ids_(p.batch_size * p.len, stream),
      out_dists_(p.batch_size * p.k, stream),
      out_ids_(p.batch_size * p.k, stream)
  {
    raft::sparse::iota_fill(in_ids_.data(), IdxT(p.batch_size), IdxT(p.len), stream);
    raft::random::RngState state{42};

    KeyT min_value = -1.0;
    KeyT max_value = 1.0;
    if (p.use_same_leading_bits) {
      // Squeeze all keys into a narrow interval just above 1.0 so they share
      // the same leading mantissa bits (bit patterns built via memcpy to avoid
      // type-punning UB).
      if constexpr (std::is_same_v<KeyT, float>) {
        uint32_t min_bits = 0x3F800000;  // 1.0
        uint32_t max_bits = 0x3F8000FF;  // 1.00003
        memcpy(&min_value, &min_bits, sizeof(KeyT));
        memcpy(&max_value, &max_bits, sizeof(KeyT));
      } else if constexpr (std::is_same_v<KeyT, double>) {
        uint64_t min_bits = 0x3FF0000000000000;  // 1.0
        uint64_t max_bits = 0x3FF0000FFFFFFFFF;  // 1.000015
        memcpy(&min_value, &min_bits, sizeof(KeyT));
        memcpy(&max_value, &max_bits, sizeof(KeyT));
      }
    }
    raft::random::uniform(handle, state, in_dists_.data(), in_dists_.size(), min_value, max_value);

    if (p.frac_infinities > 0.0) {
      // Replace ~frac_infinities of the inputs with the "worst" bound
      // (upper bound when selecting minima, lower bound otherwise), sparing
      // the first k/2 elements of each row (see replace_with_mask).
      rmm::device_uvector<uint8_t> mask_buf(p.batch_size * p.len, stream);
      auto mask = make_device_vector_view<uint8_t, size_t>(mask_buf.data(), mask_buf.size());
      raft::random::bernoulli(handle, state, mask, p.frac_infinities);
      KeyT bound = p.select_min ? raft::upper_bound<KeyT>() : raft::lower_bound<KeyT>();
      auto mask_in =
        make_device_vector_view<const uint8_t, size_t>(mask_buf.data(), mask_buf.size());
      // in_dists_ is mapped in place: the const view reads it, the mutable view writes it.
      auto dists_in  = make_device_vector_view<const KeyT>(in_dists_.data(), in_dists_.size());
      auto dists_out = make_device_vector_view<KeyT>(in_dists_.data(), in_dists_.size());
      raft::linalg::map_offset(handle,
                               dists_out,
                               replace_with_mask<KeyT>{bound, int64_t(p.len), int64_t(p.k / 2)},
                               dists_in,
                               mask_in);
    }
  }

  void run_benchmark(::benchmark::State& state) override  // NOLINT
  {
    try {
      // Encode the case parameters into the benchmark label: batch#len#k[#flags].
      std::ostringstream label_stream;
      label_stream << params_.batch_size << "#" << params_.len << "#" << params_.k;
      if (params_.use_same_leading_bits) { label_stream << "#same-leading-bits"; }
      if (params_.frac_infinities > 0) { label_stream << "#infs-" << params_.frac_infinities; }
      state.SetLabel(label_stream.str());
      common::nvtx::range case_scope("%s - %s", state.name().c_str(), label_stream.str().c_str());
      int iter = 0;
      loop_on_state(state, [&iter, this]() {
        // Per-iteration NVTX range so individual laps are visible in profilers.
        common::nvtx::range lap_scope("lap-", iter++);
        select::select_k_impl<KeyT, IdxT>(handle,
                                          Algo,
                                          in_dists_.data(),
                                          params_.use_index_input ? in_ids_.data() : NULL,
                                          params_.batch_size,
                                          params_.len,
                                          params_.k,
                                          out_dists_.data(),
                                          out_ids_.data(),
                                          params_.select_min);
      });
    } catch (raft::exception& e) {
      // e.g. unsupported k for this algorithm: mark the case skipped instead of aborting.
      state.SkipWithError(e.what());
    }
  }

 private:
  const select::params params_;
  rmm::device_uvector<KeyT> in_dists_, out_dists_;
  rmm::device_uvector<IdxT> in_ids_, out_ids_;
};
// Default benchmark cases. Positional fields, as used by this file:
// {batch_size, len, k, select_min, use_index_input, use_same_leading_bits, ..., frac_infinities}
// (the 7th field in the 8-element entries is presumably use_memory_pool —
// TODO confirm against the select::params declaration).
const std::vector<select::params> kInputs{
  {20000, 500, 1, true},
  {20000, 500, 2, true},
  {20000, 500, 4, true},
  {20000, 500, 8, true},
  {20000, 500, 16, true},
  {20000, 500, 32, true},
  {20000, 500, 64, true},
  {20000, 500, 128, true},
  {20000, 500, 256, true},

  {1000, 10000, 1, true},
  {1000, 10000, 2, true},
  {1000, 10000, 4, true},
  {1000, 10000, 8, true},
  {1000, 10000, 16, true},
  {1000, 10000, 32, true},
  {1000, 10000, 64, true},
  {1000, 10000, 128, true},
  {1000, 10000, 256, true},

  {100, 100000, 1, true},
  {100, 100000, 2, true},
  {100, 100000, 4, true},
  {100, 100000, 8, true},
  {100, 100000, 16, true},
  {100, 100000, 32, true},
  {100, 100000, 64, true},
  {100, 100000, 128, true},
  {100, 100000, 256, true},

  {10, 1000000, 1, true},
  {10, 1000000, 2, true},
  {10, 1000000, 4, true},
  {10, 1000000, 8, true},
  {10, 1000000, 16, true},
  {10, 1000000, 32, true},
  {10, 1000000, 64, true},
  {10, 1000000, 128, true},
  {10, 1000000, 256, true},

  // same-leading-bits variants (stress radix-based algorithms)
  {10, 1000000, 1, true, false, true},
  {10, 1000000, 2, true, false, true},
  {10, 1000000, 4, true, false, true},
  {10, 1000000, 8, true, false, true},
  {10, 1000000, 16, true, false, true},
  {10, 1000000, 32, true, false, true},
  {10, 1000000, 64, true, false, true},
  {10, 1000000, 128, true, false, true},
  {10, 1000000, 256, true, false, true},

  // variants with a fraction of the inputs replaced by infinities
  {10, 1000000, 1, true, false, false, true, 0.1},
  {10, 1000000, 16, true, false, false, true, 0.1},
  {10, 1000000, 64, true, false, false, true, 0.1},
  {10, 1000000, 128, true, false, false, true, 0.1},
  {10, 1000000, 256, true, false, false, true, 0.1},

  {10, 1000000, 1, true, false, false, true, 0.9},
  {10, 1000000, 16, true, false, false, true, 0.9},
  {10, 1000000, 64, true, false, false, true, 0.9},
  {10, 1000000, 128, true, false, false, true, 0.9},
  {10, 1000000, 256, true, false, false, true, 0.9},
  {1000, 10000, 1, true, false, false, true, 0.9},
  {1000, 10000, 16, true, false, false, true, 0.9},
  {1000, 10000, 64, true, false, false, true, 0.9},
  {1000, 10000, 128, true, false, false, true, 0.9},
  {1000, 10000, 256, true, false, false, true, 0.9},

  {10, 1000000, 1, true, false, false, true, 1.0},
  {10, 1000000, 16, true, false, false, true, 1.0},
  {10, 1000000, 64, true, false, false, true, 1.0},
  {10, 1000000, 128, true, false, false, true, 1.0},
  {10, 1000000, 256, true, false, false, true, 1.0},
  {1000, 10000, 1, true, false, false, true, 1.0},
  {1000, 10000, 16, true, false, false, true, 1.0},
  {1000, 10000, 64, true, false, false, true, 1.0},
  {1000, 10000, 128, true, false, false, true, 1.0},
  {1000, 10000, 256, true, false, false, true, 0.999},
};
// Registers the `selection` fixture for one (KeyT, IdxT, Algo) combination over
// the default kInputs set. The uniquely-named namespace keeps the registrar
// objects of different invocations from colliding.
#define SELECTION_REGISTER(KeyT, IdxT, A)                         \
  namespace BENCHMARK_PRIVATE_NAME(selection) {                   \
  using SelectK = selection<KeyT, IdxT, select::Algo::A>;         \
  RAFT_BENCH_REGISTER(SelectK, #KeyT "/" #IdxT "/" #A, kInputs);  \
  }

// float/uint32_t: all algorithms, including the public select_k API entry point.
SELECTION_REGISTER(float, uint32_t, kPublicApi);            // NOLINT
SELECTION_REGISTER(float, uint32_t, kRadix8bits);           // NOLINT
SELECTION_REGISTER(float, uint32_t, kRadix11bits);          // NOLINT
SELECTION_REGISTER(float, uint32_t, kRadix11bitsExtraPass); // NOLINT
SELECTION_REGISTER(float, uint32_t, kWarpAuto);             // NOLINT
SELECTION_REGISTER(float, uint32_t, kWarpImmediate);        // NOLINT
SELECTION_REGISTER(float, uint32_t, kWarpFiltered);         // NOLINT
SELECTION_REGISTER(float, uint32_t, kWarpDistributed);      // NOLINT
SELECTION_REGISTER(float, uint32_t, kWarpDistributedShm);   // NOLINT

// double/uint32_t: a reduced algorithm set.
SELECTION_REGISTER(double, uint32_t, kRadix8bits);           // NOLINT
SELECTION_REGISTER(double, uint32_t, kRadix11bits);          // NOLINT
SELECTION_REGISTER(double, uint32_t, kRadix11bitsExtraPass); // NOLINT
SELECTION_REGISTER(double, uint32_t, kWarpAuto);             // NOLINT

// double/int64_t: radix and explicit warp-sort variants.
SELECTION_REGISTER(double, int64_t, kRadix8bits);           // NOLINT
SELECTION_REGISTER(double, int64_t, kRadix11bits);          // NOLINT
SELECTION_REGISTER(double, int64_t, kRadix11bitsExtraPass); // NOLINT
SELECTION_REGISTER(double, int64_t, kWarpImmediate);        // NOLINT
SELECTION_REGISTER(double, int64_t, kWarpFiltered);         // NOLINT
SELECTION_REGISTER(double, int64_t, kWarpDistributed);      // NOLINT
SELECTION_REGISTER(double, int64_t, kWarpDistributedShm);   // NOLINT
// For learning a heuristic of which selection algorithm to use, we
// have a couple of additional constraints when generating the dataset:
// 1. We want these benchmarks to be optionally enabled from the commandline -
// there are thousands of them, and the run-time is non-trivial. This should be opt-in only
// 2. We test out larger k values - that won't work for all algorithms. This requires filtering
// the input parameters per algorithm.
// This makes the code to generate this dataset different from the code above to
// register other benchmarks
// Registers a single (algorithm, input) benchmark case, encoding the full case
// parameters into the benchmark name so the results can be used as a dataset.
#define SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, A, input)                                 \
  {                                                                                         \
    using SelectK = selection<KeyT, IdxT, select::Algo::A>;                                 \
    std::stringstream name;                                                                 \
    name << "SelectKDataset/" << #KeyT "/" #IdxT "/" #A << "/" << input.batch_size << "/"   \
         << input.len << "/" << input.k << "/" << input.use_index_input << "/"              \
         << input.use_memory_pool;                                                          \
    auto* b = ::benchmark::internal::RegisterBenchmarkInternal(                             \
      new cuvs::bench::internal::Fixture<SelectK, select::params>(name.str(), input));      \
    b->UseManualTime();                                                                     \
    b->Unit(benchmark::kMillisecond);                                                       \
  }

// Cap on the input + index memory footprint of a registered case (16 GiB).
const static size_t MAX_MEMORY = 16 * 1024 * 1024 * 1024ULL;

// registers the input for all algorithms
// Cases exceeding MAX_MEMORY are dropped entirely; warp-sort and faiss variants
// are additionally filtered by their per-algorithm maximum supported k.
#define SELECTION_REGISTER_INPUT(KeyT, IdxT, input)                            \
  {                                                                            \
    size_t mem = input.batch_size * input.len * (sizeof(KeyT) + sizeof(IdxT)); \
    if (mem < MAX_MEMORY) {                                                    \
      SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kRadix8bits, input)            \
      SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kRadix11bits, input)           \
      SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kRadix11bitsExtraPass, input)  \
      if (input.k <= raft::matrix::detail::select::warpsort::kMaxCapacity) {   \
        SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kWarpImmediate, input)       \
        SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kWarpFiltered, input)        \
        SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kWarpDistributed, input)     \
        SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kWarpDistributedShm, input)  \
      }                                                                        \
      if (input.k <= cuvs::neighbors::detail::kFaissMaxK<IdxT, KeyT>()) {      \
        SELECTION_REGISTER_ALGO_INPUT(KeyT, IdxT, kFaissBlockSelect, input)    \
      }                                                                        \
    }                                                                          \
  }
/**
 * Registers the opt-in grid of select_k benchmark cases used for learning a
 * selection heuristic. Called from main() only when the `--select_k_dataset`
 * commandline flag is passed.
 */
void add_select_k_dataset_benchmarks()
{
  const size_t grid_increment = 1;
  const static bool select_min = true;
  const static bool use_ids    = false;

  // Candidate k values: powers of two 1..4096, plus values just past the
  // warp-sort / faiss capacity limits.
  std::vector<int> k_vals;
  for (size_t exp = 0; exp < 13; exp += grid_increment) {
    k_vals.push_back(1 << exp);
  }
  k_vals.push_back(257);
  k_vals.push_back(2049);

  // Uniform grid over (rows, cols, k).
  std::vector<select::params> inputs;
  for (size_t row_exp = 0; row_exp < 13; row_exp += grid_increment) {
    for (size_t col_exp = 10; col_exp < 28; col_exp += grid_increment) {
      for (int k : k_vals) {
        inputs.push_back(
          select::params{size_t(1) << row_exp, size_t(1) << col_exp, k, select_min, use_ids});
      }
    }
  }

  // A sprinkle of random points drawn log-uniformly over the same ranges.
  std::default_random_engine rng(42);
  std::uniform_real_distribution<> row_dist(0, 13);
  std::uniform_real_distribution<> col_dist(10, 28);
  std::uniform_real_distribution<> k_dist(0, 13);
  for (size_t i = 0; i < 1024; ++i) {
    auto row = static_cast<size_t>(pow(2, row_dist(rng)));
    auto col = static_cast<size_t>(pow(2, col_dist(rng)));
    auto k   = static_cast<int>(pow(2, k_dist(rng)));
    inputs.push_back(select::params{row, col, k, select_min, use_ids});
  }

  // Register every input for every key/index type combination...
  for (auto& input : inputs) {
    SELECTION_REGISTER_INPUT(double, int64_t, input);
    SELECTION_REGISTER_INPUT(double, uint32_t, input);
    SELECTION_REGISTER_INPUT(float, int64_t, input);
    SELECTION_REGISTER_INPUT(float, uint32_t, input);
  }

  // ...and once more without the memory pool, to expose allocator overhead.
  for (auto input : inputs) {
    input.use_memory_pool = false;
    SELECTION_REGISTER_INPUT(double, int64_t, input);
    SELECTION_REGISTER_INPUT(double, uint32_t, input);
    SELECTION_REGISTER_INPUT(float, int64_t, input);
    SELECTION_REGISTER_INPUT(float, uint32_t, input);
  }
}
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/matrix/main.cpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmark/benchmark.h>
#include <cstring>
namespace raft::matrix {
// Declared here, defined in the select_k benchmark translation unit; registers
// the opt-in "dataset" grid of select_k benchmark cases.
void add_select_k_dataset_benchmarks();
}
/**
 * Benchmark entry point. Recognizes one custom flag, `--select_k_dataset`,
 * which enables the (large) extra grid of select_k benchmarks; the flag is
 * stripped from argv before handing control to google benchmark.
 */
int main(int argc, char** argv)
{
  for (int i = 1; i < argc; ++i) {
    if (std::strcmp(argv[i], "--select_k_dataset") != 0) { continue; }
    raft::matrix::add_select_k_dataset_benchmarks();
    // Remove the custom flag so google benchmark does not see it.
    for (int j = i + 1; j < argc; ++j) {
      argv[j - 1] = argv[j];
    }
    --argc;
    break;
  }

  benchmark::Initialize(&argc, argv);
  if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
  benchmark::RunSpecifiedBenchmarks();
}
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/common/benchmark.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/detail/macros.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/interruptible.hpp>
#include <raft/random/make_blobs.cuh>
#include <raft/util/cudart_utils.hpp>
#include <benchmark/benchmark.h>
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
namespace cuvs::bench {
/**
* RAII way to temporary set the pooling memory allocator in rmm.
* This may be useful for benchmarking functions that do some memory allocations.
*/
struct using_pool_memory_res {
 private:
  rmm::mr::device_memory_resource* orig_res_;  // resource to restore at destruction
  rmm::mr::cuda_memory_resource cuda_res_;     // upstream allocator backing the pool
  rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> pool_res_;

 public:
  /** Install a pool memory resource with an explicit initial/maximum size. */
  using_pool_memory_res(size_t initial_size, size_t max_size)
    : orig_res_(rmm::mr::get_current_device_resource()),
      pool_res_(&cuda_res_, initial_size, max_size)
  {
    rmm::mr::set_current_device_resource(&pool_res_);
  }

  /** Install a pool memory resource with rmm's default sizing. */
  using_pool_memory_res() : orig_res_(rmm::mr::get_current_device_resource()), pool_res_(&cuda_res_)
  {
    rmm::mr::set_current_device_resource(&pool_res_);
  }

  /** Restore the memory resource that was current at construction time. */
  ~using_pool_memory_res() { rmm::mr::set_current_device_resource(orig_res_); }
};
/**
* RAII way of timing cuda calls. This has been shamelessly copied from the
* cudf codebase via cuml codebase. So, credits for this class goes to cudf developers.
*/
struct cuda_event_timer {
 private:
  ::benchmark::State* state_;
  rmm::cuda_stream_view stream_;
  cudaEvent_t start_;
  cudaEvent_t stop_;

 public:
  /**
   * @param state the benchmark::State whose timer we are going to update.
   * @param stream CUDA stream we are measuring time on.
   */
  cuda_event_timer(::benchmark::State& state, rmm::cuda_stream_view stream)
    : state_(&state), stream_(stream)
  {
    RAFT_CUDA_TRY(cudaEventCreate(&start_));
    RAFT_CUDA_TRY(cudaEventCreate(&stop_));
    // Drain any outstanding work on the stream before recording the start
    // event, so previously enqueued work is not attributed to this lap.
    raft::interruptible::synchronize(stream_);
    RAFT_CUDA_TRY(cudaEventRecord(start_, stream_));
  }
  cuda_event_timer() = delete;

  /**
   * @brief The dtor stops the timer and performs a synchroniazation. Time of
   * the benchmark::State object provided to the ctor will be set to the
   * value given by `cudaEventElapsedTime()`.
   *
   * Uses the no-throw RAFT_CUDA_TRY variants since throwing from a destructor
   * would terminate the process.
   */
  ~cuda_event_timer()
  {
    RAFT_CUDA_TRY_NO_THROW(cudaEventRecord(stop_, stream_));
    // Wait until the stop event has completed before reading the elapsed time.
    raft::interruptible::synchronize(stop_);
    float milliseconds = 0.0f;
    RAFT_CUDA_TRY_NO_THROW(cudaEventElapsedTime(&milliseconds, start_, stop_));
    // google benchmark expects seconds.
    state_->SetIterationTime(milliseconds / 1000.f);
    RAFT_CUDA_TRY_NO_THROW(cudaEventDestroy(start_));
    RAFT_CUDA_TRY_NO_THROW(cudaEventDestroy(stop_));
  }
};
/** Main fixture to be inherited and used by all other c++ benchmarks */
class fixture {
 private:
  // Scratch buffer sized to 3x the device's L2 cache, memset between timed
  // iterations to evict cached data (see flush_L2_cache).
  rmm::device_buffer scratch_buf_;

 public:
  raft::device_resources handle;
  rmm::cuda_stream_view stream;

  // @param use_pool_memory_resource when true, installs (and caches across
  //   fixture instances) an RMM pool memory resource for the benchmark run.
  fixture(bool use_pool_memory_resource = false) : stream{resource::get_cuda_stream(handle)}
  {
    // Cache memory pool between test runs, since it is expensive to create.
    // This speeds up the time required to run the select_k bench by over 3x.
    // This is part of the fixture class here so that the pool will get cleaned
    // up, rather than outliving the benchmarks that require it.
    static std::unique_ptr<using_pool_memory_res> memory_pool;
    if (use_pool_memory_resource) {
      if (!memory_pool) { memory_pool.reset(new using_pool_memory_res()); }
    } else if (memory_pool) {
      // A pool-less fixture tears down any cached pool left by a previous one.
      memory_pool.reset();
    }

    // Size the scratch buffer from the current device's actual L2 cache size.
    int l2_cache_size = 0;
    int device_id     = 0;
    RAFT_CUDA_TRY(cudaGetDevice(&device_id));
    RAFT_CUDA_TRY(cudaDeviceGetAttribute(&l2_cache_size, cudaDevAttrL2CacheSize, device_id));
    scratch_buf_ = rmm::device_buffer(l2_cache_size * 3, stream);
  }

  // every benchmark should be overriding this
  virtual void run_benchmark(::benchmark::State& state) = 0;
  // Optional lifecycle hooks, invoked around each benchmark case by the
  // internal::Fixture wrapper; default to no-ops.
  virtual void generate_metrics(::benchmark::State& state) {}
  virtual void allocate_data(const ::benchmark::State& state) {}
  virtual void deallocate_data(const ::benchmark::State& state) {}
  virtual void allocate_temp_buffers(const ::benchmark::State& state) {}
  virtual void deallocate_temp_buffers(const ::benchmark::State& state) {}

 protected:
  /** The helper that writes zeroes to some buffer in GPU memory to flush the L2 cache. */
  void flush_L2_cache()
  {
    RAFT_CUDA_TRY(cudaMemsetAsync(scratch_buf_.data(), 0, scratch_buf_.size(), stream));
  }

  /**
   * The helper to be used inside `run_benchmark`, to loop over the state and record time using the
   * cuda_event_timer.
   */
  template <typename Lambda>
  void loop_on_state(::benchmark::State& state, Lambda benchmark_func, bool flush_L2 = true)
  {
    for (auto _ : state) {
      if (flush_L2) { flush_L2_cache(); }
      // The timer's destructor (end of this scope) records the lap time.
      cuda_event_timer timer(state, stream);
      benchmark_func();
    }
  }
};
/** Indicates the dataset size. */
struct DatasetParams {
  size_t rows;     // number of data points
  size_t cols;     // number of features per point
  bool row_major;  // memory layout of the generated matrix
};
/** Holds params needed to generate blobs dataset */
struct BlobsParams {
  int n_clusters;      // number of blob centers to generate
  double cluster_std;  // standard deviation of points around their center
  bool shuffle;        // whether to shuffle the generated rows
  double center_box_min, center_box_max;  // bounding box for the cluster centers
  uint64_t seed;       // RNG seed, for reproducibility
};
/** Fixture for cluster benchmarks using make_blobs */
template <typename T, typename IndexT = int>
class BlobsFixture : public fixture {
 public:
  BlobsFixture(const DatasetParams dp, const BlobsParams bp)
    : data_params(dp), blobs_params(bp), X(this->handle)
  {
  }

  // Subclasses implement the timed benchmark body.
  virtual void run_benchmark(::benchmark::State& state) = 0;

  // Generates the blobs dataset into X; cluster labels are generated into a
  // temporary vector and discarded.
  void allocate_data(const ::benchmark::State& state) override
  {
    auto labels_ref = raft::make_device_vector<IndexT, IndexT>(this->handle, data_params.rows);
    X = raft::make_device_matrix<T, IndexT>(this->handle, data_params.rows, data_params.cols);

    raft::random::make_blobs<T, IndexT>(X.data_handle(),
                                        labels_ref.data_handle(),
                                        (IndexT)data_params.rows,
                                        (IndexT)data_params.cols,
                                        (IndexT)blobs_params.n_clusters,
                                        stream,
                                        data_params.row_major,
                                        nullptr,
                                        nullptr,
                                        (T)blobs_params.cluster_std,
                                        blobs_params.shuffle,
                                        (T)blobs_params.center_box_min,
                                        (T)blobs_params.center_box_max,
                                        blobs_params.seed);
    // Make sure generation has finished before the benchmark body starts.
    resource::sync_stream(this->handle, stream);
  }

 protected:
  DatasetParams data_params;
  BlobsParams blobs_params;
  raft::device_matrix<T, IndexT> X;  // the generated dataset
};
namespace internal {
/**
 * Adapter exposing a cuvs::bench fixture `Class` (constructed from the stored
 * `Params`) as a googlebenchmark Fixture. The wrapped fixture is created in
 * SetUp and destroyed in TearDown, so per-case data does not outlive its case.
 */
template <typename Class, typename... Params>
class Fixture : public ::benchmark::Fixture {
  using State = ::benchmark::State;

 public:
  explicit Fixture(const std::string name, const Params&... params)
    : ::benchmark::Fixture(), params_(params...), name_(name)
  {
    SetName(name_.c_str());
  }
  Fixture() = delete;

  void SetUp(const State& state) override
  {
    // Construct the user fixture from the stored parameter tuple, then run its
    // allocation hooks.
    fixture_ =
      std::apply([](const Params&... ps) { return std::make_unique<Class>(ps...); }, params_);
    fixture_->allocate_data(state);
    fixture_->allocate_temp_buffers(state);
  }
  void TearDown(const State& state) override
  {
    fixture_->deallocate_temp_buffers(state);
    fixture_->deallocate_data(state);
    fixture_.reset();
  }

  // Forward the non-const overloads (also part of the benchmark::Fixture
  // interface) to the const implementations above.
  void SetUp(State& st) override { SetUp(const_cast<const State&>(st)); }
  void TearDown(State& st) override { TearDown(const_cast<const State&>(st)); }

 private:
  std::unique_ptr<Class> fixture_;
  std::tuple<Params...> params_;
  const std::string name_;

 protected:
  void BenchmarkCase(State& state) override
  {
    fixture_->run_benchmark(state);
    fixture_->generate_metrics(state);
  }
};  // class Fixture
/**
 * A helper struct to create a fixture for every combination of input vectors.
 * Use with care, this can blow up quickly!
 *
 * The specializations below peel one parameter vector at a time, appending
 * the chosen element to the trailing `fixed...` pack; the base case (no
 * vectors left) performs the actual google-benchmark registration.
 */
template <typename Class, typename... Params>
struct cartesian_registrar {
  template <typename... Fixed>
  static void run(const std::string case_name,
                  const std::vector<Params>&... params,
                  const Fixed&... fixed);
};
/**
 * Base case: all parameter vectors consumed — register one benchmark whose
 * fixture is constructed from the accumulated `fixed...` values.
 */
template <typename Class>
struct cartesian_registrar<Class> {
  template <typename... Fixed>
  static void run(const std::string case_name, const Fixed&... fixed)
  {
    auto* b = ::benchmark::internal::RegisterBenchmarkInternal(
      new Fixture<Class, Fixed...>(case_name, fixed...));
    // Manual time: presumably the fixture reports its own (GPU) timing via
    // state.SetIterationTime — confirm against the fixture's loop helper.
    b->UseManualTime();
    b->Unit(benchmark::kMillisecond);
  }
};
/**
 * Recursive case: iterate over the first parameter vector; for each element,
 * recurse with the remaining vectors, appending the element's index to the
 * benchmark name and the element itself after the `fixed...` pack.
 */
template <typename Class, typename Param, typename... Params>
struct cartesian_registrar<Class, Param, Params...> {
  template <typename... Fixed>
  static void run(const std::string case_name,
                  const std::vector<Param>& param,
                  const std::vector<Params>&... params,
                  const Fixed&... fixed)
  {
    int param_len = param.size();
    for (int i = 0; i < param_len; i++) {
      // The element is appended after `fixed...`, so the fixture's
      // constructor receives values in the same order as the input vectors.
      cartesian_registrar<Class, Params...>::run(
        case_name + "/" + std::to_string(i), params..., fixed..., param[i]);
    }
  }
};
template <typename Class>
struct registrar {
  /**
   * Register a fixture `Class` named `testClass` for every combination of input `params`.
   *
   * @param test_class
   *   A string representation of the `Class` name.
   * @param test_name
   *   Optional test name. Leave empty, if you don't need it.
   * @param params
   *   Zero or more vectors of parameters.
   *   The generated test cases are a cartesian product of these vectors.
   *   Use with care, this can blow up quickly!
   */
  template <typename... Params>
  registrar(const std::string& test_class,
            const std::string& test_name,
            const std::vector<Params>&... params)
  {
    // Benchmark name is "<class>" or "<class>/<test_name>".
    auto full_name = test_class;
    if (!test_name.empty()) { full_name += "/" + test_name; }
    cartesian_registrar<Class, Params...>::run(full_name, params...);
  }
};
}; // namespace internal
// Defines a file-local static `registrar` object whose constructor runs at
// program start-up and performs the google-benchmark registration for every
// parameter combination of `TestClass`.
#define RAFT_BENCH_REGISTER_INTERNAL(TestClass, ...)                                    \
  static cuvs::bench::internal::registrar<TestClass> BENCHMARK_PRIVATE_NAME(registrar)( \
    RAFT_STRINGIFY(TestClass), __VA_ARGS__)
/**
* This is the entry point macro for all benchmarks. This needs to be called
* for the set of benchmarks to be registered so that the main harness inside
* google bench can find these benchmarks and run them.
*
* @param TestClass child class of `cuvs::bench::Fixture` which contains
* the logic to generate the dataset and run training on it
* for a given algo. Ideally, once such struct is needed for
* every algo to be benchmarked
* @param test_name a unique string to identify these tests at the end of run
* This is optional and if choose not to use this, pass an
* empty string
* @param params... zero or more lists of params upon which to benchmark.
*/
#define RAFT_BENCH_REGISTER(TestClass, ...) \
RAFT_BENCH_REGISTER_INTERNAL(RAFT_DEPAREN(TestClass), __VA_ARGS__)
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/cluster/kmeans.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <cuvs/cluster/kmeans.cuh>
#include <cuvs/cluster/kmeans_types.hpp>
namespace cuvs::bench::cluster {
/** Aggregates all knobs for one k-means benchmark case. */
struct KMeansBenchParams {
  DatasetParams data;                  // dataset shape / layout
  BlobsParams blobs;                   // synthetic blob-generation settings
  cuvs::cluster::KMeansParams kmeans;  // algorithm configuration
};
/** Format a benchmark case as "<rows>#<cols>#<n_clusters>". */
inline auto operator<<(std::ostream& os, const KMeansBenchParams& p) -> std::ostream&
{
  return os << p.data.rows << "#" << p.data.cols << "#" << p.kmeans.n_clusters;
}
/**
 * Benchmark fixture for k-means: generates a blobs dataset and repeatedly
 * times `kmeans_fit_predict` on it.
 *
 * @tparam T      floating-point element type of the data
 * @tparam IndexT integer type used for indexing and labels
 */
template <typename T, typename IndexT = int>
struct KMeans : public BlobsFixture<T, IndexT> {
  KMeans(const KMeansBenchParams& p)
    : BlobsFixture<T, IndexT>(p.data, p.blobs),
      params(p),
      centroids(this->handle),
      labels(this->handle)
  {
  }

  /** Time one `kmeans_fit_predict` call per benchmark iteration. */
  void run_benchmark(::benchmark::State& state) override
  {
    // Label the reported case with "<rows>#<cols>#<n_clusters>" (operator<<).
    std::ostringstream label_stream;
    label_stream << params;
    state.SetLabel(label_stream.str());
    raft::device_matrix_view<const T, IndexT> X_view = this->X.view();
    // No sample weights: every row contributes equally.
    std::optional<raft::device_vector_view<const T, IndexT>> opt_weights_view = std::nullopt;
    std::optional<raft::device_matrix_view<T, IndexT>> centroids_view =
      std::make_optional<raft::device_matrix_view<T, IndexT>>(centroids.view());
    raft::device_vector_view<IndexT, IndexT> labels_view = labels.view();
    raft::host_scalar_view<T> inertia_view = raft::make_host_scalar_view<T>(&inertia);
    raft::host_scalar_view<IndexT> n_iter_view = raft::make_host_scalar_view<IndexT>(&n_iter);
    this->loop_on_state(state, [&]() {
      cuvs::cluster::kmeans_fit_predict<T, IndexT>(this->handle,
                                                   params.kmeans,
                                                   X_view,
                                                   opt_weights_view,
                                                   centroids_view,
                                                   labels_view,
                                                   inertia_view,
                                                   n_iter_view);
    });
  }

  /** Allocate outputs (centroids, labels) outside the timed region. */
  void allocate_temp_buffers(const ::benchmark::State& state) override
  {
    centroids =
      raft::make_device_matrix<T, IndexT>(this->handle, params.kmeans.n_clusters, params.data.cols);
    labels = raft::make_device_vector<IndexT, IndexT>(this->handle, params.data.rows);
  }

 private:
  KMeansBenchParams params;                    // benchmark configuration
  raft::device_matrix<T, IndexT> centroids;    // fitted cluster centers
  raft::device_vector<IndexT, IndexT> labels;  // per-row cluster assignment
  T inertia;                                   // host-side output scalar
  IndexT n_iter;                               // iterations actually executed
};  // struct KMeans
std::vector<KMeansBenchParams> getKMeansInputs()
{
std::vector<KMeansBenchParams> out;
KMeansBenchParams p;
p.data.row_major = true;
p.blobs.cluster_std = 1.0;
p.blobs.shuffle = false;
p.blobs.center_box_min = -10.0;
p.blobs.center_box_max = 10.0;
p.blobs.seed = 12345ULL;
p.kmeans.init = cuvs::cluster::KMeansParams::KMeansPlusPlus;
p.kmeans.max_iter = 300;
p.kmeans.tol = 1e-4;
p.kmeans.verbosity = RAFT_LEVEL_INFO;
p.kmeans.metric = cuvs::distance::DistanceType::L2Expanded;
p.kmeans.inertia_check = true;
std::vector<std::tuple<int, int, int>> row_cols_k = {
{1000000, 20, 1000},
{3000000, 50, 20},
{10000000, 50, 5},
};
for (auto& rck : row_cols_k) {
p.data.rows = std::get<0>(rck);
p.data.cols = std::get<1>(rck);
p.blobs.n_clusters = std::get<2>(rck);
p.kmeans.n_clusters = std::get<2>(rck);
out.push_back(p);
}
return out;
}
// note(lsugy): commenting out int64_t because the templates are not compiled in the distance
// library, resulting in long compilation times.
RAFT_BENCH_REGISTER((KMeans<float, int>), "", getKMeansInputs());
RAFT_BENCH_REGISTER((KMeans<double, int>), "", getKMeansInputs());
// RAFT_BENCH_REGISTER((KMeans<float, int64_t>), "", getKMeansInputs());
// RAFT_BENCH_REGISTER((KMeans<double, int64_t>), "", getKMeansInputs());
} // namespace cuvs::bench::cluster
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/micro | rapidsai_public_repos/cuvs/cpp/bench/micro/cluster/kmeans_balanced.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <cuvs/cluster/kmeans_balanced.cuh>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rng.cuh>
namespace cuvs::bench::cluster {
/** Aggregates all knobs for one balanced k-means benchmark case. */
struct KMeansBalancedBenchParams {
  DatasetParams data;                               // dataset shape / layout
  uint32_t n_lists;                                 // number of clusters to fit
  cuvs::cluster::kmeans_balanced_params kb_params;  // iterations + metric
};
/**
 * Benchmark fixture for balanced k-means: fills `X` with uniform random
 * values and times `kmeans_balanced::fit`.
 *
 * @tparam T      element type of the input data (integral or floating-point)
 * @tparam IndexT integer type used for matrix extents
 */
template <typename T, typename IndexT = int>
struct KMeansBalanced : public fixture {
  KMeansBalanced(const KMeansBalancedBenchParams& p) : params(p), X(handle), centroids(handle) {}

  /** Time one `fit` call per benchmark iteration. */
  void run_benchmark(::benchmark::State& state) override
  {
    this->loop_on_state(state, [this]() {
      raft::device_matrix_view<const T, IndexT> X_view = this->X.view();
      raft::device_matrix_view<T, IndexT> centroids_view = this->centroids.view();
      cuvs::cluster::kmeans_balanced::fit(
        this->handle, this->params.kb_params, X_view, centroids_view);
    });
  }

  /**
   * Fill `X` with uniform random data: the full representable range for
   * integral T, or [-1, 1] for floating-point T.
   */
  void allocate_data(const ::benchmark::State& state) override
  {
    X = raft::make_device_matrix<T, IndexT>(handle, params.data.rows, params.data.cols);
    raft::random::RngState rng{1234};
    constexpr T kRangeMax = std::is_integral_v<T> ? std::numeric_limits<T>::max() : T(1);
    constexpr T kRangeMin = std::is_integral_v<T> ? std::numeric_limits<T>::min() : T(-1);
    // NOTE(review): rows * cols is computed in the DatasetParams field type —
    // confirm it cannot overflow for the largest configured datasets.
    if constexpr (std::is_integral_v<T>) {
      raft::random::uniformInt(
        handle, rng, X.data_handle(), params.data.rows * params.data.cols, kRangeMin, kRangeMax);
    } else {
      raft::random::uniform(
        handle, rng, X.data_handle(), params.data.rows * params.data.cols, kRangeMin, kRangeMax);
    }
    // Ensure generation has finished before the benchmark starts timing.
    resource::sync_stream(handle, stream);
  }

  /** Centroids are allocated as float regardless of the input type T. */
  void allocate_temp_buffers(const ::benchmark::State& state) override
  {
    centroids =
      raft::make_device_matrix<float, IndexT>(this->handle, params.n_lists, params.data.cols);
  }

 private:
  KMeansBalancedBenchParams params;              // benchmark configuration
  raft::device_matrix<T, IndexT> X;              // input dataset
  raft::device_matrix<float, IndexT> centroids;  // fitted cluster centers
};  // struct KMeansBalanced
std::vector<KMeansBalancedBenchParams> getKMeansBalancedInputs()
{
std::vector<KMeansBalancedBenchParams> out;
KMeansBalancedBenchParams p;
p.data.row_major = true;
p.kb_params.n_iters = 20;
p.kb_params.metric = cuvs::distance::DistanceType::L2Expanded;
std::vector<std::pair<int, int>> row_cols = {
{100000, 128}, {1000000, 128}, {10000000, 128},
// The following dataset sizes are too large for most GPUs.
// {100000000, 128},
};
for (auto& rc : row_cols) {
p.data.rows = rc.first;
p.data.cols = rc.second;
for (auto n_lists : std::vector<int>({1000, 10000, 100000})) {
p.n_lists = n_lists;
out.push_back(p);
}
}
return out;
}
// Note: the datasets sizes are too large for 32-bit index types.
RAFT_BENCH_REGISTER((KMeansBalanced<float, int64_t>), "", getKMeansBalancedInputs());
} // namespace cuvs::bench::cluster
| 0 |
rapidsai_public_repos/cuvs/cpp | rapidsai_public_repos/cuvs/cpp/scripts/analyze_nvcc_log.py | #!/usr/bin/env python3
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from matplotlib import colors
def main(input_path):
    """Analyze an ``nvcc --time`` CSV log and plot per-file compile times.

    Aggregates the per-phase timings for every translation unit, prints the
    slowest ones, and writes two stacked-histogram PNGs (absolute seconds and
    per-file fractions) next to ``input_path``.

    Parameters
    ----------
    input_path : str or Path
        Path to the CSV file produced by ``nvcc --time=<file>``.
    """
    input_path = Path(input_path)
    print("-- loading data")
    df = pd.read_csv(input_path)
    print("-- analyzing data")
    # Strip spaces from column names
    df = df.rename(columns=str.strip)
    # The "metric" column is in milliseconds.
    df["seconds"] = df["metric"] / 1000
    df["file"] = df["source file name"]
    df["phase"] = df["phase name"].str.strip()
    dfp = (df
           # Remove nvcc driver entries. They don't contain a source file name
           .query("phase!='nvcc (driver)'")
           # Make a pivot table containing files as row, phase (preprocessing,
           # cicc, etc.) as column and the total times as table entries. NOTE:
           # if compiled for multiple archs, the archs will be summed.
           .pivot_table(index="file", values="seconds", columns="phase", aggfunc='sum'))
    # Per-file totals and the fraction each phase contributes to them.
    dfp_sum = dfp.sum(axis="columns")
    df_fraction = dfp.divide(dfp_sum, axis="index")
    df_fraction["total time"] = dfp_sum
    df_fraction = df_fraction.melt(ignore_index=False, id_vars="total time", var_name="phase", value_name="fraction")
    dfp["total time"] = dfp_sum
    df_absolute = dfp.melt(ignore_index=False, id_vars="total time", var_name="phase", value_name="seconds")
    # host: light red to dark red (preprocessing, cudafe, gcc (compiling))
    # device: light green to dark green (preprocessing, cicc, ptxas)
    palette = {
        "gcc (preprocessing 4)": colors.hsv_to_rgb((0, 1, 1)),
        'cudafe++': colors.hsv_to_rgb((0, 1, .75)),
        'gcc (compiling)': colors.hsv_to_rgb((0, 1, .4)),
        "gcc (preprocessing 1)": colors.hsv_to_rgb((.33, 1, 1)),
        'cicc': colors.hsv_to_rgb((.33, 1, 0.75)),
        'ptxas': colors.hsv_to_rgb((.33, 1, 0.4)),
        'fatbinary': "grey",
    }
    print("-- Ten longest translation units:")
    # Truncate long paths so the printed table fits the display width.
    colwidth = pd.get_option('display.max_colwidth') - 1
    dfp = dfp.reset_index()
    dfp["file"] = dfp["file"].apply(lambda s: s[-colwidth:])
    # NOTE(review): .loc is label-based and inclusive, so this prints 11 rows
    # rather than 10 — confirm whether that is intended.
    print(dfp.sort_values("total time", ascending=False).reset_index().loc[:10])
    print("-- Plotting absolute compile times")
    abs_out_path = f"{input_path}.absolute.compile_times.png"
    sns.displot(
        df_absolute.sort_values("total time").reset_index(),
        y="file",
        hue="phase",
        hue_order=reversed(
            ["gcc (preprocessing 4)", 'cudafe++', 'gcc (compiling)',
             "gcc (preprocessing 1)", 'cicc', 'ptxas',
             'fatbinary',
             ]),
        palette=palette,
        weights="seconds",
        multiple="stack",
        kind="hist",
        height=20,
    )
    plt.xlabel("seconds");
    plt.savefig(abs_out_path)
    print(f"-- Wrote absolute compile time plot to {abs_out_path}")
    print("-- Plotting relative compile times")
    rel_out_path = f"{input_path}.relative.compile_times.png"
    sns.displot(
        df_fraction.sort_values('total time').reset_index(),
        y="file",
        hue="phase",
        hue_order=reversed(["gcc (preprocessing 4)", 'cudafe++', 'gcc (compiling)',
                            "gcc (preprocessing 1)", 'cicc', 'ptxas',
                            'fatbinary',
                            ]),
        palette=palette,
        weights="fraction",
        multiple="stack",
        kind="hist",
        height=15,
    )
    plt.xlabel("fraction");
    plt.savefig(rel_out_path)
    print(f"-- Wrote relative compile time plot to {rel_out_path}")
if __name__ == "__main__":
    if len(sys.argv) != 2:
        # Bug fix: this previously called `printf`, which does not exist in
        # Python (NameError), and then fell through to index sys.argv[1]
        # anyway (IndexError). Print the usage text and exit non-zero instead.
        print("""NVCC log analyzer
Analyzes nvcc logs and outputs a figure with highest ranking translation
units.
Usage:
    python analyze_nvcc_log.py <nvcc_log_file.csv>
    cpp/scripts/analyze_nvcc_log.py <nvcc_log_file.csv>
Generate the nvcc log file by adding:
    list(APPEND CUVS_CUDA_FLAGS "--time=CMakeFiles/nvcc_compile_log.csv")
to cpp/cmake/modules/ConfigureCUDA.cmake.
        """)
        sys.exit(1)
    input_path = Path(sys.argv[1])
    if not input_path.exists():
        print(f"Path {input_path} does not exist.")
    else:
        main(input_path)
| 0 |
rapidsai_public_repos/cuvs/cpp | rapidsai_public_repos/cuvs/cpp/scripts/__clang_cuda_additional_intrinsics.h | // Copyright (c) 2022-2023, NVIDIA CORPORATION.
#ifndef __CLANG_CUDA_ADDITIONAL_INTRINSICS_H__
#define __CLANG_CUDA_ADDITIONAL_INTRINSICS_H__
#ifndef __CUDA__
#error "This file is for CUDA compilation only."
#endif
// for some of these macros, see cuda_fp16.hpp
#if defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320))
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
#define __LDG_PTR "l"
#define __LBITS "64"
#else
#define __LDG_PTR "r"
#define __LBITS "32"
#endif // (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
#define __NOARG
// Generates a scalar load intrinsic `__ld<cop>` (e.g. __ldcg) for `c_typ`,
// emitting a single `ld.<cop>.<ptx_typ>` PTX instruction. `int_typ` is the
// register-sized temporary, `inl_typ` the inline-asm constraint letter, and
// `mem` an optional `: "memory"` clobber (used by the volatile-like cache
// operators lu/cv).
#define __MAKE_LD(cop, c_typ, int_typ, ptx_typ, inl_typ, mem)                          \
  __device__ __forceinline__ c_typ __ld##cop(const c_typ* addr)                        \
  {                                                                                    \
    int_typ out;                                                                       \
    asm("ld." #cop "." ptx_typ " %0, [%1];" : "=" inl_typ(out) : __LDG_PTR(addr) mem); \
    return (c_typ)out;                                                                 \
  }
// Same as __MAKE_LD, but for 2-component vector types: loads both lanes with
// a single `ld.<cop>.v2` instruction and re-assembles the vector struct.
#define __MAKE_LD2(cop, c_typ, int_typ, ptx_typ, inl_typ, mem) \
  __device__ __forceinline__ c_typ __ld##cop(const c_typ* addr) \
  {                                                             \
    int_typ out1, out2;                                         \
    asm("ld." #cop ".v2." ptx_typ " {%0, %1}, [%2];"            \
        : "=" inl_typ(out1), "=" inl_typ(out2)                  \
        : __LDG_PTR(addr) mem);                                 \
    c_typ out;                                                  \
    out.x = out1;                                               \
    out.y = out2;                                               \
    return out;                                                 \
  }
// Same as __MAKE_LD, but for 4-component vector types: loads all four lanes
// with a single `ld.<cop>.v4` instruction and re-assembles the vector struct.
#define __MAKE_LD4(cop, c_typ, int_typ, ptx_typ, inl_typ, mem)                           \
  __device__ __forceinline__ c_typ __ld##cop(const c_typ* addr)                          \
  {                                                                                      \
    int_typ out1, out2, out3, out4;                                                      \
    asm("ld." #cop ".v4." ptx_typ " {%0, %1, %2, %3}, [%4];"                             \
        : "=" inl_typ(out1), "=" inl_typ(out2), "=" inl_typ(out3), "=" inl_typ(out4)     \
        : __LDG_PTR(addr) mem);                                                          \
    c_typ out;                                                                           \
    out.x = out1;                                                                        \
    out.y = out2;                                                                        \
    out.z = out3;                                                                        \
    out.w = out4;                                                                        \
    return out;                                                                          \
  }
__MAKE_LD(cg, char, short, "s8", "h", __NOARG)
__MAKE_LD(cg, signed char, short, "s8", "h", __NOARG)
__MAKE_LD(cg, unsigned char, short, "u8", "h", __NOARG)
__MAKE_LD(cg, short, short, "s16", "h", __NOARG)
__MAKE_LD(cg, unsigned short, unsigned short, "u16", "h", __NOARG)
__MAKE_LD(cg, int, int, "s32", "r", __NOARG)
__MAKE_LD(cg, unsigned int, unsigned int, "u32", "r", __NOARG)
__MAKE_LD(cg, long, long, "s" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(cg, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(cg, long long, long long, "s64", "l", __NOARG)
__MAKE_LD(cg, unsigned long long, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD(cg, float, float, "f32", "f", __NOARG)
__MAKE_LD(cg, double, double, "f64", "d", __NOARG)
__MAKE_LD2(cg, char2, short, "s8", "h", __NOARG)
__MAKE_LD2(cg, uchar2, short, "u8", "h", __NOARG)
__MAKE_LD2(cg, short2, short, "s16", "h", __NOARG)
__MAKE_LD2(cg, ushort2, unsigned short, "u16", "h", __NOARG)
__MAKE_LD2(cg, int2, int, "s32", "r", __NOARG)
__MAKE_LD2(cg, uint2, unsigned int, "u32", "r", __NOARG)
__MAKE_LD2(cg, longlong2, long long, "s64", "l", __NOARG)
__MAKE_LD2(cg, ulonglong2, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD2(cg, float2, float, "f32", "f", __NOARG)
__MAKE_LD2(cg, double2, double, "f64", "d", __NOARG)
__MAKE_LD4(cg, char4, short, "s8", "h", __NOARG)
__MAKE_LD4(cg, uchar4, short, "u8", "h", __NOARG)
__MAKE_LD4(cg, short4, short, "s16", "h", __NOARG)
__MAKE_LD4(cg, ushort4, unsigned short, "u16", "h", __NOARG)
__MAKE_LD4(cg, int4, int, "s32", "r", __NOARG)
__MAKE_LD4(cg, uint4, unsigned int, "u32", "r", __NOARG)
__MAKE_LD4(cg, float4, float, "f32", "f", __NOARG)
__MAKE_LD(ca, char, short, "s8", "h", __NOARG)
__MAKE_LD(ca, signed char, short, "s8", "h", __NOARG)
__MAKE_LD(ca, unsigned char, short, "u8", "h", __NOARG)
__MAKE_LD(ca, short, short, "s16", "h", __NOARG)
__MAKE_LD(ca, unsigned short, unsigned short, "u16", "h", __NOARG)
__MAKE_LD(ca, int, int, "s32", "r", __NOARG)
__MAKE_LD(ca, unsigned int, unsigned int, "u32", "r", __NOARG)
__MAKE_LD(ca, long, long, "s" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(ca, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(ca, long long, long long, "s64", "l", __NOARG)
__MAKE_LD(ca, unsigned long long, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD(ca, float, float, "f32", "f", __NOARG)
__MAKE_LD(ca, double, double, "f64", "d", __NOARG)
__MAKE_LD2(ca, char2, short, "s8", "h", __NOARG)
__MAKE_LD2(ca, uchar2, short, "u8", "h", __NOARG)
__MAKE_LD2(ca, short2, short, "s16", "h", __NOARG)
__MAKE_LD2(ca, ushort2, unsigned short, "u16", "h", __NOARG)
__MAKE_LD2(ca, int2, int, "s32", "r", __NOARG)
__MAKE_LD2(ca, uint2, unsigned int, "u32", "r", __NOARG)
__MAKE_LD2(ca, longlong2, long long, "s64", "l", __NOARG)
__MAKE_LD2(ca, ulonglong2, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD2(ca, float2, float, "f32", "f", __NOARG)
__MAKE_LD2(ca, double2, double, "f64", "d", __NOARG)
__MAKE_LD4(ca, char4, short, "s8", "h", __NOARG)
__MAKE_LD4(ca, uchar4, short, "u8", "h", __NOARG)
__MAKE_LD4(ca, short4, short, "s16", "h", __NOARG)
__MAKE_LD4(ca, ushort4, unsigned short, "u16", "h", __NOARG)
__MAKE_LD4(ca, int4, int, "s32", "r", __NOARG)
__MAKE_LD4(ca, uint4, unsigned int, "u32", "r", __NOARG)
__MAKE_LD4(ca, float4, float, "f32", "f", __NOARG)
__MAKE_LD(cs, char, short, "s8", "h", __NOARG)
__MAKE_LD(cs, signed char, short, "s8", "h", __NOARG)
__MAKE_LD(cs, unsigned char, short, "u8", "h", __NOARG)
__MAKE_LD(cs, short, short, "s16", "h", __NOARG)
__MAKE_LD(cs, unsigned short, unsigned short, "u16", "h", __NOARG)
__MAKE_LD(cs, int, int, "s32", "r", __NOARG)
__MAKE_LD(cs, unsigned int, unsigned int, "u32", "r", __NOARG)
__MAKE_LD(cs, long, long, "s" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(cs, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(cs, long long, long long, "s64", "l", __NOARG)
__MAKE_LD(cs, unsigned long long, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD(cs, float, float, "f32", "f", __NOARG)
__MAKE_LD(cs, double, double, "f64", "d", __NOARG)
__MAKE_LD2(cs, char2, short, "s8", "h", __NOARG)
__MAKE_LD2(cs, uchar2, short, "u8", "h", __NOARG)
__MAKE_LD2(cs, short2, short, "s16", "h", __NOARG)
__MAKE_LD2(cs, ushort2, unsigned short, "u16", "h", __NOARG)
__MAKE_LD2(cs, int2, int, "s32", "r", __NOARG)
__MAKE_LD2(cs, uint2, unsigned int, "u32", "r", __NOARG)
__MAKE_LD2(cs, longlong2, long long, "s64", "l", __NOARG)
__MAKE_LD2(cs, ulonglong2, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD2(cs, float2, float, "f32", "f", __NOARG)
__MAKE_LD2(cs, double2, double, "f64", "d", __NOARG)
__MAKE_LD4(cs, char4, short, "s8", "h", __NOARG)
__MAKE_LD4(cs, uchar4, short, "u8", "h", __NOARG)
__MAKE_LD4(cs, short4, short, "s16", "h", __NOARG)
__MAKE_LD4(cs, ushort4, unsigned short, "u16", "h", __NOARG)
__MAKE_LD4(cs, int4, int, "s32", "r", __NOARG)
__MAKE_LD4(cs, uint4, unsigned int, "u32", "r", __NOARG)
__MAKE_LD4(cs, float4, float, "f32", "f", __NOARG)
__MAKE_LD(lu, char, short, "s8", "h", : "memory")
__MAKE_LD(lu, signed char, short, "s8", "h", : "memory")
__MAKE_LD(lu, unsigned char, short, "u8", "h", : "memory")
__MAKE_LD(lu, short, short, "s16", "h", : "memory")
__MAKE_LD(lu, unsigned short, unsigned short, "u16", "h", : "memory")
__MAKE_LD(lu, int, int, "s32", "r", : "memory")
__MAKE_LD(lu, unsigned int, unsigned int, "u32", "r", : "memory")
__MAKE_LD(lu, long, long, "s" __LBITS, __LDG_PTR, : "memory")
__MAKE_LD(lu, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, : "memory")
__MAKE_LD(lu, long long, long long, "s64", "l", : "memory")
__MAKE_LD(lu, unsigned long long, unsigned long long, "u64", "l", : "memory")
__MAKE_LD(lu, float, float, "f32", "f", : "memory")
__MAKE_LD(lu, double, double, "f64", "d", : "memory")
__MAKE_LD2(lu, char2, short, "s8", "h", : "memory")
__MAKE_LD2(lu, uchar2, short, "u8", "h", : "memory")
__MAKE_LD2(lu, short2, short, "s16", "h", : "memory")
__MAKE_LD2(lu, ushort2, unsigned short, "u16", "h", : "memory")
__MAKE_LD2(lu, int2, int, "s32", "r", : "memory")
__MAKE_LD2(lu, uint2, unsigned int, "u32", "r", : "memory")
__MAKE_LD2(lu, longlong2, long long, "s64", "l", : "memory")
__MAKE_LD2(lu, ulonglong2, unsigned long long, "u64", "l", : "memory")
__MAKE_LD2(lu, float2, float, "f32", "f", : "memory")
__MAKE_LD2(lu, double2, double, "f64", "d", : "memory")
__MAKE_LD4(lu, char4, short, "s8", "h", : "memory")
__MAKE_LD4(lu, uchar4, short, "u8", "h", : "memory")
__MAKE_LD4(lu, short4, short, "s16", "h", : "memory")
__MAKE_LD4(lu, ushort4, unsigned short, "u16", "h", : "memory")
__MAKE_LD4(lu, int4, int, "s32", "r", : "memory")
__MAKE_LD4(lu, uint4, unsigned int, "u32", "r", : "memory")
__MAKE_LD4(lu, float4, float, "f32", "f", : "memory")
__MAKE_LD(cv, char, short, "s8", "h", : "memory")
__MAKE_LD(cv, signed char, short, "s8", "h", : "memory")
__MAKE_LD(cv, unsigned char, short, "u8", "h", : "memory")
__MAKE_LD(cv, short, short, "s16", "h", : "memory")
__MAKE_LD(cv, unsigned short, unsigned short, "u16", "h", : "memory")
__MAKE_LD(cv, int, int, "s32", "r", : "memory")
__MAKE_LD(cv, unsigned int, unsigned int, "u32", "r", : "memory")
__MAKE_LD(cv, long, long, "s" __LBITS, __LDG_PTR, : "memory")
__MAKE_LD(cv, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, : "memory")
__MAKE_LD(cv, long long, long long, "s64", "l", : "memory")
__MAKE_LD(cv, unsigned long long, unsigned long long, "u64", "l", : "memory")
__MAKE_LD(cv, float, float, "f32", "f", : "memory")
__MAKE_LD(cv, double, double, "f64", "d", : "memory")
__MAKE_LD2(cv, char2, short, "s8", "h", : "memory")
__MAKE_LD2(cv, uchar2, short, "u8", "h", : "memory")
__MAKE_LD2(cv, short2, short, "s16", "h", : "memory")
__MAKE_LD2(cv, ushort2, unsigned short, "u16", "h", : "memory")
__MAKE_LD2(cv, int2, int, "s32", "r", : "memory")
__MAKE_LD2(cv, uint2, unsigned int, "u32", "r", : "memory")
__MAKE_LD2(cv, longlong2, long long, "s64", "l", : "memory")
__MAKE_LD2(cv, ulonglong2, unsigned long long, "u64", "l", : "memory")
__MAKE_LD2(cv, float2, float, "f32", "f", : "memory")
__MAKE_LD2(cv, double2, double, "f64", "d", : "memory")
__MAKE_LD4(cv, char4, short, "s8", "h", : "memory")
__MAKE_LD4(cv, uchar4, short, "u8", "h", : "memory")
__MAKE_LD4(cv, short4, short, "s16", "h", : "memory")
__MAKE_LD4(cv, ushort4, unsigned short, "u16", "h", : "memory")
__MAKE_LD4(cv, int4, int, "s32", "r", : "memory")
__MAKE_LD4(cv, uint4, unsigned int, "u32", "r", : "memory")
__MAKE_LD4(cv, float4, float, "f32", "f", : "memory")
// Generates a scalar store intrinsic `__st<cop>` (e.g. __stcg) emitting a
// single `st.<cop>.<ptx_typ>` PTX instruction; always clobbers memory.
#define __MAKE_ST(cop, c_typ, int_typ, ptx_typ, inl_typ)                                        \
  __device__ __forceinline__ void __st##cop(c_typ* addr, c_typ v)                               \
  {                                                                                             \
    asm("st." #cop "." ptx_typ " [%0], %1;" ::__LDG_PTR(addr), inl_typ((int_typ)v) : "memory"); \
  }
// Same as __MAKE_ST, but for 2-component vector types: unpacks the vector and
// emits a single `st.<cop>.v2` PTX instruction.
#define __MAKE_ST2(cop, c_typ, int_typ, ptx_typ, inl_typ)                                       \
  __device__ __forceinline__ void __st##cop(c_typ* addr, c_typ v)                               \
  {                                                                                             \
    int_typ v1 = v.x, v2 = v.y;                                                                 \
    asm("st." #cop ".v2." ptx_typ " [%0], {%1, %2};" ::__LDG_PTR(addr), inl_typ(v1), inl_typ(v2) \
        : "memory");                                                                            \
  }
// Same as __MAKE_ST, but for 4-component vector types: unpacks the vector and
// emits a single `st.<cop>.v4` PTX instruction.
// Bug fix: the return type was `c_typ` while the function body returns
// nothing — flowing off the end of a non-void function is undefined behavior
// (and a compiler warning). Stores return `void`, matching __MAKE_ST/__MAKE_ST2.
#define __MAKE_ST4(cop, c_typ, int_typ, ptx_typ, inl_typ)                       \
  __device__ __forceinline__ void __st##cop(c_typ* addr, c_typ v)               \
  {                                                                             \
    int_typ v1 = v.x, v2 = v.y, v3 = v.z, v4 = v.w;                             \
    asm("st." #cop ".v4." ptx_typ " [%0], {%1, %2, %3, %4};" ::__LDG_PTR(addr), \
        inl_typ(v1),                                                            \
        inl_typ(v2),                                                            \
        inl_typ(v3),                                                            \
        inl_typ(v4)                                                             \
        : "memory");                                                            \
  }
__MAKE_ST(wb, char, short, "s8", "h")
__MAKE_ST(wb, signed char, short, "s8", "h")
__MAKE_ST(wb, unsigned char, short, "u8", "h")
__MAKE_ST(wb, short, short, "s16", "h")
__MAKE_ST(wb, unsigned short, unsigned short, "u16", "h")
__MAKE_ST(wb, int, int, "s32", "r")
__MAKE_ST(wb, unsigned int, unsigned int, "u32", "r")
__MAKE_ST(wb, long, long, "s" __LBITS, __LDG_PTR)
__MAKE_ST(wb, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR)
__MAKE_ST(wb, long long, long long, "s64", "l")
__MAKE_ST(wb, unsigned long long, unsigned long long, "u64", "l")
__MAKE_ST(wb, float, float, "f32", "f")
__MAKE_ST(wb, double, double, "f64", "d")
__MAKE_ST2(wb, char2, short, "s8", "h")
__MAKE_ST2(wb, uchar2, short, "u8", "h")
__MAKE_ST2(wb, short2, short, "s16", "h")
__MAKE_ST2(wb, ushort2, unsigned short, "u16", "h")
__MAKE_ST2(wb, int2, int, "s32", "r")
__MAKE_ST2(wb, uint2, unsigned int, "u32", "r")
__MAKE_ST2(wb, longlong2, long long, "s64", "l")
__MAKE_ST2(wb, ulonglong2, unsigned long long, "u64", "l")
__MAKE_ST2(wb, float2, float, "f32", "f")
__MAKE_ST2(wb, double2, double, "f64", "d")
__MAKE_ST4(wb, char4, short, "s8", "h")
__MAKE_ST4(wb, uchar4, short, "u8", "h")
__MAKE_ST4(wb, short4, short, "s16", "h")
__MAKE_ST4(wb, ushort4, unsigned short, "u16", "h")
__MAKE_ST4(wb, int4, int, "s32", "r")
__MAKE_ST4(wb, uint4, unsigned int, "u32", "r")
__MAKE_ST4(wb, float4, float, "f32", "f")
__MAKE_ST(cg, char, short, "s8", "h")
__MAKE_ST(cg, signed char, short, "s8", "h")
__MAKE_ST(cg, unsigned char, short, "u8", "h")
__MAKE_ST(cg, short, short, "s16", "h")
__MAKE_ST(cg, unsigned short, unsigned short, "u16", "h")
__MAKE_ST(cg, int, int, "s32", "r")
__MAKE_ST(cg, unsigned int, unsigned int, "u32", "r")
__MAKE_ST(cg, long, long, "s" __LBITS, __LDG_PTR)
__MAKE_ST(cg, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR)
__MAKE_ST(cg, long long, long long, "s64", "l")
__MAKE_ST(cg, unsigned long long, unsigned long long, "u64", "l")
__MAKE_ST(cg, float, float, "f32", "f")
__MAKE_ST(cg, double, double, "f64", "d")
__MAKE_ST2(cg, char2, short, "s8", "h")
__MAKE_ST2(cg, uchar2, short, "u8", "h")
__MAKE_ST2(cg, short2, short, "s16", "h")
__MAKE_ST2(cg, ushort2, unsigned short, "u16", "h")
__MAKE_ST2(cg, int2, int, "s32", "r")
__MAKE_ST2(cg, uint2, unsigned int, "u32", "r")
__MAKE_ST2(cg, longlong2, long long, "s64", "l")
__MAKE_ST2(cg, ulonglong2, unsigned long long, "u64", "l")
__MAKE_ST2(cg, float2, float, "f32", "f")
__MAKE_ST2(cg, double2, double, "f64", "d")
__MAKE_ST4(cg, char4, short, "s8", "h")
__MAKE_ST4(cg, uchar4, short, "u8", "h")
__MAKE_ST4(cg, short4, short, "s16", "h")
__MAKE_ST4(cg, ushort4, unsigned short, "u16", "h")
__MAKE_ST4(cg, int4, int, "s32", "r")
__MAKE_ST4(cg, uint4, unsigned int, "u32", "r")
__MAKE_ST4(cg, float4, float, "f32", "f")
__MAKE_ST(cs, char, short, "s8", "h")
__MAKE_ST(cs, signed char, short, "s8", "h")
__MAKE_ST(cs, unsigned char, short, "u8", "h")
__MAKE_ST(cs, short, short, "s16", "h")
__MAKE_ST(cs, unsigned short, unsigned short, "u16", "h")
__MAKE_ST(cs, int, int, "s32", "r")
__MAKE_ST(cs, unsigned int, unsigned int, "u32", "r")
__MAKE_ST(cs, long, long, "s" __LBITS, __LDG_PTR)
__MAKE_ST(cs, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR)
__MAKE_ST(cs, long long, long long, "s64", "l")
__MAKE_ST(cs, unsigned long long, unsigned long long, "u64", "l")
__MAKE_ST(cs, float, float, "f32", "f")
__MAKE_ST(cs, double, double, "f64", "d")
__MAKE_ST2(cs, char2, short, "s8", "h")
__MAKE_ST2(cs, uchar2, short, "u8", "h")
__MAKE_ST2(cs, short2, short, "s16", "h")
__MAKE_ST2(cs, ushort2, unsigned short, "u16", "h")
__MAKE_ST2(cs, int2, int, "s32", "r")
__MAKE_ST2(cs, uint2, unsigned int, "u32", "r")
__MAKE_ST2(cs, longlong2, long long, "s64", "l")
__MAKE_ST2(cs, ulonglong2, unsigned long long, "u64", "l")
__MAKE_ST2(cs, float2, float, "f32", "f")
__MAKE_ST2(cs, double2, double, "f64", "d")
__MAKE_ST4(cs, char4, short, "s8", "h")
__MAKE_ST4(cs, uchar4, short, "u8", "h")
__MAKE_ST4(cs, short4, short, "s16", "h")
__MAKE_ST4(cs, ushort4, unsigned short, "u16", "h")
__MAKE_ST4(cs, int4, int, "s32", "r")
__MAKE_ST4(cs, uint4, unsigned int, "u32", "r")
__MAKE_ST4(cs, float4, float, "f32", "f")
__MAKE_ST(wt, char, short, "s8", "h")
__MAKE_ST(wt, signed char, short, "s8", "h")
__MAKE_ST(wt, unsigned char, short, "u8", "h")
__MAKE_ST(wt, short, short, "s16", "h")
__MAKE_ST(wt, unsigned short, unsigned short, "u16", "h")
__MAKE_ST(wt, int, int, "s32", "r")
__MAKE_ST(wt, unsigned int, unsigned int, "u32", "r")
__MAKE_ST(wt, long, long, "s" __LBITS, __LDG_PTR)
__MAKE_ST(wt, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR)
__MAKE_ST(wt, long long, long long, "s64", "l")
__MAKE_ST(wt, unsigned long long, unsigned long long, "u64", "l")
__MAKE_ST(wt, float, float, "f32", "f")
__MAKE_ST(wt, double, double, "f64", "d")
__MAKE_ST2(wt, char2, short, "s8", "h")
__MAKE_ST2(wt, uchar2, short, "u8", "h")
__MAKE_ST2(wt, short2, short, "s16", "h")
__MAKE_ST2(wt, ushort2, unsigned short, "u16", "h")
__MAKE_ST2(wt, int2, int, "s32", "r")
__MAKE_ST2(wt, uint2, unsigned int, "u32", "r")
__MAKE_ST2(wt, longlong2, long long, "s64", "l")
__MAKE_ST2(wt, ulonglong2, unsigned long long, "u64", "l")
__MAKE_ST2(wt, float2, float, "f32", "f")
__MAKE_ST2(wt, double2, double, "f64", "d")
__MAKE_ST4(wt, char4, short, "s8", "h")
__MAKE_ST4(wt, uchar4, short, "u8", "h")
__MAKE_ST4(wt, short4, short, "s16", "h")
__MAKE_ST4(wt, ushort4, unsigned short, "u16", "h")
__MAKE_ST4(wt, int4, int, "s32", "r")
__MAKE_ST4(wt, uint4, unsigned int, "u32", "r")
__MAKE_ST4(wt, float4, float, "f32", "f")
#undef __MAKE_ST4
#undef __MAKE_ST2
#undef __MAKE_ST
#undef __MAKE_LD4
#undef __MAKE_LD2
#undef __MAKE_LD
#undef __NOARG
#undef __LBITS
#undef __LDG_PTR
#endif // defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320))
#endif // defined(__CLANG_CUDA_ADDITIONAL_INTRINSICS_H__)
| 0 |
rapidsai_public_repos/cuvs/cpp | rapidsai_public_repos/cuvs/cpp/scripts/include_checker.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import re
import os
import subprocess
import argparse
IncludeRegex = re.compile(r"\s*#include\s*(\S+)")
RemoveComments = re.compile(r"//.*")
exclusion_regex = re.compile(r".*thirdparty.*")
def parse_args():
    """Parses command-line arguments for the include-syntax checker.

    Returns
    -------
    argparse.Namespace
        Parsed arguments; ``regex_compiled`` holds the compiled form of
        the ``--regex`` source-file filter.
    """
    argparser = argparse.ArgumentParser(
        "Checks for a consistent '#include' syntax")
    argparser.add_argument("--regex", type=str,
                           default=r"[.](cu|cuh|h|hpp|hxx|cpp)$",
                           help="Regex string to filter in sources")
    argparser.add_argument("dirs", type=str, nargs="*",
                           help="List of dirs where to find sources")
    args = argparser.parse_args()
    # pre-compile once so the per-file walk does not recompile the pattern
    args.regex_compiled = re.compile(args.regex)
    return args
def list_all_source_file(file_regex, srcdirs):
    """Collects every source file under ``srcdirs`` whose name matches
    ``file_regex``, skipping any directory matched by the module-level
    ``exclusion_regex`` (e.g. thirdparty code)."""
    sources = []
    for src_dir in srcdirs:
        for root, _dirs, names in os.walk(src_dir):
            if re.search(exclusion_regex, root):
                continue
            sources.extend(os.path.join(root, name)
                           for name in names
                           if re.search(file_regex, name))
    return sources
def check_includes_in(src):
    """Checks the '#include' syntax of a single source file.

    Quoted includes (``"..."``) must resolve relative to the including
    file's directory, while angle-bracket includes (``<...>``) must not;
    each violation produces one error message.

    Parameters
    ----------
    src : str
        Path to the source file to check.

    Returns
    -------
    list of str
        One "Line:<n> use #include ..." message per offending include.
    """
    errs = []
    src_dir = os.path.dirname(src)
    # use a context manager so the file handle is always closed
    # (the previous bare open() leaked the descriptor)
    with open(src) as fin:
        for line_number, line in enumerate(fin):
            line = RemoveComments.sub("", line)
            match = IncludeRegex.search(line)
            if match is None:
                continue
            val = match.group(1)
            inc_file = val[1:-1]  # strip out " or <
            full_path = os.path.join(src_dir, inc_file)
            line_num = line_number + 1
            if val[0] == "\"" and not os.path.exists(full_path):
                errs.append("Line:%d use #include <...>" % line_num)
            elif val[0] == "<" and os.path.exists(full_path):
                errs.append("Line:%d use #include \"...\"" % line_num)
    return errs
def main():
    """Entry point: checks every matching source file and reports errors."""
    args = parse_args()
    all_files = list_all_source_file(args.regex_compiled, args.dirs)
    # map of file path -> list of error strings, only for failing files
    all_errs = {}
    for f in all_files:
        errs = check_includes_in(f)
        if len(errs) > 0:
            all_errs[f] = errs
    if len(all_errs) == 0:
        print("include-check PASSED")
    else:
        print("include-check FAILED! See below for errors...")
        for f, errs in all_errs.items():
            print("File: %s" % f)
            for e in errs:
                print("  %s" % e)
        # non-zero exit code so CI marks the check as failed
        sys.exit(-1)
    return
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/cuvs/cpp | rapidsai_public_repos/cuvs/cpp/scripts/gitutils.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import os
import re
def isFileEmpty(f):
    """Returns True when the file at path ``f`` has a size of zero bytes."""
    size = os.path.getsize(f)
    return size == 0
def __git(*opts):
    """Runs a git command and returns its output"""
    # NOTE(review): opts are joined into a single shell string (shell=True),
    # so callers must only pass trusted, script-internal arguments
    cmd = "git " + " ".join(list(opts))
    ret = subprocess.check_output(cmd, shell=True)
    # decode to text and drop the trailing newline git appends
    return ret.decode("UTF-8").rstrip("\n")
def __gitdiff(*opts):
    """Runs a git diff command with no pager set"""
    # --no-pager keeps the output plain so it can be parsed programmatically
    return __git("--no-pager", "diff", *opts)
def branch():
    """Returns the name of the current branch"""
    # rev-parse --abbrev-ref resolves HEAD to its symbolic branch name
    name = __git("rev-parse", "--abbrev-ref", "HEAD")
    name = name.rstrip()
    return name
def repo_version():
    """
    Determines the version of the repo by using `git describe`

    Returns
    -------
    str
        The full version of the repo in the format 'v#.#.#{a|b|rc}'
    """
    # --abbrev=0 returns the nearest tag without the commit-distance suffix
    return __git("describe", "--tags", "--abbrev=0")
def repo_version_major_minor():
    """
    Determines the version of the repo using `git describe` and returns only
    the major and minor portion

    Returns
    -------
    str
        The partial version of the repo in the format '{major}.{minor}'
    """
    full_repo_version = repo_version()

    version_match = re.match(
        r"^v?(?P<major>[0-9]+)(?:\.(?P<minor>[0-9]+))?", full_repo_version)

    if version_match is None:
        print(" [DEBUG] Could not determine repo major minor version. "
              f"Full repo version: {full_repo_version}.")
        return None

    major = version_match.group("major")
    minor = version_match.group("minor")
    # minor part is optional in the tag; include it only when present
    return "{}.{}".format(major, minor) if minor else major
def determine_merge_commit(current_branch="HEAD"):
    """
    When running outside of CI, this will estimate the target merge commit hash
    of `current_branch` by finding a common ancestor with the remote branch
    'branch-{major}.{minor}' where {major} and {minor} are determined from the
    repo version.

    Parameters
    ----------
    current_branch : str, optional
        Which branch to consider as the current branch, by default "HEAD"

    Returns
    -------
    str
        The common commit hash ID, or None when no target branch or remote
        tracking reference could be determined.
    """
    try:
        # Try to determine the target branch from the most recent tag
        head_branch = __git("describe",
                            "--all",
                            "--tags",
                            "--match='branch-*'",
                            "--abbrev=0")
    except subprocess.CalledProcessError:
        print(" [DEBUG] Could not determine target branch from most recent "
              "tag. Falling back to 'branch-{major}.{minor}.")
        head_branch = None

    if (head_branch is not None):
        # Convert from head to branch name
        head_branch = __git("name-rev", "--name-only", head_branch)
    else:
        # Try and guess the target branch as "branch-<major>.<minor>"
        version = repo_version_major_minor()

        if (version is None):
            return None

        head_branch = "branch-{}".format(version)

    try:
        # Now get the remote tracking branch
        remote_branch = __git("rev-parse",
                              "--abbrev-ref",
                              "--symbolic-full-name",
                              head_branch + "@{upstream}")
    except subprocess.CalledProcessError:
        print(" [DEBUG] Could not remote tracking reference for "
              f"branch {head_branch}.")
        remote_branch = None

    if (remote_branch is None):
        return None

    print(f" [DEBUG] Determined TARGET_BRANCH as: '{remote_branch}'. "
          "Finding common ancestor.")

    # merge-base yields the best common ancestor between the two refs
    common_commit = __git("merge-base", remote_branch, current_branch)

    return common_commit
def uncommittedFiles():
    """
    Returns a list of all changed files that are not yet committed. This
    means both untracked/unstaged as well as uncommitted files too.
    """
    files = __git("status", "-u", "-s")
    ret = []
    for f in files.splitlines():
        f = f.strip(" ")
        # collapse the status columns into a single separator; use a raw
        # string so "\s" is a regex token, not an invalid string escape
        # (the old '"\s+"' triggered W605 / DeprecationWarning)
        f = re.sub(r"\s+", " ", f)
        tmp = f.split(" ", 1)
        # only consider staged files or uncommitted files
        # in other words, ignore untracked files
        if tmp[0] == "M" or tmp[0] == "A":
            ret.append(tmp[1])
    return ret
def changedFilesBetween(baseName, branchName, commitHash):
    """
    Returns a list of files changed between branches baseName and latest commit
    of branchName.
    """
    current = branch()
    # NOTE(review): the checkouts below mutate the working tree; the original
    # branch is restored at the end, but a failure in between leaves the
    # repository on a different ref
    # checkout "base" branch
    __git("checkout", "--force", baseName)
    # checkout branch for comparing
    __git("checkout", "--force", branchName)
    # checkout latest commit from branch
    __git("checkout", "-fq", commitHash)

    files = __gitdiff("--name-only",
                      "--ignore-submodules",
                      f"{baseName}..{branchName}")

    # restore the original branch
    __git("checkout", "--force", current)
    return files.splitlines()
def changesInFileBetween(file, b1, b2, filter=None):
    """Filters the changed lines to a file between the branches b1 and b2"""
    current = branch()
    # ensure both branches are present locally before diffing
    __git("checkout", "--quiet", b1)
    __git("checkout", "--quiet", b2)
    # -w ignores whitespace, -U0 drops context lines so only changes remain
    diffs = __gitdiff("--ignore-submodules",
                      "-w",
                      "--minimal",
                      "-U0",
                      "%s...%s" % (b1, b2),
                      "--",
                      file)
    __git("checkout", "--quiet", current)
    lines = []
    for line in diffs.splitlines():
        if filter is None or filter(line):
            lines.append(line)
    return lines
def modifiedFiles(pathFilter=None):
    """
    If inside a CI-env (ie. TARGET_BRANCH and COMMIT_HASH are defined, and
    current branch is "current-pr-branch"), then lists out all files modified
    between these 2 branches. Locally, TARGET_BRANCH will try to be determined
    from the current repo version and finding a corresponding branch named
    'branch-{major}.{minor}'. If this fails, this function will list out all
    the uncommitted files in the current branch.

    Such utility function is helpful while putting checker scripts as part of
    cmake, as well as CI process. This way, during development, only the files
    touched (but not yet committed) by devs can be checked. But, during the CI
    process ALL files modified by the dev, as submitted in the PR, will be
    checked. This happens, all the while using the same script.
    """
    targetBranch = os.environ.get("TARGET_BRANCH")
    commitHash = os.environ.get("COMMIT_HASH")
    currentBranch = branch()
    print(
        f" [DEBUG] TARGET_BRANCH={targetBranch}, COMMIT_HASH={commitHash}, "
        f"currentBranch={currentBranch}")

    if targetBranch and commitHash and (currentBranch == "current-pr-branch"):
        print(" [DEBUG] Assuming a CI environment.")
        allFiles = changedFilesBetween(targetBranch, currentBranch, commitHash)
    else:
        print(" [DEBUG] Did not detect CI environment. "
              "Determining TARGET_BRANCH locally.")

        common_commit = determine_merge_commit(currentBranch)

        if (common_commit is not None):
            # Now get the diff. Use --staged to get both diff between
            # common_commit..HEAD and any locally staged files
            allFiles = __gitdiff("--name-only",
                                 "--ignore-submodules",
                                 "--staged",
                                 f"{common_commit}").splitlines()
        else:
            # Fallback to just uncommitted files
            allFiles = uncommittedFiles()

    # apply the caller-provided path filter (None means "keep everything")
    files = []
    for f in allFiles:
        if pathFilter is None or pathFilter(f):
            files.append(f)

    filesToCheckString = "\n\t".join(files) if files else "<None>"
    print(f" [DEBUG] Found files to check:\n\t{filesToCheckString}\n")
    return files
def listAllFilesInDir(folder):
    """Recursively lists every file found underneath ``folder``."""
    found = []
    for root, _subdirs, names in os.walk(folder):
        found.extend(os.path.join(root, name) for name in names)
    return found
def listFilesToCheck(filesDirs, filter=None):
    """
    Utility function to filter the input list of files/dirs based on the input
    filter method and returns all the files that need to be checked
    """
    def accepted(path):
        # no filter means everything passes
        return filter is None or filter(path)

    checked = []
    for entry in filesDirs:
        if os.path.isfile(entry):
            if accepted(entry):
                checked.append(entry)
        elif os.path.isdir(entry):
            checked.extend(p for p in listAllFilesInDir(entry) if accepted(p))
    return checked
| 0 |
rapidsai_public_repos/cuvs/cpp | rapidsai_public_repos/cuvs/cpp/scripts/run-clang-compile.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# IMPORTANT DISCLAIMER: #
# This file is experimental and may not run successfully on the entire repo! #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
from __future__ import print_function
import argparse
import json
import multiprocessing as mp
import os
import re
import shutil
import subprocess
CMAKE_COMPILER_REGEX = re.compile(
r"^\s*CMAKE_CXX_COMPILER:FILEPATH=(.+)\s*$", re.MULTILINE)
CLANG_COMPILER = "clang++"
GPU_ARCH_REGEX = re.compile(r"sm_(\d+)")
SPACES = re.compile(r"\s+")
XCOMPILER_FLAG = re.compile(r"-((Xcompiler)|(-compiler-options))=?")
XPTXAS_FLAG = re.compile(r"-((Xptxas)|(-ptxas-options))=?")
# any options that may have equal signs in nvcc but not in clang
# add those options here if you find any
OPTIONS_NO_EQUAL_SIGN = ['-isystem']
SEPARATOR = "-" * 8
END_SEPARATOR = "*" * 64
def parse_args():
    """Parses command-line arguments and derives defaults for this script.

    Returns
    -------
    argparse.Namespace
        Arguments plus derived fields: ``ignore_compiled`` /
        ``select_compiled`` (compiled regexes or None) and a resolved
        ``build_dir``.
    """
    argparser = argparse.ArgumentParser("Runs clang++ on a project instead of nvcc")
    argparser.add_argument(
        "-cdb", type=str, default="compile_commands.json",
        help="Path to cmake-generated compilation database")
    argparser.add_argument(
        "-ignore", type=str, default=None,
        help="Regex used to ignore files from checking")
    argparser.add_argument(
        "-select", type=str, default=None,
        help="Regex used to select files for checking")
    argparser.add_argument(
        "-j", type=int, default=-1, help="Number of parallel jobs to launch.")
    argparser.add_argument(
        "-build_dir", type=str, default=None,
        help="Directory from which compile commands should be called. "
        "By default, directory of compile_commands.json file.")
    args = argparser.parse_args()
    if args.j <= 0:
        # non-positive job count means "use all available cores"
        args.j = mp.cpu_count()
    args.ignore_compiled = re.compile(args.ignore) if args.ignore else None
    args.select_compiled = re.compile(args.select) if args.select else None
    # we don't check clang's version, it should be OK with any clang
    # recent enough to handle CUDA >= 11
    if not os.path.exists(args.cdb):
        raise Exception("Compilation database '%s' missing" % args.cdb)
    if args.build_dir is None:
        args.build_dir = os.path.dirname(args.cdb)
    return args
def get_gcc_root(build_dir):
    """Finds the root of the g++ installation used by the build.

    Tried in order: the CMAKE_CXX_COMPILER entry of CMakeCache.txt in
    ``build_dir``, the active conda environment (only when a build sysroot
    is configured), then whatever ``g++`` is found on the PATH.

    Raises
    ------
    Exception
        If no g++ installation can be located.
    """
    # first try to determine GCC based on CMakeCache
    cmake_cache = os.path.join(build_dir, "CMakeCache.txt")
    if os.path.isfile(cmake_cache):
        with open(cmake_cache) as f:
            content = f.read()
            match = CMAKE_COMPILER_REGEX.search(content)
            if match:
                # compiler path is <root>/bin/g++, so strip two components
                return os.path.dirname(os.path.dirname(match.group(1)))
    # first fall-back to CONDA prefix if we have a build sysroot there
    conda_prefix = os.environ.get("CONDA_PREFIX", "")
    conda_sysroot = os.environ.get("CONDA_BUILD_SYSROOT", "")
    if conda_prefix and conda_sysroot:
        return conda_prefix
    # second fall-back to default g++ install
    default_gxx = shutil.which("g++")
    if default_gxx:
        return os.path.dirname(os.path.dirname(default_gxx))
    raise Exception("Cannot find any g++ install on the system.")
def list_all_cmds(cdb):
    """Loads the cmake-generated compilation database (a JSON list)."""
    with open(cdb) as db_file:
        commands = json.load(db_file)
    return commands
def get_gpu_archs(command):
    """Extracts the GPU architecture from nvcc's "-gencode ..." flags.

    clang accepts only a single architecture, so the lowest one found is
    returned as a "--cuda-gpu-arch=sm_XX" flag.

    Parameters
    ----------
    command : list of str
        The tokenized nvcc compile command.

    Returns
    -------
    list of str
        A one-element list with the clang arch flag, or an empty list when
        the command carries no recognizable architecture flag (the previous
        implementation raised ValueError on min([]) in that case).
    """
    archs = []
    for loc in range(len(command)):
        if (command[loc] != "-gencode" and command[loc] != "--generate-code"
                and not command[loc].startswith("--generate-code=")):
            continue
        if command[loc].startswith("--generate-code="):
            arch_flag = command[loc][len("--generate-code="):]
        else:
            arch_flag = command[loc + 1]
        # re caches compiled patterns, so an inline pattern keeps this
        # block self-contained at no real cost
        match = re.search(r"sm_(\d+)", arch_flag)
        if match is not None:
            archs.append(int(match.group(1)))
    if not archs:
        # no architecture flags at all: emit nothing instead of crashing
        return []
    return ["--cuda-gpu-arch=sm_%d" % min(archs)]
def get_index(arr, item_options):
    """Returns the set of indices in ``arr`` whose value is one of
    ``item_options``."""
    wanted = set(item_options)
    return {i for i, entry in enumerate(arr) if entry in wanted}
def remove_items(arr, item_options):
    """Deletes, in place, every element of ``arr`` that matches one of
    ``item_options`` exactly."""
    matches = [i for i, entry in enumerate(arr) if entry in item_options]
    # delete back-to-front so earlier indices stay valid
    for i in reversed(matches):
        del arr[i]
def remove_items_plus_one(arr, item_options):
    """Removes, in place, every "<flag> <value>" pair and every
    "<flag>=<value>" token from ``arr``, for flags listed in
    ``item_options``."""
    # First pass: "<flag>" followed by its value as a separate token.
    # Delete back-to-front so remaining indices stay valid; the value at
    # i + 1 (if any) goes before the flag itself.
    for i in sorted(get_index(arr, item_options), reverse=True):
        if i < len(arr) - 1:
            del arr[i + 1]
        del arr[i]
    # Second pass: the single-token "<flag>=<value>" spelling, computed on
    # the already-shrunk list.
    idx = set(i for i, s in enumerate(arr) for item in item_options
              if s.startswith(item + "="))
    for i in sorted(idx, reverse=True):
        del arr[i]
def add_cuda_path(command, nvcc):
    """Appends clang's --cuda-path flag, derived from the nvcc location.

    Raises
    ------
    Exception
        If ``nvcc`` cannot be found on the PATH.
    """
    nvcc_path = shutil.which(nvcc)
    if not nvcc_path:
        raise Exception("Command %s has invalid compiler %s" % (command, nvcc))
    # nvcc lives in <cuda_root>/bin, so strip two path components
    cuda_root = os.path.dirname(os.path.dirname(nvcc_path))
    command.append('--cuda-path=%s' % cuda_root)
def get_clang_args(cmd, build_dir):
    """Translates one nvcc/g++ compilation-database entry into a clang++
    argument list.

    Parameters
    ----------
    cmd : dict
        A compile_commands.json entry with "command" and "file" keys.
    build_dir : str
        The cmake build directory (used to locate CMakeCache.txt for the
        gcc toolchain).

    Returns
    -------
    list of str
        The rewritten command, starting with "clang++".
    """
    command, file = cmd["command"], cmd["file"]
    is_cuda = file.endswith(".cu")
    command = re.split(SPACES, command)
    # get original compiler
    cc_orig = command[0]
    # compiler is always clang++!
    command[0] = "clang++"
    # remove compilation and output targets from the original command
    remove_items_plus_one(command, ["--compile", "-c"])
    remove_items_plus_one(command, ["--output-file", "-o"])
    if is_cuda:
        # replace nvcc's "-gencode ..." with clang's "--cuda-gpu-arch ..."
        archs = get_gpu_archs(command)
        command.extend(archs)
        # provide proper cuda path to clang
        add_cuda_path(command, cc_orig)
        # remove all kinds of nvcc flags clang doesn't know about
        remove_items_plus_one(command, [
            "--generate-code",
            "-gencode",
            "--x",
            "-x",
            "--compiler-bindir",
            "-ccbin",
            "--diag_suppress",
            "-diag-suppress",
            "--default-stream",
            "-default-stream",
        ])
        remove_items(command, [
            "-extended-lambda",
            "--extended-lambda",
            "-expt-extended-lambda",
            "--expt-extended-lambda",
            "-expt-relaxed-constexpr",
            "--expt-relaxed-constexpr",
            "--device-debug",
            "-G",
            "--generate-line-info",
            "-lineinfo",
        ])
        # "-x cuda" is the right usage in clang
        command.extend(["-x", "cuda"])
        # we remove -Xcompiler flags: here we basically have to hope for the
        # best that clang++ will accept any flags which nvcc passed to gcc
        for i, c in reversed(list(enumerate(command))):
            new_c = XCOMPILER_FLAG.sub('', c)
            if new_c == c:
                continue
            # nvcc comma-separates forwarded flags; splice them back in
            command[i:i + 1] = new_c.split(',')
        # we also change -Xptxas to -Xcuda-ptxas, always adding space here
        for i, c in reversed(list(enumerate(command))):
            if XPTXAS_FLAG.search(c):
                if not c.endswith("=") and i < len(command) - 1:
                    del command[i + 1]
                command[i] = '-Xcuda-ptxas'
                command.insert(i + 1, XPTXAS_FLAG.sub('', c))
        # several options like isystem don't expect `=`
        for opt in OPTIONS_NO_EQUAL_SIGN:
            opt_eq = opt + '='
            # make sure that we iterate from back to front here for insert
            for i, c in reversed(list(enumerate(command))):
                if not c.startswith(opt_eq):
                    continue
                x = c.split('=')
                # we only care about the first `=`
                command[i] = x[0]
                command.insert(i + 1, '='.join(x[1:]))
        # use extensible whole program, to avoid ptx resolution/linking
        command.extend(["-Xcuda-ptxas", "-ewp"])
        # for libcudacxx, we need to allow variadic functions
        command.extend(["-Xclang", "-fcuda-allow-variadic-functions"])
        # add some additional CUDA intrinsics
        cuda_intrinsics_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "__clang_cuda_additional_intrinsics.h")
        command.extend(["-include", cuda_intrinsics_file])
    # somehow this option gets onto the commandline, it is unrecognized by clang
    remove_items(command, [
        "--forward-unknown-to-host-compiler",
        "-forward-unknown-to-host-compiler"
    ])
    # do not treat warnings as errors here !
    for i, x in reversed(list(enumerate(command))):
        if x.startswith("-Werror"):
            del command[i]
    # try to figure out which GCC CMAKE used, and tell clang all about it
    command.append("--gcc-toolchain=%s" % get_gcc_root(build_dir))
    return command
def run_clang_command(clang_cmd, cwd):
    """Runs a clang command in ``cwd``; returns (passed, log text).

    "passed" is True when the process exited with status 0; the log
    captures the command, working directory, exit code and merged
    stdout/stderr.
    """
    cmd = " ".join(clang_cmd)
    # stderr is folded into stdout so the log shows diagnostics in order
    result = subprocess.run(cmd, check=False, shell=True, cwd=cwd,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    result.stdout = result.stdout.decode("utf-8").strip()
    out = "CMD: " + cmd + "\n"
    out += "CWD: " + cwd + "\n"
    out += "EXIT-CODE: %d\n" % result.returncode
    status = result.returncode == 0
    out += result.stdout
    return status, out
class LockContext(object):
    """Context manager wrapping an optional multiprocessing/threading lock.

    When constructed without a lock it degrades to a no-op, letting the
    sequential code path reuse the same `with` blocks as the parallel one.
    """

    def __init__(self, lock=None) -> None:
        self._lock = lock

    def __enter__(self):
        held = self._lock
        if held:
            held.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        held = self._lock
        if held:
            held.release()
        # returning False propagates any exception raised in the body
        return False
def print_result(passed, stdout, file):
    """Prints a one-line PASSED/FAILED banner for ``file``, followed by the
    full compiler output only when the file failed."""
    status_str = "PASSED" if passed else "FAILED"
    print("%s File:%s %s %s" % (SEPARATOR, file, status_str, SEPARATOR))
    if not passed and stdout:
        print(stdout)
        print("%s\n" % END_SEPARATOR)
def run_clang(cmd, args):
    """Compiles a single compilation-database entry with clang++.

    Returns True when the compile succeeded. Printing happens under the
    global ``lock`` so parallel workers do not interleave their output.
    """
    command = get_clang_args(cmd, args.build_dir)
    # compile only and dump output to /dev/null
    command.extend(["-c", cmd["file"], "-o", os.devnull])
    status, out = run_clang_command(command, args.build_dir)
    # we immediately print the result since this is more interactive for user
    with lock:
        print_result(status, out, cmd["file"])
    return status
# mostly used for debugging purposes
def run_sequential(args, all_files):
    """Runs the clang compile checks one file at a time in this process;
    returns True only if every selected file compiled cleanly."""
    # lock must be defined as in `run_parallel`
    global lock
    lock = LockContext()
    results = []
    for cmd in all_files:
        # skip files that we don't want to look at
        if args.ignore_compiled is not None and \
           re.search(args.ignore_compiled, cmd["file"]) is not None:
            continue
        if args.select_compiled is not None and \
           re.search(args.select_compiled, cmd["file"]) is None:
            continue
        results.append(run_clang(cmd, args))
    return all(results)
def copy_lock(init_lock):
    """Pool initializer: installs the shared lock as a worker-global."""
    # this is required to pass locks to pool workers
    # see https://stackoverflow.com/questions/25557686/
    # python-sharing-a-lock-between-processes
    global lock
    lock = init_lock
def run_parallel(args, all_files):
    """Runs the clang compile checks in a pool of ``args.j`` worker
    processes; returns True only if every selected file compiled cleanly."""
    init_lock = LockContext(mp.Lock())
    pool = mp.Pool(args.j, initializer=copy_lock, initargs=(init_lock,))
    results = []
    for cmd in all_files:
        # skip files that we don't want to look at
        if args.ignore_compiled is not None and \
           re.search(args.ignore_compiled, cmd["file"]) is not None:
            continue
        if args.select_compiled is not None and \
           re.search(args.select_compiled, cmd["file"]) is None:
            continue
        results.append(pool.apply_async(run_clang, args=(cmd, args)))
    # gather before closing so worker exceptions propagate here
    results_final = [r.get() for r in results]
    pool.close()
    pool.join()
    return all(results_final)
def main():
    """Entry point: runs clang++ over every compilation-database entry."""
    args = parse_args()
    all_files = list_all_cmds(args.cdb)
    # ensure that we use only the real paths
    for cmd in all_files:
        cmd["file"] = os.path.realpath(os.path.expanduser(cmd["file"]))
    if args.j == 1:
        # sequential mode is mostly useful for debugging
        status = run_sequential(args, all_files)
    else:
        status = run_parallel(args, all_files)
    if not status:
        raise Exception("clang++ failed! Refer to the errors above.")
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/cuvs/cpp | rapidsai_public_repos/cuvs/cpp/scripts/run-cmake-format.sh | #!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
# This script is a wrapper for cmakelang that may be used with pre-commit. The
# wrapping is necessary because RAPIDS libraries split configuration for
# cmakelang linters between a local config file and a second config file that's
# shared across all of RAPIDS via rapids-cmake. In order to keep it up to date
# this file is only maintained in one place (the rapids-cmake repo) and
# pulled down during builds. We need a way to invoke CMake linting commands
# without causing pre-commit failures (which could block local commits or CI),
# while also being sufficiently flexible to allow users to maintain the config
# file independently of a build directory.
#
# This script provides the minimal functionality to enable those use cases. It
# searches in a number of predefined locations for the rapids-cmake config file
# and exits gracefully if the file is not found. If a user wishes to specify a
# config file at a nonstandard location, they may do so by setting the
# environment variable RAPIDS_CMAKE_FORMAT_FILE.
#
# This script can be invoked directly anywhere within the project repository.
# Alternatively, it may be invoked as a pre-commit hook via
# `pre-commit run (cmake-format)|(cmake-lint)`.
#
# Usage:
# bash run-cmake-format.sh {cmake-format,cmake-lint} infile [infile ...]
# Locate the build directory: honor RAFT_ROOT when set, otherwise derive it
# from the git top-level; a git failure is detected via $status below.
status=0
if [ -z ${RAFT_ROOT:+PLACEHOLDER} ]; then
  # the command substitution's exit status propagates through the assignment
  RAFT_BUILD_DIR=$(git rev-parse --show-toplevel 2>&1)/cpp/build
  status=$?
else
  RAFT_BUILD_DIR=${RAFT_ROOT}
fi
if ! [ ${status} -eq 0 ]; then
if [[ ${RAFT_BUILD_DIR} == *"not a git repository"* ]]; then
echo "This script must be run inside the raft repository, or the RAFT_ROOT environment variable must be set."
else
echo "Script failed with unknown error attempting to determine project root:"
echo ${RAFT_BUILD_DIR}
fi
exit 1
fi
DEFAULT_FORMAT_FILE_LOCATIONS=(
"${RAFT_BUILD_DIR:-${HOME}}/_deps/rapids-cmake-src/cmake-format-rapids-cmake.json"
)
if [ -z ${RAPIDS_CMAKE_FORMAT_FILE:+PLACEHOLDER} ]; then
for file_path in ${DEFAULT_FORMAT_FILE_LOCATIONS[@]}; do
if [ -f ${file_path} ]; then
RAPIDS_CMAKE_FORMAT_FILE=${file_path}
break
fi
done
fi
if [ -z ${RAPIDS_CMAKE_FORMAT_FILE:+PLACEHOLDER} ]; then
echo "The rapids-cmake cmake-format configuration file was not found at any of the default search locations: "
echo ""
( IFS=$'\n'; echo "${DEFAULT_FORMAT_FILE_LOCATIONS[*]}" )
echo ""
echo "Try setting the environment variable RAPIDS_CMAKE_FORMAT_FILE to the path to the config file."
exit 0
else
echo "Using format file ${RAPIDS_CMAKE_FORMAT_FILE}"
fi
# Dispatch on the requested tool; the remaining arguments are the files to
# process. cmake-format rewrites files in place, cmake-lint only reports.
if [[ $1 == "cmake-format" ]]; then
  cmake-format -i --config-files cpp/cmake/config.json ${RAPIDS_CMAKE_FORMAT_FILE} -- ${@:2}
elif [[ $1 == "cmake-lint" ]]; then
  # Since the pre-commit hook is verbose, we have to be careful to only
  # present cmake-lint's output (which is quite verbose) if we actually
  # observe a failure.
  OUTPUT=$(cmake-lint --config-files cpp/cmake/config.json ${RAPIDS_CMAKE_FORMAT_FILE} -- ${@:2})
  status=$?
  if ! [ ${status} -eq 0 ]; then
    echo "${OUTPUT}"
  fi
  exit ${status}
fi
| 0 |
rapidsai_public_repos/cuvs/cpp | rapidsai_public_repos/cuvs/cpp/scripts/run-clang-tidy.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# IMPORTANT DISCLAIMER: #
# This file is experimental and may not run successfully on the entire repo! #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
from __future__ import print_function
import argparse
import json
import multiprocessing as mp
import os
import re
import shutil
import subprocess
EXPECTED_VERSIONS = ("16.0.6",)
VERSION_REGEX = re.compile(r"clang version ([0-9.]+)")
CMAKE_COMPILER_REGEX = re.compile(
r"^\s*CMAKE_CXX_COMPILER:FILEPATH=(.+)\s*$", re.MULTILINE)
CLANG_COMPILER = "clang++"
GPU_ARCH_REGEX = re.compile(r"sm_(\d+)")
SPACES = re.compile(r"\s+")
XCOMPILER_FLAG = re.compile(r"-((Xcompiler)|(-compiler-options))=?")
XPTXAS_FLAG = re.compile(r"-((Xptxas)|(-ptxas-options))=?")
# any options that may have equal signs in nvcc but not in clang
# add those options here if you find any
OPTIONS_NO_EQUAL_SIGN = ['-isystem']
SEPARATOR = "-" * 8
END_SEPARATOR = "*" * 64
def parse_args():
    """Parses command-line arguments and derives defaults for clang-tidy.

    Also validates the environment: the clang compiler version must be one
    of EXPECTED_VERSIONS, the compilation database must exist, and a
    custom thrust checkout must be present.

    Returns
    -------
    argparse.Namespace
        Arguments plus derived fields: ``ignore_compiled`` /
        ``select_compiled`` (compiled regexes or None) and resolved
        ``root``, ``thrust_dir`` and ``build_dir`` paths.
    """
    argparser = argparse.ArgumentParser("Runs clang-tidy on a project")
    argparser.add_argument(
        "-cdb", type=str, default="compile_commands.json",
        help="Path to cmake-generated compilation database")
    argparser.add_argument(
        "-exe", type=str, default="clang-tidy", help="Path to clang-tidy exe")
    argparser.add_argument(
        "-ignore", type=str, default=None,
        help="Regex used to ignore files from checking")
    argparser.add_argument(
        "-select", type=str, default=None,
        help="Regex used to select files for checking")
    argparser.add_argument(
        "-j", type=int, default=-1, help="Number of parallel jobs to launch.")
    argparser.add_argument(
        "-root", type=str, default=None,
        help="Repo root path to filter headers correctly, CWD by default.")
    argparser.add_argument(
        "-thrust_dir", type=str, default=None,
        help="Pass the directory to a THRUST git repo recent enough for clang.")
    argparser.add_argument(
        "-build_dir", type=str, default=None,
        help="Directory from which compile commands should be called. "
        "By default, directory of compile_commands.json file.")
    args = argparser.parse_args()
    if args.j <= 0:
        # non-positive job count means "use all available cores"
        args.j = mp.cpu_count()
    args.ignore_compiled = re.compile(args.ignore) if args.ignore else None
    args.select_compiled = re.compile(args.select) if args.select else None
    # we check clang's version so that it will work in CI
    ret = subprocess.check_output("%s --version" % CLANG_COMPILER, shell=True)
    ret = ret.decode("utf-8")
    version = VERSION_REGEX.match(ret)
    if version is None:
        raise Exception("Failed to figure out clang compiler version!")
    version = version.group(1)
    if version not in EXPECTED_VERSIONS:
        raise Exception("clang compiler version must be in %s found '%s'" %
                        (EXPECTED_VERSIONS, version))
    if not os.path.exists(args.cdb):
        raise Exception("Compilation database '%s' missing" % args.cdb)
    # we assume that this script is run from repo root
    if args.root is None:
        args.root = os.getcwd()
    args.root = os.path.realpath(os.path.expanduser(args.root))
    # we need to have a recent enough cub version for clang to compile
    if args.thrust_dir is None:
        args.thrust_dir = os.path.join(
            os.path.dirname(args.cdb), "thrust_1.15", "src", "thrust_1.15")
    if args.build_dir is None:
        args.build_dir = os.path.dirname(args.cdb)
    if not os.path.isdir(args.thrust_dir):
        # fixed: the message previously had an unbalanced quote ("'%s")
        raise Exception("Cannot find custom thrust dir '%s'" % args.thrust_dir)
    return args
def get_gcc_root(args):
    """Finds the root of the g++ installation used by the build.

    Tried in order: the CMAKE_CXX_COMPILER entry of CMakeCache.txt in
    ``args.build_dir``, the active conda environment (only when a build
    sysroot is configured), then whatever ``g++`` is found on the PATH.

    Raises
    ------
    Exception
        If no g++ installation can be located.
    """
    # first try to determine GCC based on CMakeCache
    cmake_cache = os.path.join(args.build_dir, "CMakeCache.txt")
    if os.path.isfile(cmake_cache):
        with open(cmake_cache) as f:
            content = f.read()
            match = CMAKE_COMPILER_REGEX.search(content)
            if match:
                # compiler path is <root>/bin/g++, so strip two components
                return os.path.dirname(os.path.dirname(match.group(1)))
    # first fall-back to CONDA prefix if we have a build sysroot there
    conda_prefix = os.environ.get("CONDA_PREFIX", "")
    conda_sysroot = os.environ.get("CONDA_BUILD_SYSROOT", "")
    if conda_prefix and conda_sysroot:
        return conda_prefix
    # second fall-back to default g++ install
    default_gxx = shutil.which("g++")
    if default_gxx:
        return os.path.dirname(os.path.dirname(default_gxx))
    raise Exception("Cannot find any g++ install on the system.")
def list_all_cmds(cdb):
    """Loads the cmake-generated compilation database (a JSON list)."""
    with open(cdb) as db_file:
        commands = json.load(db_file)
    return commands
def get_gpu_archs(command):
    """Extracts the GPU architecture from nvcc's "-gencode ..." flags.

    clang accepts only a single architecture, so the lowest one found is
    returned as a "--cuda-gpu-arch=sm_XX" flag.

    Parameters
    ----------
    command : list of str
        The tokenized nvcc compile command.

    Returns
    -------
    list of str
        A one-element list with the clang arch flag, or an empty list when
        the command carries no recognizable architecture flag (the previous
        implementation raised ValueError on min([]) in that case).
    """
    archs = []
    for loc in range(len(command)):
        if (command[loc] != "-gencode" and command[loc] != "--generate-code"
                and not command[loc].startswith("--generate-code=")):
            continue
        if command[loc].startswith("--generate-code="):
            arch_flag = command[loc][len("--generate-code="):]
        else:
            arch_flag = command[loc + 1]
        # re caches compiled patterns, so an inline pattern keeps this
        # block self-contained at no real cost
        match = re.search(r"sm_(\d+)", arch_flag)
        if match is not None:
            archs.append(int(match.group(1)))
    if not archs:
        # no architecture flags at all: emit nothing instead of crashing
        return []
    return ["--cuda-gpu-arch=sm_%d" % min(archs)]
def get_index(arr, item_options):
    """Returns the set of indices in ``arr`` whose value is one of
    ``item_options``."""
    wanted = set(item_options)
    return {i for i, entry in enumerate(arr) if entry in wanted}
def remove_items(arr, item_options):
    """Deletes, in place, every element of ``arr`` that matches one of
    ``item_options`` exactly."""
    matches = [i for i, entry in enumerate(arr) if entry in item_options]
    # delete back-to-front so earlier indices stay valid
    for i in reversed(matches):
        del arr[i]
def remove_items_plus_one(arr, item_options):
    """Removes, in place, every "<flag> <value>" pair and every
    "<flag>=<value>" token from ``arr``, for flags listed in
    ``item_options``."""
    # First pass: "<flag>" followed by its value as a separate token.
    # Delete back-to-front so remaining indices stay valid; the value at
    # i + 1 (if any) goes before the flag itself.
    for i in sorted(get_index(arr, item_options), reverse=True):
        if i < len(arr) - 1:
            del arr[i + 1]
        del arr[i]
    # Second pass: the single-token "<flag>=<value>" spelling, computed on
    # the already-shrunk list.
    idx = set(i for i, s in enumerate(arr) for item in item_options
              if s.startswith(item + "="))
    for i in sorted(idx, reverse=True):
        del arr[i]
def add_cuda_path(command, nvcc):
    """Appends clang's --cuda-path flag, derived from the nvcc location.

    Raises
    ------
    Exception
        If ``nvcc`` cannot be found on the PATH.
    """
    nvcc_path = shutil.which(nvcc)
    if not nvcc_path:
        raise Exception("Command %s has invalid compiler %s" % (command, nvcc))
    # nvcc lives in <cuda_root>/bin, so strip two path components
    cuda_root = os.path.dirname(os.path.dirname(nvcc_path))
    command.append('--cuda-path=%s' % cuda_root)
def get_tidy_args(cmd, args):
    """Translates one nvcc/g++ compilation-database entry into arguments
    suitable for clang-tidy's trailing compiler command.

    Parameters
    ----------
    cmd : dict
        A compile_commands.json entry with "command" and "file" keys.
    args : argparse.Namespace
        Parsed script arguments (thrust_dir, build_dir, ...).

    Returns
    -------
    tuple of (list of str, bool)
        The rewritten command (starting with "clang++") and whether the
        file is a CUDA source.
    """
    command, file = cmd["command"], cmd["file"]
    is_cuda = file.endswith(".cu")
    command = re.split(SPACES, command)
    # get original compiler
    cc_orig = command[0]
    # compiler is always clang++!
    command[0] = "clang++"
    # remove compilation and output targets from the original command
    remove_items_plus_one(command, ["--compile", "-c"])
    remove_items_plus_one(command, ["--output-file", "-o"])
    if is_cuda:
        # include our own cub before anything else
        # (left-most should have highest priority)
        command.insert(1, "-I%s" % args.thrust_dir)
        # replace nvcc's "-gencode ..." with clang's "--cuda-gpu-arch ..."
        archs = get_gpu_archs(command)
        command.extend(archs)
        # provide proper cuda path to clang
        add_cuda_path(command, cc_orig)
        # remove all kinds of nvcc flags clang doesn't know about
        remove_items_plus_one(command, [
            "--generate-code",
            "-gencode",
            "--x",
            "-x",
            "--compiler-bindir",
            "-ccbin",
            "--diag_suppress",
            "-diag-suppress",
            "--default-stream",
            "-default-stream",
        ])
        remove_items(command, [
            "-extended-lambda",
            "--extended-lambda",
            "-expt-extended-lambda",
            "--expt-extended-lambda",
            "-expt-relaxed-constexpr",
            "--expt-relaxed-constexpr",
            "--device-debug",
            "-G",
            "--generate-line-info",
            "-lineinfo",
        ])
        # "-x cuda" is the right usage in clang
        command.extend(["-x", "cuda"])
        # we remove -Xcompiler flags: here we basically have to hope for the
        # best that clang++ will accept any flags which nvcc passed to gcc
        for i, c in reversed(list(enumerate(command))):
            new_c = XCOMPILER_FLAG.sub('', c)
            if new_c == c:
                continue
            # nvcc comma-separates forwarded flags; splice them back in
            command[i:i + 1] = new_c.split(',')
        # we also change -Xptxas to -Xcuda-ptxas, always adding space here
        for i, c in reversed(list(enumerate(command))):
            if XPTXAS_FLAG.search(c):
                if not c.endswith("=") and i < len(command) - 1:
                    del command[i + 1]
                command[i] = '-Xcuda-ptxas'
                command.insert(i + 1, XPTXAS_FLAG.sub('', c))
        # several options like isystem don't expect `=`
        for opt in OPTIONS_NO_EQUAL_SIGN:
            opt_eq = opt + '='
            # make sure that we iterate from back to front here for insert
            for i, c in reversed(list(enumerate(command))):
                if not c.startswith(opt_eq):
                    continue
                x = c.split('=')
                # we only care about the first `=`
                command[i] = x[0]
                command.insert(i + 1, '='.join(x[1:]))
        # use extensible whole program, to avoid ptx resolution/linking
        command.extend(["-Xcuda-ptxas", "-ewp"])
        # for libcudacxx, we need to allow variadic functions
        command.extend(["-Xclang", "-fcuda-allow-variadic-functions"])
        # add some additional CUDA intrinsics
        cuda_intrinsics_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "__clang_cuda_additional_intrinsics.h")
        command.extend(["-include", cuda_intrinsics_file])
    # somehow this option gets onto the commandline, it is unrecognized by tidy
    remove_items(command, [
        "--forward-unknown-to-host-compiler",
        "-forward-unknown-to-host-compiler"
    ])
    # do not treat warnings as errors here !
    for i, x in reversed(list(enumerate(command))):
        if x.startswith("-Werror"):
            del command[i]
    # try to figure out which GCC CMAKE used, and tell clang all about it
    command.append("--gcc-toolchain=%s" % get_gcc_root(args))
    return command, is_cuda
def check_output_for_errors(output):
    """Scan clang-tidy output; return (warning-line count, list of error lines).

    A line containing both markers is counted in both buckets; there are no
    allowed errors, so any error line means failure.
    """
    all_lines = output.splitlines()
    errors = [ln for ln in all_lines if "error:" in ln]
    warnings_found = sum(1 for ln in all_lines if "warning:" in ln)
    return warnings_found, errors
def run_clang_tidy_command(tidy_cmd, cwd):
    """Execute one clang-tidy invocation; return (passed, output, error_lines).

    `passed` is True only when the captured output contains no warning and no
    error lines; the process exit code is recorded in the output text but is
    not used to decide pass/fail.
    """
    # join and run through the shell so quoted arguments (e.g. the
    # -header-filter regex built by the caller) are interpreted as intended
    cmd = " ".join(tidy_cmd)
    result = subprocess.run(cmd, check=False, shell=True, cwd=cwd,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # stderr is merged into stdout above, so one decoded string has everything
    result.stdout = result.stdout.decode("utf-8").strip()
    out = "CMD: " + cmd + "\n"
    out += "EXIT-CODE: %d\n" % result.returncode
    n_warnings, errors = check_output_for_errors(result.stdout)
    status = n_warnings == 0 and not errors
    out += result.stdout
    return status, out, errors
class LockContext(object):
    """Context manager around an optional multiprocessing/threading lock.

    Constructed without a lock it is a no-op, which lets the sequential code
    path use the same ``with lock:`` syntax as the parallel one.
    """

    def __init__(self, lock=None) -> None:
        self._lock = lock

    def __enter__(self):
        # acquire only when a real lock was supplied
        if self._lock is not None:
            self._lock.acquire()
        return self

    def __exit__(self, _, __, ___):
        if self._lock is not None:
            self._lock.release()
        # returning False propagates any exception raised inside the block
        return False
def print_result(passed, stdout, file, errors):
    """Print one file's tidy outcome; return its output lines when it failed.

    Hard errors are never tolerated, so any entry in `errors` raises.
    """
    if any(errors):
        raise Exception(
            "File %s: got %d errors:\n%s" % (file, len(errors), stdout))
    status_str = "PASSED" if passed else "FAILED"
    print("%s File:%s %s %s" % (SEPARATOR, file, status_str, SEPARATOR))
    if passed or not stdout:
        return []
    # failed: dump the full tidy output so the warnings are visible
    print(stdout)
    print("%s\n" % END_SEPARATOR)
    return stdout.splitlines()
def run_clang_tidy(cmd, args):
    """Run clang-tidy on one compile-commands entry and print the result.

    CUDA sources are checked twice (device-only and host-only passes);
    returns (passed, output_lines) for aggregation by the caller.
    """
    command, is_cuda = get_tidy_args(cmd, args)
    # restrict diagnostics to this repo's own c++/cuda headers
    header_path_any = os.path.join(os.path.basename(args.root), "cpp", ".*")
    header_filter = "-header-filter='.*%s[.](cuh|h|hpp)$'" % header_path_any
    tidy_cmd = [args.exe, header_filter, cmd["file"], "--"]
    tidy_cmd.extend(command)
    status = True
    out = ""
    if is_cuda:
        # pass 1: device-side compilation
        tidy_cmd.append("--cuda-device-only")
        tidy_cmd.append(cmd["file"])
        ret, out1, errors1 = run_clang_tidy_command(tidy_cmd, args.build_dir)
        out += out1
        out += "\n%s\n" % SEPARATOR
        status = status and ret
        # pass 2: reuse the same command line, swapping the mode flag
        # (it sits second-to-last, just before the trailing file argument)
        tidy_cmd[-2] = "--cuda-host-only"
        ret, out1, errors2 = run_clang_tidy_command(tidy_cmd, args.build_dir)
        status = status and ret
        out += out1
        errors = errors1 + errors2
    else:
        tidy_cmd.append(cmd["file"])
        ret, out1, errors = run_clang_tidy_command(tidy_cmd, args.build_dir)
        status = status and ret
        out += out1
    # we immediately print the result since this is more interactive for user
    # (`lock` is a module global installed by run_sequential/copy_lock)
    with lock:
        lines = print_result(status, out, cmd["file"], errors)
    return status, lines
def parse_results(results):
    """Collapse per-file (status, lines) pairs into one (all_passed, all_lines)."""
    combined = []
    for _, lines in results:
        combined.extend(lines)
    return all(status for status, _ in results), combined
# mostly used for debugging purposes
def run_sequential(args, all_files):
    """Lint every file one at a time in this process (debug-friendly path)."""
    # the module-level `lock` must exist, mirroring `run_parallel`;
    # a bare LockContext is a no-op stand-in
    global lock
    lock = LockContext()
    results = []
    for cmd in all_files:
        path = cmd["file"]
        # honor the include/exclude regexes supplied on the command line
        skip = (args.ignore_compiled is not None
                and re.search(args.ignore_compiled, path) is not None)
        skip = skip or (args.select_compiled is not None
                        and re.search(args.select_compiled, path) is None)
        if skip:
            continue
        results.append(run_clang_tidy(cmd, args))
    return parse_results(results)
def copy_lock(init_lock):
    """Pool-worker initializer: install the shared lock as a module global.

    Locks cannot be pickled into pool workers as task arguments, so each
    worker receives it through this initializer instead; see
    https://stackoverflow.com/questions/25557686/
    python-sharing-a-lock-between-processes
    """
    global lock
    lock = init_lock
def run_parallel(args, all_files):
    """Lint all files using a pool of `args.j` worker processes."""
    # the lock is handed to workers via the pool initializer; it cannot be
    # passed as a task argument because locks are not picklable
    init_lock = LockContext(mp.Lock())
    pool = mp.Pool(args.j, initializer=copy_lock, initargs=(init_lock,))
    results = []
    # actual tidy checker
    for cmd in all_files:
        # skip files that we don't want to look at
        if args.ignore_compiled is not None and \
                re.search(args.ignore_compiled, cmd["file"]) is not None:
            continue
        if args.select_compiled is not None and \
                re.search(args.select_compiled, cmd["file"]) is None:
            continue
        results.append(pool.apply_async(run_clang_tidy, args=(cmd, args)))
    # .get() blocks for completion and re-raises any worker exception here
    results_final = [r.get() for r in results]
    pool.close()
    pool.join()
    return parse_results(results_final)
def main():
    """Entry point: run clang-tidy over the compile database and summarize."""
    args = parse_args()
    # ensure we always run this script from the root of the repo
    if not os.path.exists(".git"):
        raise Exception("This needs to always be run from the root of repo")
    all_files = list_all_cmds(args.cdb)
    # ensure that we use only the real paths
    for cmd in all_files:
        cmd["file"] = os.path.realpath(os.path.expanduser(cmd["file"]))
    if args.j == 1:
        status, lines = run_sequential(args, all_files)
    else:
        status, lines = run_parallel(args, all_files)
    if not status:
        # on failure, summarize how often each check name appears in the output.
        # first get a list of all checks that were run
        ret = subprocess.check_output(args.exe + " --list-checks", shell=True)
        ret = ret.decode("utf-8")
        # check names are the 4-space-indented lines of --list-checks output
        checks = [line.strip() for line in ret.splitlines()
                  if line.startswith(' ' * 4)]
        max_check_len = max(len(c) for c in checks)
        check_counts = dict()
        content = os.linesep.join(lines)
        for check in checks:
            # NOTE(review): plain substring counting may over-count a check
            # whose name is a prefix of another check's name
            check_counts[check] = content.count(check)
        sorted_counts = sorted(
            check_counts.items(), key=lambda x: x[1], reverse=True)
        print("Failed {} check(s) in total. Counts as per below:".format(
            sum(1 for _, count in sorted_counts if count > 0)))
        for check, count in sorted_counts:
            # counts are sorted descending, so stop at the first zero
            if count <= 0:
                break
            n_space = max_check_len - len(check) + 4
            print("{}:{}{}".format(check, ' ' * n_space, count))
        raise Exception("clang-tidy failed! Refer to the errors above.")
| 0 |
rapidsai_public_repos/cuvs/cpp/scripts/heuristics | rapidsai_public_repos/cuvs/cpp/scripts/heuristics/select_k/generate_plots.ipynb | from collections import defaultdict
import pandas as pd
import json
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme()from select_k_dataset import load_dataframe, get_dataset
df = load_dataframe("select_k_times.json")
df = df[(df.use_memory_pool == True)]
df = df[(df.index_type == 'int64_t') & (df.key_type == 'float')]
dfdef generate_plot(df, x_axis="col", title=""):
fig, ax = plt.subplots()
for algo in sorted(set(df.algo)):
current = df[(df.algo == algo) & (df.time < np.inf)]
ax.plot(current[x_axis], current["time"], label=algo)
ax.set_xscale('log', base=2)
ax.set_yscale('log', base=2)
ax.set_xlabel(x_axis)
ax.set_ylabel("time(s)")
ax.set_title(title)
fig.set_dpi(200)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=4)
# fig.legend()
plt.show()
def generate_k_plot(df, col, row):
return generate_plot(df[(df.col == col) & (df.row == row)], "k", f"#cols={col}, #rows={row}")
def generate_col_plot(df, row, k):
return generate_plot(df[(df.row == row) & (df.k == k)], "col", f"#rows={row}, k={k}")
def generate_row_plot(df, col, k):
return generate_plot(df[(df.col == col) & (df.k == k)], "row", f"#cols={col}, k={k}")generate_col_plot(df, 256, 32)generate_k_plot(df, 2**20, 256) | 0 |
rapidsai_public_repos/cuvs/cpp/scripts/heuristics | rapidsai_public_repos/cuvs/cpp/scripts/heuristics/select_k/generate_heuristic.ipynb | from select_k_dataset import load_dataframe, get_dataset
import sklearn.tree
import numpy as np# load up the timings from the MATRIX_BENCH script into a pandas dataframe
df = load_dataframe("select_k_times.json")
# we're limiting down to 3 different select_k methods - chosen by
# the 'algorithm_selection.ipynb' script here
df = df[df.algo.isin(['kWarpImmediate', 'kRadix11bitsExtraPass', 'kRadix11bits', 'kWarpDistributedShm'])]
# we're also assuming we have a memory pool for now
df = df[(df.use_memory_pool == True)]
# df = df[(df.index_type == 'int64_t') & (df.key_type == 'float')]
df# break down into a train/set set
X, y, weights = get_dataset(df)
train_test_sets = sklearn.model_selection.train_test_split(X, y, weights, test_size=0.15, random_state=1)
X_train, X_test, y_train, y_test, weights_train, weights_test = train_test_sets
X_train.shape, X_test.shapemodel = sklearn.tree.DecisionTreeClassifier(max_depth=4, max_leaf_nodes=8)
model.fit(X_train, y_train) #, weights_train)model.score(X_train, y_train, weights_train)model.score(X_test, y_test, weights_test)# print(sklearn.tree.export_text(model, feature_names=["k", "rows", "cols"]))import matplotlib.pyplot as plt
plt.figure(figsize=(12,12))
viz = sklearn.tree.plot_tree(model, fontsize=8, class_names=list(model.classes_), feature_names=["k", "rows", "cols", "use_memory_pool"], impurity=True)def convert_model_to_code(model):
classes = model.classes_
tree = model.tree_
feature_names = ["k", "rows", "cols", "use_memory_pool"]
def _get_label(nodeid):
""" returns the most frequent class name for the node """
return classes[np.argsort(tree.value[nodeid, 0])[-1]]
def _is_leaf_node(nodeid):
""" returns whether or not the node is a leaf node in the tree"""
# negative values here indicate we're a leaf
if tree.feature[nodeid] < 0:
return True
# some nodes have both branches with the same label, combine those
left, right = tree.children_left[nodeid], tree.children_right[nodeid]
if (_is_leaf_node(left) and
_is_leaf_node(right) and
_get_label(left) == _get_label(right)):
return True
return False
code = []
def _convert_node(nodeid, indent):
if _is_leaf_node(nodeid):
# we're a leaf node, just output the label of the most frequent algorithm
class_name = _get_label(nodeid)
code.append(" " * indent + f"return Algo::{class_name};")
else:
feature = feature_names[tree.feature[nodeid]]
threshold = int(np.floor(tree.threshold[nodeid]))
code.append(" " * indent + f"if ({feature} > {threshold}) " + "{")
_convert_node(tree.children_right[nodeid], indent + 2)
code.append(" " * indent + "} else {")
_convert_node(tree.children_left[nodeid], indent + 2)
code.append(" " * indent + "}")
code.append("inline Algo choose_select_k_algorithm(size_t rows, size_t cols, int k)")
code.append("{")
_convert_node(0, indent=2)
code.append("}")
return "\n".join(code)
code = convert_model_to_code(model)
print(code)# also update the source code in raft/matrix/detail/select_k.cuh
import pathlib
select_k_path = pathlib.Path.cwd() / ".." / ".." / ".." / "include" / "raft" / "matrix" / "detail" / "select_k-inl.cuh"
source_lines = open(select_k_path.resolve()).read().split("\n")
# figure out the location of the code snippet in the file, and splice it in
code_lines = code.split("\n")
first_line = source_lines.index(code_lines[0])
last_line = source_lines.index(code_lines[-1], first_line)
new_source = source_lines[:first_line] + code_lines + source_lines[last_line+1:]
open(select_k_path.resolve(), "w").write("\n".join(new_source)) | 0 |
rapidsai_public_repos/cuvs/cpp/scripts/heuristics | rapidsai_public_repos/cuvs/cpp/scripts/heuristics/select_k/algorithm_selection.ipynb | from select_k_dataset import load_dataframe, get_dataset
df = load_dataframe("select_k_times.json")
dffrom collections import Counter
def rank_algos(df, use_relative_speedup=False):
_, y, weights = get_dataset(df)
times = Counter()
for algo, speedup in zip(y, weights):
times[algo] += speedup if use_relative_speedup else 1
return sorted(times.items(), key=lambda x:-x[-1])# show the number of times each algorithm is fastest for a given k/# of rows/# of cols / dtype / memory pool etc
rank_algos(df)# kRadix8bits seems to have a performance issue with 64 bit index types, it is one
# of the worst performing algorithms for 64bit indices, but one of the top 3 for 32 bit
rank_algos(df[df.index_type == "int64_t"])rank_algos(df[df.index_type == "uint32_t"])# do an algorithm selection pass, repeatedly remove the lowest performing algorithm
#
# The idea here is that we can simplify the decision logic, reduce the binary size
# and speed up the compilation time by only including a subset of selection algorithms.
# we're aiming to get algorithms that perform well in different situations, and complement
# each other - so to do this, we're iteratively removing the worst performing algorithm,
# after which algorithms are re-evaluated on their speedups relative to the remaining
# algorithms. This gets us a minimum spanning set of selection algorithms that performs
# well over diverse inputs.
#
# note: the lowest performing algorithm here might actually be pretty good, but
# just not provide much benefit over another similar algorithm.
# As an example, kWarpDistributed is an excellent selection algorithm, but in testing
# kWarpDistributedShm is slightly faster than it in situations where it does well,
# meaning that it gets removed early on in this loop
current = df[df.use_memory_pool == True]
algos = set(df.algo)
# we're arbitrarily getting this down to 3 selection algorithms
while len(algos) > 4:
times = rank_algos(current, use_relative_speedup=False)
algo, speedup = times[-1]
algos.remove(algo)
current = df[df.algo.isin(algos)]
print("selected", algos)
rank_algos(current)# experimenting with different subsets of index type / dtype / use memory seems
# to pretty consistently show that kRadix11bits / kWarpDistributedShm / kFaissBlockSelect
# all get selected here | 0 |
rapidsai_public_repos/cuvs/cpp/scripts/heuristics | rapidsai_public_repos/cuvs/cpp/scripts/heuristics/select_k/select_k_dataset.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
import json
import pandas as pd
import numpy as np
def load_dataframe(filename):
    """Loads up the select_k benchmark times as a pandas dataframe.

    This loads up the timings from the MATRIX_BENCH script into a pandas
    dataframe. The input file is generated by running:
    ./cpp/build/MATRIX_BENCH --benchmark_filter=SelectKDataset \
        --benchmark_out_format=json \
        --benchmark_out=select_k_times.json \
        --select_k_dataset
    Note running these MATRIX_BENCH tests takes over 24 hours right now.

    Returns a dataframe with one row per benchmark run, columns:
    key_type, index_type, algo, row, col, k, use_index_input,
    use_memory_pool, time (seconds).
    """
    # use a context manager so the file handle is closed promptly
    # (the previous json.load(open(filename)) leaked it)
    with open(filename) as f:
        benchmarks = json.load(f)["benchmarks"]
    df = pd.DataFrame(benchmarks, columns=["real_time", "run_name"])
    # run_name looks like "Name/key_type/index_type/algo/row/col/k/idx/pool";
    # fields 1-3 are strings, fields 4-8 are integers
    run_info = [
        run[1:4] + list(map(int, run[4:9]))
        for run in df.run_name.str.split("/").tolist()
    ]
    df[
        [
            "key_type",
            "index_type",
            "algo",
            "row",
            "col",
            "k",
            "use_index_input",
            "use_memory_pool",
        ]
    ] = pd.DataFrame(run_info, index=df.index)
    # benchmark reports milliseconds; convert to seconds
    df["time"] = df["real_time"] / 1000
    df = df.drop(["run_name", "real_time"], axis=1)
    df = df.sort_values(
        by=[
            "k",
            "row",
            "col",
            "key_type",
            "index_type",
            "use_index_input",
            "use_memory_pool",
        ]
    )
    df = df.reset_index(drop=True)
    return df
def get_dataset(df):
    """Returns the training features, labels and sample weights from a dataframe."""
    # bucket the (algo, time) measurements by their input-feature tuple
    grouped = defaultdict(list)
    for entry in df.itertuples():
        key = (
            entry.k,
            entry.row,
            entry.col,
            entry.use_memory_pool,
            entry.key_type,
            entry.index_type,
        )
        grouped[key].append((entry.algo, entry.time))
    X, y, weights = [], [], []
    for feature, measurements in grouped.items():
        # rank the algorithms by time to find the winner for this input
        ranked = sorted(measurements, key=lambda at: at[1])
        best_algo, best_time = ranked[0]
        # the sample weight is the absolute time saved over the runner-up,
        # capped at 10: big 2x/10x wins matter, tiny 1% wins may be noise
        if len(ranked) == 1:
            # no other algorithm handles this input at all
            runner_up_time = np.inf
        else:
            runner_up_time = ranked[1][1]
        sample_weight = min(runner_up_time - best_time, 10)
        # the trailing dtype fields are not usable as training features yet
        X.append(feature[:-2])
        y.append(best_algo)
        weights.append(sample_weight)
    return np.array(X), np.array(y), np.array(weights)
| 0 |
rapidsai_public_repos/cuvs/cpp | rapidsai_public_repos/cuvs/cpp/template/CMakeLists.txt | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
# ------------- configure rapids-cmake --------------#
include(cmake/thirdparty/fetch_rapids.cmake)
include(rapids-cmake)
include(rapids-cpm)
include(rapids-cuda)
include(rapids-export)
include(rapids-find)
# ------------- configure project --------------#
rapids_cuda_init_architectures(test_cuvs)
project(test_cuvs LANGUAGES CXX CUDA)
# ------------- configure cuvs -----------------#
rapids_cpm_init()
include(cmake/thirdparty/get_cuvs.cmake)
# -------------- compile tasks ----------------- #
add_executable(CAGRA_EXAMPLE src/cagra_example.cu)
target_link_libraries(CAGRA_EXAMPLE PRIVATE cuvs::cuvs cuvs::compiled)
add_executable(IVF_FLAT_EXAMPLE src/ivf_flat_example.cu)
target_link_libraries(IVF_FLAT_EXAMPLE PRIVATE cuvs::cuvs cuvs::compiled)
| 0 |
rapidsai_public_repos/cuvs/cpp | rapidsai_public_repos/cuvs/cpp/template/README.md | # Example CUVS Project Template
This template project provides a drop-in sample to either start building a new application with, or using CUVS in an existing CMake project.
First, please refer to our [installation docs](https://docs.rapids.ai/api/cuvs/stable/build.html#cuda-gpu-requirements) for the minimum requirements to use CUVS.
Once the minimum requirements are satisfied, this example template application can be built with the provided `build.sh` script. This is a bash script that calls the appropriate CMake commands, so you can look into it to see the typical CMake based build workflow.
This directory (`CUVS_SOURCE/cpp/template`) can be copied directly in order to build a new application with CUVS.
CUVS can be integrated into an existing CMake project by copying the contents in the `configure rapids-cmake` and `configure cuvs` sections of the provided `CMakeLists.txt` into your project, along with `cmake/thirdparty/get_cuvs.cmake`.
Make sure to link against the appropriate CMake targets. Use `cuvs::cuvs` to make the headers available and `cuvs::compiled` when utilizing the shared library.
```cmake
target_link_libraries(your_app_target PRIVATE cuvs::cuvs cuvs::compiled)
```
| 0 |
rapidsai_public_repos/cuvs/cpp | rapidsai_public_repos/cuvs/cpp/template/build.sh | #!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
# cuvs empty project template build script
# Abort script on first error
set -e
PARALLEL_LEVEL=${PARALLEL_LEVEL:=`nproc`}
BUILD_TYPE=Release
BUILD_DIR=build/
CUVS_REPO_REL=""
EXTRA_CMAKE_ARGS=""
set -e
if [[ ${CUVS_REPO_REL} != "" ]]; then
CUVS_REPO_PATH="`readlink -f \"${CUVS_REPO_REL}\"`"
EXTRA_CMAKE_ARGS="${EXTRA_CMAKE_ARGS} -DCPM_cuvs_SOURCE=${CUVS_REPO_PATH}"
fi
if [ "$1" == "clean" ]; then
rm -rf build
exit 0
fi
mkdir -p $BUILD_DIR
cd $BUILD_DIR
cmake \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DCUVS_NVTX=OFF \
-DCMAKE_CUDA_ARCHITECTURES="NATIVE" \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
${EXTRA_CMAKE_ARGS} \
../
cmake --build . -j${PARALLEL_LEVEL}
| 0 |
rapidsai_public_repos/cuvs/cpp/template/cmake | rapidsai_public_repos/cuvs/cpp/template/cmake/thirdparty/fetch_rapids.cmake | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# Use this variable to update RAPIDS and RAFT versions
set(RAPIDS_VERSION "24.02")
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/RAFT_RAPIDS.cmake)
file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-${RAPIDS_VERSION}/RAPIDS.cmake
${CMAKE_CURRENT_BINARY_DIR}/RAFT_RAPIDS.cmake)
endif()
include(${CMAKE_CURRENT_BINARY_DIR}/RAFT_RAPIDS.cmake)
| 0 |
rapidsai_public_repos/cuvs/cpp/template/cmake | rapidsai_public_repos/cuvs/cpp/template/cmake/thirdparty/get_cuvs.cmake | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# Use RAPIDS_VERSION from cmake/thirdparty/fetch_rapids.cmake
set(CUVS_VERSION "${RAPIDS_VERSION}")
set(CUVS_FORK "rapidsai")
set(CUVS_PINNED_TAG "branch-${RAPIDS_VERSION}")
function(find_and_configure_cuvs)
set(oneValueArgs VERSION FORK PINNED_TAG COMPILE_LIBRARY ENABLE_NVTX ENABLE_MNMG_DEPENDENCIES)
cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
"${multiValueArgs}" ${ARGN} )
set(CUVS_COMPONENTS "")
if(PKG_COMPILE_LIBRARY)
string(APPEND CUVS_COMPONENTS " compiled")
endif()
if(PKG_ENABLE_MNMG_DEPENDENCIES)
string(APPEND CUVS_COMPONENTS " distributed")
endif()
#-----------------------------------------------------
# Invoke CPM find_package()
#-----------------------------------------------------
rapids_cpm_find(cuvs ${PKG_VERSION}
GLOBAL_TARGETS cuvs::cuvs
BUILD_EXPORT_SET cuvs-template-exports
INSTALL_EXPORT_SET cuvs-template-exports
COMPONENTS ${CUVS_COMPONENTS}
CPM_ARGS
GIT_REPOSITORY https://github.com/${PKG_FORK}/cuvs.git
GIT_TAG ${PKG_PINNED_TAG}
SOURCE_SUBDIR cpp
OPTIONS
"BUILD_TESTS OFF"
"BUILD_PRIMS_BENCH OFF"
"BUILD_ANN_BENCH OFF"
"CUVS_NVTX ${ENABLE_NVTX}"
"CUVS_COMPILE_LIBRARY ${PKG_COMPILE_LIBRARY}"
)
endfunction()
# Change pinned tag here to test a commit in CI
# To use a different CUVS locally, set the CMake variable
# CPM_cuvs_SOURCE=/path/to/local/cuvs
find_and_configure_cuvs(VERSION ${CUVS_VERSION}.00
FORK ${CUVS_FORK}
PINNED_TAG ${CUVS_PINNED_TAG}
COMPILE_LIBRARY ON
ENABLE_MNMG_DEPENDENCIES OFF
ENABLE_NVTX OFF
)
| 0 |
rapidsai_public_repos/cuvs/cpp/template | rapidsai_public_repos/cuvs/cpp/template/src/ivf_flat_example.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <cuvs/neighbors/ivf_flat.cuh>
#include <optional>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include "common.cuh"
// Build an IVF-Flat index over `dataset` and search the 10 nearest neighbors
// for every row of `queries`. Both inputs are device matrices; the cuvs calls
// run asynchronously on the stream owned by `dev_resources`.
void ivf_flat_build_search_simple(raft::device_resources const& dev_resources,
                                  raft::device_matrix_view<const float, int64_t> dataset,
                                  raft::device_matrix_view<const float, int64_t> queries)
{
  using namespace cuvs::neighbors;
  // cluster the dataset into 1024 inverted lists, training k-means on a
  // 10% subset of the vectors
  ivf_flat::index_params index_params;
  index_params.n_lists = 1024;
  index_params.kmeans_trainset_fraction = 0.1;
  index_params.metric = cuvs::distance::DistanceType::L2Expanded;
  std::cout << "Building IVF-Flat index" << std::endl;
  auto index = ivf_flat::build(dev_resources, index_params, dataset);
  std::cout << "Number of clusters " << index.n_lists() << ", number of vectors added to index "
            << index.size() << std::endl;
  // Create output arrays.
  int64_t topk = 10;
  int64_t n_queries = queries.extent(0);
  auto neighbors = raft::make_device_matrix<int64_t>(dev_resources, n_queries, topk);
  auto distances = raft::make_device_matrix<float>(dev_resources, n_queries, topk);
  // Set search parameters: probe 50 of the 1024 clusters per query.
  ivf_flat::search_params search_params;
  search_params.n_probes = 50;
  // Search K nearest neighbors for each of the queries.
  ivf_flat::search(
    dev_resources, search_params, index, queries, neighbors.view(), distances.view());
  // The call to ivf_flat::search is asynchronous. Before accessing the data, sync by calling
  // raft::resource::sync_stream(dev_resources);
  // (print_results performs that sync internally before reading.)
  print_results(dev_resources, neighbors.view(), distances.view());
}
// Build an IVF-Flat index from a sub-sampled training set, then extend it
// with the full dataset (with explicit per-vector indices) before searching.
// Demonstrates the add_data_on_build=false + extend() workflow.
void ivf_flat_build_extend_search(raft::device_resources const& dev_resources,
                                  raft::device_matrix_view<const float, int64_t> dataset,
                                  raft::device_matrix_view<const float, int64_t> queries)
{
  using namespace cuvs::neighbors;
  // Define dataset indices: fill [0 .. n_rows) on the device.
  auto data_indices = raft::make_device_vector<int64_t, int64_t>(dev_resources, dataset.extent(0));
  thrust::counting_iterator<int64_t> first(0);
  thrust::device_ptr<int64_t> ptr(data_indices.data_handle());
  thrust::copy(
    raft::resource::get_thrust_policy(dev_resources), first, first + dataset.extent(0), ptr);
  // Sub-sample 10% of the dataset to create a training set.
  auto trainset =
    subsample(dev_resources, dataset, raft::make_const_mdspan(data_indices.view()), 0.1);
  ivf_flat::index_params index_params;
  index_params.n_lists = 100;
  index_params.metric = cuvs::distance::DistanceType::L2Expanded;
  // Defer adding vectors: the index is populated via extend() below.
  index_params.add_data_on_build = false;
  std::cout << "\nRun k-means clustering using the training set" << std::endl;
  auto index =
    ivf_flat::build(dev_resources, index_params, raft::make_const_mdspan(trainset.view()));
  std::cout << "Number of clusters " << index.n_lists() << ", number of vectors added to index "
            << index.size() << std::endl;
  std::cout << "Filling index with the dataset vectors" << std::endl;
  index = ivf_flat::extend(dev_resources,
                           dataset,
                           std::make_optional(raft::make_const_mdspan(data_indices.view())),
                           index);
  // fixed message typo: "addin" -> "adding"
  std::cout << "Index size after adding dataset vectors " << index.size() << std::endl;
  // Set search parameters: probe 10 of the 100 clusters per query.
  ivf_flat::search_params search_params;
  search_params.n_probes = 10;
  // Create output arrays.
  int64_t topk = 10;
  int64_t n_queries = queries.extent(0);
  auto neighbors = raft::make_device_matrix<int64_t, int64_t>(dev_resources, n_queries, topk);
  auto distances = raft::make_device_matrix<float, int64_t>(dev_resources, n_queries, topk);
  // Search K nearest neighbors for each of the queries.
  ivf_flat::search(
    dev_resources, search_params, index, queries, neighbors.view(), distances.view());
  // The call to ivf_flat::search is asynchronous. Before accessing the data, sync using:
  // raft::resource::sync_stream(dev_resources);
  // (print_results performs that sync internally before reading.)
  print_results(dev_resources, neighbors.view(), distances.view());
}
int main()
{
  raft::device_resources dev_resources;
  // Set pool memory resource with 1 GiB initial pool size. All allocations use the same pool.
  rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> pool_mr(
    rmm::mr::get_current_device_resource(), 1024 * 1024 * 1024ull);
  rmm::mr::set_current_device_resource(&pool_mr);
  // Alternatively, one could define a pool allocator for temporary arrays (used within RAFT
  // algorithms). In that case only the internal arrays would use the pool, any other allocation
  // uses the default RMM memory resource. Here is how to change the workspace memory resource to
  // a pool with 2 GiB upper limit.
  // raft::resource::set_workspace_to_pool_resource(dev_resources, 2 * 1024 * 1024 * 1024ull);
  // Create input arrays: 10000 3-dim dataset vectors and 10 query vectors.
  int64_t n_samples = 10000;
  int64_t n_dim = 3;
  int64_t n_queries = 10;
  auto dataset = raft::make_device_matrix<float, int64_t>(dev_resources, n_samples, n_dim);
  auto queries = raft::make_device_matrix<float, int64_t>(dev_resources, n_queries, n_dim);
  generate_dataset(dev_resources, dataset.view(), queries.view());
  // Simple build and search example.
  ivf_flat_build_search_simple(dev_resources,
                               raft::make_const_mdspan(dataset.view()),
                               raft::make_const_mdspan(queries.view()));
  // Build and extend example.
  ivf_flat_build_extend_search(dev_resources,
                               raft::make_const_mdspan(dataset.view()),
                               raft::make_const_mdspan(queries.view()));
}
| 0 |
rapidsai_public_repos/cuvs/cpp/template | rapidsai_public_repos/cuvs/cpp/template/src/common.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/matrix/copy.cuh>
#include <raft/random/make_blobs.cuh>
#include <raft/random/sample_without_replacement.cuh>
#include <raft/util/cudart_utils.hpp>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
// Fill dataset and queries with synthetic data.
// Dataset rows come from raft's make_blobs (cluster labels are generated but
// discarded); queries are drawn uniformly from [-1, 1].
void generate_dataset(raft::device_resources const& dev_resources,
                      raft::device_matrix_view<float, int64_t> dataset,
                      raft::device_matrix_view<float, int64_t> queries)
{
  auto labels = raft::make_device_vector<int64_t, int64_t>(dev_resources, dataset.extent(0));
  raft::random::make_blobs(dev_resources, dataset, labels.view());
  // fixed seed keeps the example reproducible across runs
  raft::random::RngState r(1234ULL);
  // fill the query matrix as one flat vector of queries.size() floats
  raft::random::uniform(dev_resources,
                        r,
                        raft::make_device_vector_view(queries.data_handle(), queries.size()),
                        -1.0f,
                        1.0f);
}
// Copy the results to host and print a few samples.
// Synchronizes the stream before touching the host copies, so callers may
// invoke this right after an asynchronous search.
// NOTE(review): prints the first two queries, so it assumes neighbors has at
// least 2 rows — confirm at call sites.
template <typename IdxT>
void print_results(raft::device_resources const& dev_resources,
                   raft::device_matrix_view<IdxT, int64_t> neighbors,
                   raft::device_matrix_view<float, int64_t> distances)
{
  int64_t topk = neighbors.extent(1);
  auto neighbors_host = raft::make_host_matrix<IdxT, int64_t>(neighbors.extent(0), topk);
  auto distances_host = raft::make_host_matrix<float, int64_t>(distances.extent(0), topk);
  cudaStream_t stream = raft::resource::get_cuda_stream(dev_resources);
  raft::copy(neighbors_host.data_handle(), neighbors.data_handle(), neighbors.size(), stream);
  raft::copy(distances_host.data_handle(), distances.data_handle(), distances.size(), stream);
  // The calls to RAFT algorithms and raft::copy are asynchronous.
  // We need to sync the stream before accessing the data.
  raft::resource::sync_stream(dev_resources, stream);
  // only show the first two queries to keep the output short
  for (int query_id = 0; query_id < 2; query_id++) {
    std::cout << "Query " << query_id << " neighbor indices: ";
    raft::print_host_vector("", &neighbors_host(query_id, 0), topk, std::cout);
    std::cout << "Query " << query_id << " neighbor distances: ";
    raft::print_host_vector("", &distances_host(query_id, 0), topk, std::cout);
  }
}
/**
 * Subsample the dataset to create a training set.
 *
 * Draws floor(n_samples * fraction) indices from `data_indices` without
 * replacement (fixed seed, so deterministic) and gathers those rows of
 * `dataset` into a newly allocated device matrix.
 */
raft::device_matrix<float, int64_t> subsample(
  raft::device_resources const& dev_resources,
  raft::device_matrix_view<const float, int64_t> dataset,
  raft::device_vector_view<const int64_t, int64_t> data_indices,
  float fraction)
{
  int64_t n_samples = dataset.extent(0);
  int64_t n_dim = dataset.extent(1);
  // float->int64 conversion truncates toward zero
  int64_t n_train = n_samples * fraction;
  auto trainset = raft::make_device_matrix<float, int64_t>(dev_resources, n_train, n_dim);
  // fixed seed keeps the subsample deterministic across runs
  int seed = 137;
  raft::random::RngState rng(seed);
  auto train_indices = raft::make_device_vector<int64_t>(dev_resources, n_train);
  raft::random::sample_without_replacement(
    dev_resources, rng, data_indices, std::nullopt, train_indices.view(), std::nullopt);
  // gather the selected rows into the training matrix
  raft::matrix::copy_rows(
    dev_resources, dataset, trainset.view(), raft::make_const_mdspan(train_indices.view()));
  return trainset;
}
| 0 |
rapidsai_public_repos/cuvs/cpp/template | rapidsai_public_repos/cuvs/cpp/template/src/cagra_example.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/random/make_blobs.cuh>
#include <cuvs/neighbors/cagra.cuh>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include "common.cuh"
// Build a CAGRA index over `dataset`, search it with `queries` for the
// top-12 nearest neighbors, and print a few of the results.
void cagra_build_search_simple(raft::device_resources const& dev_resources,
                               raft::device_matrix_view<const float, int64_t> dataset,
                               raft::device_matrix_view<const float, int64_t> queries)
{
  using namespace cuvs::neighbors;

  int64_t topk = 12;
  int64_t n_queries = queries.extent(0);

  // create output arrays
  auto neighbors = raft::make_device_matrix<uint32_t>(dev_resources, n_queries, topk);
  auto distances = raft::make_device_matrix<float>(dev_resources, n_queries, topk);

  // use default index parameters
  cagra::index_params index_params;

  std::cout << "Building CAGRA index (search graph)" << std::endl;
  auto index = cagra::build<float, uint32_t>(dev_resources, index_params, dataset);

  std::cout << "CAGRA index has " << index.size() << " vectors" << std::endl;
  std::cout << "CAGRA graph has degree " << index.graph_degree() << ", graph size ["
            << index.graph().extent(0) << ", " << index.graph().extent(1) << "]" << std::endl;

  // use default search parameters
  cagra::search_params search_params;
  // search K nearest neighbors
  cagra::search<float, uint32_t>(
    dev_resources, search_params, index, queries, neighbors.view(), distances.view());

  // The call to cagra::search is asynchronous. Before accessing the data, sync by calling
  // raft::resource::sync_stream(dev_resources);
  // (print_results performs that synchronization internally before reading.)
  print_results(dev_resources, neighbors.view(), distances.view());
}
// Example driver: configure RMM pooling, generate synthetic data, then build
// and query a CAGRA index.
int main()
{
  raft::device_resources dev_resources;

  // Set pool memory resource with 1 GiB initial pool size. All allocations use the same pool.
  // NOTE: pool_mr must outlive every allocation made through it, so it is set
  // up before any device matrices are created below.
  rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> pool_mr(
    rmm::mr::get_current_device_resource(), 1024 * 1024 * 1024ull);
  rmm::mr::set_current_device_resource(&pool_mr);

  // Alternatively, one could define a pool allocator for temporary arrays (used within RAFT
  // algorithms). In that case only the internal arrays would use the pool, any other allocation
  // uses the default RMM memory resource. Here is how to change the workspace memory resource to
  // a pool with 2 GiB upper limit.
  // raft::resource::set_workspace_to_pool_resource(dev_resources, 2 * 1024 * 1024 * 1024ull);

  // Create input arrays.
  int64_t n_samples = 10000;
  int64_t n_dim = 90;
  int64_t n_queries = 10;
  auto dataset = raft::make_device_matrix<float, int64_t>(dev_resources, n_samples, n_dim);
  auto queries = raft::make_device_matrix<float, int64_t>(dev_resources, n_queries, n_dim);
  // Fill the dataset with random blobs and the queries with uniform noise.
  generate_dataset(dev_resources, dataset.view(), queries.view());

  // Simple build and search example.
  cagra_build_search_simple(dev_resources,
                            raft::make_const_mdspan(dataset.view()),
                            raft::make_const_mdspan(queries.view()));
}
| 0 |
rapidsai_public_repos/cuvs/cpp | rapidsai_public_repos/cuvs/cpp/cmake/config.json | {
"parse": {
"additional_commands": {
"CPMFindPackage": {
"kwargs": {
"NAME": 1,
"GITHUB_REPOSITORY": "?",
"GIT_TAG": "?",
"VERSION": "?",
"GIT_SHALLOW": "?",
"OPTIONS": "*",
"FIND_PACKAGE_ARGUMENTS": "*"
}
},
"ConfigureTest": {
"flags": ["TEST_NAME", "TEST_SRC"]
},
"ConfigureBench": {
"flags": ["BENCH_NAME", "BENCH_SRC"]
}
}
},
"format": {
"line_width": 100,
"tab_size": 2,
"command_case": "unchanged",
"max_lines_hwrap": 1,
"max_pargs_hwrap": 999,
"dangle_parens": true
},
"lint": {
"disabled_codes": ["C0301", "C0111", "C0113"],
"function_pattern": "[0-9A-z_]+",
"macro_pattern": "[0-9A-z_]+",
"global_var_pattern": "[A-z][0-9A-z_]+",
"internal_var_pattern": "_[A-z][0-9A-z_]+",
"local_var_pattern": "[A-z][A-z0-9_]+",
"private_var_pattern": "_[0-9A-z_]+",
"public_var_pattern": "[A-z][0-9A-z_]+",
"argument_var_pattern": "[A-z][A-z0-9_]+",
"keyword_pattern": "[A-z][0-9A-z_]+"
}
}
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/patches/nlohmann_json.patch | --- nlohmann/json.hpp 2021-05-06 11:40:39.770669693 +0800
+++ nlohmann/json_patched.hpp 2021-06-02 18:46:43.849334466 +0800
@@ -16607,6 +16607,21 @@
}
}
+
+ template <typename NumberType,
+ enable_if_t<std::is_signed<NumberType>::value, int> = 0>
+ bool is_negative_number(NumberType x)
+ {
+ return x < 0;
+ }
+
+ template < typename NumberType,
+ enable_if_t < std::is_unsigned<NumberType>::value, int > = 0 >
+ bool is_negative_number(NumberType /*unused*/)
+ {
+ return false;
+ }
+
/*!
@brief dump an integer
@@ -16649,12 +16664,11 @@
// use a pointer to fill the buffer
auto buffer_ptr = number_buffer.begin(); // NOLINT(llvm-qualified-auto,readability-qualified-auto,cppcoreguidelines-pro-type-vararg,hicpp-vararg)
- const bool is_negative = std::is_same<NumberType, number_integer_t>::value && !(x >= 0); // see issue #755
number_unsigned_t abs_value;
unsigned int n_chars{};
- if (is_negative)
+ if (is_negative_number(x))
{
*buffer_ptr = '-';
abs_value = remove_sign(static_cast<number_integer_t>(x));
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/patches/ggnn.patch | diff --git a/include/ggnn/cache/cuda_simple_knn_sym_cache.cuh b/include/ggnn/cache/cuda_simple_knn_sym_cache.cuh
index 890420e..d792903 100644
--- a/include/ggnn/cache/cuda_simple_knn_sym_cache.cuh
+++ b/include/ggnn/cache/cuda_simple_knn_sym_cache.cuh
@@ -62,7 +62,7 @@ struct SimpleKNNSymCache {
const ValueT dist_half)
: dist_query(dist_query), dist_half(dist_half) {}
- __device__ __forceinline__ DistQueryAndHalf() {}
+ DistQueryAndHalf() = default;
};
struct DistanceAndNorm {
@@ -98,8 +98,7 @@ struct SimpleKNNSymCache {
KeyT cache;
DistQueryAndHalf dist;
bool flag;
-
- __device__ __forceinline__ SyncTempStorage() {}
+ SyncTempStorage() = default;
};
public:
diff --git a/include/ggnn/cuda_knn_ggnn_gpu_instance.cuh b/include/ggnn/cuda_knn_ggnn_gpu_instance.cuh
index 8cbaf0d..6eb72ac 100644
--- a/include/ggnn/cuda_knn_ggnn_gpu_instance.cuh
+++ b/include/ggnn/cuda_knn_ggnn_gpu_instance.cuh
@@ -41,7 +41,6 @@ limitations under the License.
#include "ggnn/sym/cuda_knn_sym_query_layer.cuh"
#include "ggnn/utils/cuda_knn_utils.cuh"
#include "ggnn/utils/cuda_knn_constants.cuh"
-#include "ggnn/utils/cuda_knn_dataset.cuh"
template <typename ValueT>
__global__ void divide(ValueT* res, ValueT* input, ValueT N) {
@@ -98,9 +97,7 @@ struct GGNNGPUInstance {
typedef GGNNGraphDevice<KeyT, BaseT, ValueT> GGNNGraphDevice;
typedef GGNNGraphHost<KeyT, BaseT, ValueT> GGNNGraphHost;
- const Dataset<KeyT, BaseT, BAddrT>* dataset;
GGNNGraphBuffer<KeyT, ValueT>* ggnn_buffer {nullptr};
- GGNNQuery<KeyT, ValueT, BaseT> ggnn_query;
// Graph Shards resident on the GPU
std::vector<GGNNGraphDevice> ggnn_shards;
@@ -117,13 +114,12 @@ struct GGNNGPUInstance {
// number of shards that need to be processed by this instance
const int num_parts;
- GGNNGPUInstance(const int gpu_id, const Dataset<KeyT, BaseT, BAddrT>* dataset,
+ GGNNGPUInstance(const int gpu_id,
const int N_shard, const int L,
const bool enable_construction, const float tau_build,
const int num_parts=1, const int num_cpu_buffers=1) :
N_shard{N_shard}, L{L}, tau_build{tau_build},
- dataset{dataset}, gpu_id{gpu_id},
- ggnn_query{dataset->N_query, D, KQuery, num_parts},
+ gpu_id{gpu_id},
num_parts{num_parts}
{
CHECK_LE(L, MAX_LAYER);
@@ -135,7 +131,6 @@ struct GGNNGPUInstance {
CHECK_EQ(current_gpu_id, gpu_id) << "cudaSetDevice() needs to be called in advance!";
}
- ggnn_query.loadQueriesAsync(dataset->h_query, 0);
computeGraphParameters();
@@ -186,7 +181,7 @@ struct GGNNGPUInstance {
}
GGNNGPUInstance(const GGNNGPUInstance& other)
- : dataset{nullptr}, ggnn_query{0, D, KQuery},
+ :
gpu_id{0}, N_shard{0}, num_parts{0} {
// this exists to allow using vector::emplace_back
// when it triggers a reallocation, this code will be called.
@@ -305,6 +300,7 @@ struct GGNNGPUInstance {
// io
+ /*
void waitForDiskIO(const int shard_id) {
auto& cpu_buffer = ggnn_cpu_buffers[shard_id%ggnn_cpu_buffers.size()];
if (cpu_buffer.disk_io_thread.joinable())
@@ -468,11 +464,12 @@ struct GGNNGPUInstance {
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaPeekAtLastError());
}
+ */
// graph operations
template <int BLOCK_DIM_X = 32, int MAX_ITERATIONS = 400, int CACHE_SIZE = 512, int SORTED_SIZE = 256, bool DIST_STATS = false>
- void queryLayer(const int shard_id = 0) const {
+ void queryLayer(const BaseT* d_query, int batch_size, KeyT* d_query_result_ids, ValueT* d_query_result_dists, const int shard_id = 0) const {
CHECK_CUDA(cudaSetDevice(gpu_id));
const auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size());
@@ -482,21 +479,21 @@ struct GGNNGPUInstance {
int* m_dist_statistics = nullptr;
if (DIST_STATS)
- cudaMallocManaged(&m_dist_statistics, dataset->N_query * sizeof(int));
+ cudaMallocManaged(&m_dist_statistics, batch_size * sizeof(int));
QueryKernel query_kernel;
query_kernel.d_base = shard.d_base;
- query_kernel.d_query = ggnn_query.d_query;
+ query_kernel.d_query = d_query;
query_kernel.d_graph = shard.d_graph;
- query_kernel.d_query_results = ggnn_query.d_query_result_ids;
- query_kernel.d_query_results_dists = ggnn_query.d_query_result_dists;
+ query_kernel.d_query_results = d_query_result_ids;
+ query_kernel.d_query_results_dists = d_query_result_dists;
query_kernel.d_translation = shard.d_translation;
query_kernel.d_nn1_stats = shard.d_nn1_stats;
- query_kernel.N = dataset->N_query;
+ query_kernel.N = batch_size;
query_kernel.N_offset = 0;
query_kernel.d_dist_stats = m_dist_statistics;
@@ -771,6 +768,16 @@ struct GGNNGPUInstance {
sym(layer, shard_id);
}
}
+
+ void set_stream(cudaStream_t stream) {
+ assert(ggnn_shards.size() == 1);
+ ggnn_shards.at(0).stream = stream;
+ }
+
+ void set_base_data(const BaseT* dataset) {
+ assert(ggnn_shards.size() == 1);
+ ggnn_shards.at(0).d_base = dataset;
+ }
};
#endif // INCLUDE_GGNN_CUDA_KNN_GGNN_GPU_INSTANCE_CUH_
diff --git a/include/ggnn/graph/cuda_knn_ggnn_graph_device.cuh b/include/ggnn/graph/cuda_knn_ggnn_graph_device.cuh
index c94a8f1..781226d 100644
--- a/include/ggnn/graph/cuda_knn_ggnn_graph_device.cuh
+++ b/include/ggnn/graph/cuda_knn_ggnn_graph_device.cuh
@@ -50,7 +50,7 @@ struct GGNNGraphDevice {
ValueT* d_nn1_stats;
/// base data pointer for the shard.
- BaseT* d_base;
+ const BaseT* d_base;
/// combined memory pool
char* d_memory;
@@ -69,7 +69,9 @@ struct GGNNGraphDevice {
const size_t selection_translation_size = align8(ST_all * sizeof(KeyT));
const size_t nn1_stats_size = align8(2 * sizeof(ValueT));
total_graph_size = graph_size + 2 * selection_translation_size + nn1_stats_size;
- base_size = align8(static_cast<size_t>(N) * D * sizeof(BaseT));
+ // base_size = align8(static_cast<size_t>(N) * D * sizeof(BaseT));
+ (void) N;
+ (void) D;
const size_t total_size = base_size+total_graph_size;
@@ -86,8 +88,7 @@ struct GGNNGraphDevice {
CHECK_CUDA(cudaMalloc(&d_memory, total_size));
size_t pos = 0;
- d_base = reinterpret_cast<BaseT*>(d_memory+pos);
- pos += base_size;
+ d_base = nullptr;
d_graph = reinterpret_cast<KeyT*>(d_memory+pos);
pos += graph_size;
d_translation = reinterpret_cast<KeyT*>(d_memory+pos);
@@ -99,14 +100,14 @@ struct GGNNGraphDevice {
CHECK_EQ(pos, total_size);
- CHECK_CUDA(cudaStreamCreate(&stream));
+ // CHECK_CUDA(cudaStreamCreate(&stream));
CHECK_CUDA(cudaPeekAtLastError());
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaPeekAtLastError());
}
- GGNNGraphDevice(const GGNNGraphDevice& other) {
+ GGNNGraphDevice(const GGNNGraphDevice&) {
// this exists to allow using vector::emplace_back
// when it triggers a reallocation, this code will be called.
// always make sure that enough memory is reserved ahead of time.
@@ -116,7 +117,7 @@ struct GGNNGraphDevice {
~GGNNGraphDevice() {
cudaFree(d_memory);
- CHECK_CUDA(cudaStreamDestroy(stream));
+ // CHECK_CUDA(cudaStreamDestroy(stream));
}
};
diff --git a/include/ggnn/graph/cuda_knn_ggnn_graph_host.cuh b/include/ggnn/graph/cuda_knn_ggnn_graph_host.cuh
index 2055f9e..ef5843a 100644
--- a/include/ggnn/graph/cuda_knn_ggnn_graph_host.cuh
+++ b/include/ggnn/graph/cuda_knn_ggnn_graph_host.cuh
@@ -92,7 +92,7 @@ struct GGNNGraphHost {
CHECK_CUDA(cudaPeekAtLastError());
}
- GGNNGraphHost(const GGNNGraphHost& other) {
+ GGNNGraphHost(const GGNNGraphHost&) {
// this exists to allow using vector::emplace_back
// when it triggers a reallocation, this code will be called.
// always make sure that enough memory is reserved ahead of time.
diff --git a/include/ggnn/select/cuda_knn_wrs_select_layer.cuh b/include/ggnn/select/cuda_knn_wrs_select_layer.cuh
index 49d76a1..eef69e6 100644
--- a/include/ggnn/select/cuda_knn_wrs_select_layer.cuh
+++ b/include/ggnn/select/cuda_knn_wrs_select_layer.cuh
@@ -22,7 +22,6 @@ limitations under the License.
#include <cuda.h>
#include <cuda_runtime.h>
-#include <gflags/gflags.h>
#include <cub/cub.cuh>
#include "ggnn/utils/cuda_knn_constants.cuh"
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/patches/hnswlib.patch | diff --git a/hnswlib/hnswalg.h b/hnswlib/hnswalg.h
index e95e0b5..f0fe50a 100644
--- a/hnswlib/hnswalg.h
+++ b/hnswlib/hnswalg.h
@@ -3,6 +3,7 @@
#include "visited_list_pool.h"
#include "hnswlib.h"
#include <atomic>
+#include <limits>
#include <random>
#include <stdlib.h>
#include <assert.h>
@@ -16,6 +17,8 @@ namespace hnswlib {
template<typename dist_t>
class HierarchicalNSW : public AlgorithmInterface<dist_t> {
public:
+ bool base_layer_only{false};
+ int num_seeds=32;
static const tableint max_update_element_locks = 65536;
HierarchicalNSW(SpaceInterface<dist_t> *s) {
}
@@ -56,7 +59,7 @@ namespace hnswlib {
visited_list_pool_ = new VisitedListPool(1, max_elements);
//initializations for special treatment of the first node
- enterpoint_node_ = -1;
+ enterpoint_node_ = std::numeric_limits<tableint>::max();
maxlevel_ = -1;
linkLists_ = (char **) malloc(sizeof(void *) * max_elements_);
@@ -527,7 +530,7 @@ namespace hnswlib {
tableint *datal = (tableint *) (data + 1);
for (int i = 0; i < size; i++) {
tableint cand = datal[i];
- if (cand < 0 || cand > max_elements_)
+ if (cand > max_elements_)
throw std::runtime_error("cand error");
dist_t d = fstdistfunc_(query_data, getDataByInternalId(cand), dist_func_param_);
@@ -1067,7 +1070,7 @@ namespace hnswlib {
tableint *datal = (tableint *) (data + 1);
for (int i = 0; i < size; i++) {
tableint cand = datal[i];
- if (cand < 0 || cand > max_elements_)
+ if (cand > max_elements_)
throw std::runtime_error("cand error");
dist_t d = fstdistfunc_(data_point, getDataByInternalId(cand), dist_func_param_);
if (d < curdist) {
@@ -1119,28 +1122,41 @@ namespace hnswlib {
tableint currObj = enterpoint_node_;
dist_t curdist = fstdistfunc_(query_data, getDataByInternalId(enterpoint_node_), dist_func_param_);
- for (int level = maxlevel_; level > 0; level--) {
- bool changed = true;
- while (changed) {
- changed = false;
- unsigned int *data;
+ if (base_layer_only) {
+ // You can increase the number of seeds when testing large-scale dataset, num_seeds = 48 for 100M-scale
+ for (int i = 0; i < num_seeds; i++) {
+ tableint obj = i * (max_elements_ / num_seeds);
+ dist_t dist = fstdistfunc_(query_data, getDataByInternalId(obj), dist_func_param_);
+ if (dist < curdist) {
+ curdist = dist;
+ currObj = obj;
+ }
+ }
+ }
+ else{
+ for (int level = maxlevel_; level > 0; level--) {
+ bool changed = true;
+ while (changed) {
+ changed = false;
+ unsigned int *data;
- data = (unsigned int *) get_linklist(currObj, level);
- int size = getListCount(data);
- metric_hops++;
- metric_distance_computations+=size;
+ data = (unsigned int *) get_linklist(currObj, level);
+ int size = getListCount(data);
+ metric_hops++;
+ metric_distance_computations+=size;
- tableint *datal = (tableint *) (data + 1);
- for (int i = 0; i < size; i++) {
- tableint cand = datal[i];
- if (cand < 0 || cand > max_elements_)
- throw std::runtime_error("cand error");
- dist_t d = fstdistfunc_(query_data, getDataByInternalId(cand), dist_func_param_);
+ tableint *datal = (tableint *) (data + 1);
+ for (int i = 0; i < size; i++) {
+ tableint cand = datal[i];
+ if (cand > max_elements_)
+ throw std::runtime_error("cand error");
+ dist_t d = fstdistfunc_(query_data, getDataByInternalId(cand), dist_func_param_);
- if (d < curdist) {
- curdist = d;
- currObj = cand;
- changed = true;
+ if (d < curdist) {
+ curdist = d;
+ currObj = cand;
+ changed = true;
+ }
}
}
}
diff --git a/hnswlib/visited_list_pool.h b/hnswlib/visited_list_pool.h
index 5e1a4a5..4195ebd 100644
--- a/hnswlib/visited_list_pool.h
+++ b/hnswlib/visited_list_pool.h
@@ -3,6 +3,7 @@
#include <mutex>
#include <string.h>
#include <deque>
+#include <limits>
namespace hnswlib {
typedef unsigned short int vl_type;
@@ -14,7 +15,7 @@ namespace hnswlib {
unsigned int numelements;
VisitedList(int numelements1) {
- curV = -1;
+ curV = std::numeric_limits<vl_type>::max();
numelements = numelements1;
mass = new vl_type[numelements];
}
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/modules/FindAVX.cmake | # =============================================================================
# Copyright (c) 2016- Facebook, Inc (Adam Paszke)
# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
#
# Note: This file was copied from PyTorch and modified for use in the RAFT library.
# Refer to thirdparty/LICENSES/LICENSE.pytorch for license and additional
# copyright information.
# =============================================================================
INCLUDE(CheckCXXSourceRuns)
SET(AVX_CODE
"
#include <immintrin.h>
int main()
{
__m256 a;
a = _mm256_set1_ps(0);
return 0;
}
"
)
SET(AVX512_CODE
"
#include <immintrin.h>
int main()
{
__m512i a = _mm512_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0);
__m512i b = a;
__mmask64 equality_mask = _mm512_cmp_epi8_mask(a, b, _MM_CMPINT_EQ);
return 0;
}
"
)
SET(AVX2_CODE
"
#include <immintrin.h>
int main()
{
__m256i a = {0};
a = _mm256_abs_epi16(a);
__m256i x;
_mm256_extract_epi64(x, 0); // we rely on this in our AVX2 code
return 0;
}
"
)
# CHECK_SSE(<lang> <type> <flags>)
#
# Probe compiler support for an instruction set. Tries each candidate flag in
# <flags> (a ;-separated list) until the test program in ${<type>_CODE} both
# compiles AND runs. On the first success it caches:
#   <lang>_<type>_FOUND = TRUE   and   <lang>_<type>_FLAGS = <the flag>
# If no candidate works it caches FOUND = FALSE with empty FLAGS. Because the
# results are CACHEd, a previous successful probe short-circuits re-detection
# on reconfigure.
MACRO(CHECK_SSE lang type flags)
  SET(__FLAG_I 1)
  # Preserve the caller's CMAKE_REQUIRED_FLAGS; CHECK_CXX_SOURCE_RUNS reads it.
  SET(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
  FOREACH(__FLAG ${flags})
    IF(NOT ${lang}_${type}_FOUND)
      SET(CMAKE_REQUIRED_FLAGS ${__FLAG})
      CHECK_CXX_SOURCE_RUNS("${${type}_CODE}" ${lang}_HAS_${type}_${__FLAG_I})
      IF(${lang}_HAS_${type}_${__FLAG_I})
        SET(${lang}_${type}_FOUND
            TRUE
            CACHE BOOL "${lang} ${type} support"
        )
        SET(${lang}_${type}_FLAGS
            "${__FLAG}"
            CACHE STRING "${lang} ${type} flags"
        )
      ENDIF()
      # Unique suffix per candidate so each probe gets its own cached result.
      MATH(EXPR __FLAG_I "${__FLAG_I}+1")
    ENDIF()
  ENDFOREACH()
  SET(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})

  IF(NOT ${lang}_${type}_FOUND)
    SET(${lang}_${type}_FOUND
        FALSE
        CACHE BOOL "${lang} ${type} support"
    )
    SET(${lang}_${type}_FLAGS
        ""
        CACHE STRING "${lang} ${type} flags"
    )
  ENDIF()

  MARK_AS_ADVANCED(${lang}_${type}_FOUND ${lang}_${type}_FLAGS)
ENDMACRO()

# CHECK_SSE(C "AVX" " ;-mavx;/arch:AVX") CHECK_SSE(C "AVX2" " ;-mavx2 -mfma;/arch:AVX2") CHECK_SSE(C
# "AVX512" " ;-mavx512f -mavx512dq -mavx512vl -mavx512bw -mfma;/arch:AVX512")
#
# Candidate lists start with " " (no extra flag needed), then the GCC/Clang
# flag, then the MSVC /arch: form.
CHECK_SSE(CXX "AVX" " ;-mavx;/arch:AVX")
CHECK_SSE(CXX "AVX2" " ;-mavx2 -mfma;/arch:AVX2")
CHECK_SSE(CXX "AVX512" " ;-mavx512f -mavx512dq -mavx512vl -mavx512bw -mfma;/arch:AVX512")
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/modules/ConfigureCUDA.cmake | # =============================================================================
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Optionally silence deprecation warnings in both the host (CXX) and device
# (CUDA, via -Xcompiler pass-through) compile passes.
if(DISABLE_DEPRECATION_WARNINGS)
  list(APPEND RAFT_CXX_FLAGS -Wno-deprecated-declarations)
  list(APPEND RAFT_CUDA_FLAGS -Xcompiler=-Wno-deprecated-declarations)
endif()

# Be very strict when compiling with GCC as host compiler (and thus more lenient when compiling with
# clang)
if(CMAKE_COMPILER_IS_GNUCXX)
  list(APPEND RAFT_CXX_FLAGS -Wall -Werror -Wno-unknown-pragmas -Wno-error=deprecated-declarations)
  list(APPEND RAFT_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)
  # set warnings as errors (nvcc's -Werror=all-warnings needs CUDA >= 11.2)
  if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 11.2.0)
    list(APPEND RAFT_CUDA_FLAGS -Werror=all-warnings)
  endif()
endif()

# Emit a per-translation-unit compile-time CSV (useful for tracking nvcc
# build-time regressions).
if(CUDA_LOG_COMPILE_TIME)
  list(APPEND RAFT_CUDA_FLAGS "--time=nvcc_compile_log.csv")
endif()

list(APPEND RAFT_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)
list(APPEND RAFT_CXX_FLAGS "-DCUDA_API_PER_THREAD_DEFAULT_STREAM")
list(APPEND RAFT_CUDA_FLAGS "-DCUDA_API_PER_THREAD_DEFAULT_STREAM")

# make sure we produce smallest binary size
list(APPEND RAFT_CUDA_FLAGS -Xfatbin=-compress-all)

# Option to enable line info in CUDA device compilation to allow introspection when profiling /
# memchecking
if(CUDA_ENABLE_LINEINFO)
  list(APPEND RAFT_CUDA_FLAGS -lineinfo)
endif()

# Propagate host OpenMP flags to the device-compile host pass.
if(OpenMP_FOUND)
  list(APPEND RAFT_CUDA_FLAGS -Xcompiler=${OpenMP_CXX_FLAGS})
endif()

# Debug options
if(CMAKE_BUILD_TYPE MATCHES Debug)
  message(VERBOSE "RAFT: Building with debugging flags")
  list(APPEND RAFT_CUDA_FLAGS -G -Xcompiler=-rdynamic)
  list(APPEND RAFT_CUDA_FLAGS -Xptxas --suppress-stack-size-warning)
endif()
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/thirdparty/get_glog.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
# Fetch (or find) glog via CPM and attach it to the cuvs export sets.
# Keyword arguments (parsed under the PKG_ prefix):
#   VERSION / FORK / PINNED_TAG  which glog to fetch
#   EXCLUDE_FROM_ALL             forwarded to add_subdirectory
function(find_and_configure_glog)
    set(oneValueArgs VERSION FORK PINNED_TAG EXCLUDE_FROM_ALL)
    cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
                          "${multiValueArgs}" ${ARGN} )

    rapids_cpm_find(glog ${PKG_VERSION}
            GLOBAL_TARGETS      glog::glog
            BUILD_EXPORT_SET    cuvs-exports
            INSTALL_EXPORT_SET  cuvs-exports
            CPM_ARGS
            GIT_REPOSITORY https://github.com/${PKG_FORK}/glog.git
            GIT_TAG        ${PKG_PINNED_TAG}
            EXCLUDE_FROM_ALL ${PKG_EXCLUDE_FROM_ALL}
            )

    # glog_ADDED is set by CPM when the package was built from source rather
    # than found pre-installed.
    if(glog_ADDED)
        message(VERBOSE "RAFT: Using glog located in ${glog_SOURCE_DIR}")
    else()
        message(VERBOSE "RAFT: Using glog located in ${glog_DIR}")
    endif()
endfunction()

# Change pinned tag here to test a commit in CI
# To use a different glog locally, set the CMake variable
# CPM_glog_SOURCE=/path/to/local/glog
find_and_configure_glog(VERSION 0.6.0
        FORK google
        PINNED_TAG v0.6.0
        EXCLUDE_FROM_ALL ON
        )
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/thirdparty/get_thrust.cmake | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Use CPM to find or clone thrust
function(find_and_configure_thrust)
    include(${rapids-cmake-dir}/cpm/thrust.cmake)
    # Wrap thrust under the cuvs namespace and attach it to both export sets
    # so downstream consumers link the same thrust that cuvs was built with.
    rapids_cpm_thrust( NAMESPACE cuvs
                       BUILD_EXPORT_SET cuvs-exports
                       INSTALL_EXPORT_SET cuvs-exports)
endfunction()

find_and_configure_thrust()
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/thirdparty/get_spdlog.cmake | # =============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Use CPM to find or clone speedlog
# Fetch (or find) spdlog (header-only, with external fmt) via CPM.
function(find_and_configure_spdlog)
  include(${rapids-cmake-dir}/cpm/spdlog.cmake)
  # NOTE(review): the export sets below register spdlog under "rmm-exports",
  # which looks copy-pasted from RMM's build — confirm whether this should be
  # cuvs-exports like the other thirdparty modules in this directory.
  rapids_cpm_spdlog(FMT_OPTION "EXTERNAL_FMT_HO" INSTALL_EXPORT_SET rmm-exports)
  rapids_export_package(BUILD spdlog rmm-exports)

  # Only generate build-tree export files when spdlog was added from source.
  if(spdlog_ADDED)
    rapids_export(
      BUILD spdlog
      EXPORT_SET spdlog
      GLOBAL_TARGETS spdlog spdlog_header_only
      NAMESPACE spdlog::)
    include("${rapids-cmake-dir}/export/find_package_root.cmake")
    rapids_export_find_package_root(BUILD spdlog [=[${CMAKE_CURRENT_LIST_DIR}]=] EXPORT_SET rmm-exports)
  endif()
endfunction()

find_and_configure_spdlog()
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/thirdparty/get_cutlass.cmake | # =============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Fetch (or find) NVIDIA CUTLASS via CPM, configured header-only, and register
# it with the cuvs export sets so `find_dependency(NvidiaCutlass)` resolves
# for downstream consumers of cuvs.
function(find_and_configure_cutlass)
  set(oneValueArgs VERSION REPOSITORY PINNED_TAG)
  cmake_parse_arguments(PKG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

  # if(RAFT_ENABLE_DIST_DEPENDENCIES OR RAFT_COMPILE_LIBRARIES)
  # Header-only: no CUTLASS tools/library targets get compiled.
  set(CUTLASS_ENABLE_HEADERS_ONLY
      ON
      CACHE BOOL "Enable only the header library"
  )
  # Custom top-level namespace avoids symbol clashes with any other CUTLASS
  # present in the same build.
  set(CUTLASS_NAMESPACE
      "cuvs_cutlass"
      CACHE STRING "Top level namespace of CUTLASS"
  )
  set(CUTLASS_ENABLE_CUBLAS
      OFF
      CACHE BOOL "Disable CUTLASS to build with cuBLAS library."
  )

  if (CUDA_STATIC_RUNTIME)
    set(CUDART_LIBRARY "${CUDA_cudart_static_LIBRARY}" CACHE FILEPATH "fixing cutlass cmake code" FORCE)
  endif()

  rapids_cpm_find(
    NvidiaCutlass ${PKG_VERSION}
    GLOBAL_TARGETS nvidia::cutlass::cutlass
    CPM_ARGS
    GIT_REPOSITORY ${PKG_REPOSITORY}
    GIT_TAG ${PKG_PINNED_TAG}
    GIT_SHALLOW TRUE
    OPTIONS "CUDAToolkit_ROOT ${CUDAToolkit_LIBRARY_DIR}"
  )

  # Provide the namespaced alias when CUTLASS was added as a subproject and
  # did not define it itself.
  if(TARGET CUTLASS AND NOT TARGET nvidia::cutlass::cutlass)
    add_library(nvidia::cutlass::cutlass ALIAS CUTLASS)
  endif()

  if(NvidiaCutlass_ADDED)
    rapids_export(
      BUILD NvidiaCutlass
      EXPORT_SET NvidiaCutlass
      GLOBAL_TARGETS nvidia::cutlass::cutlass
      NAMESPACE nvidia::cutlass::
    )
  endif()
  # endif()

  # We generate the cutlass-config files when we built cutlass locally, so always do
  # `find_dependency`
  rapids_export_package(
    BUILD NvidiaCutlass cuvs-exports GLOBAL_TARGETS nvidia::cutlass::cutlass
  )
  rapids_export_package(
    INSTALL NvidiaCutlass cuvs-exports GLOBAL_TARGETS nvidia::cutlass::cutlass
  )

  # Tell cmake where it can find the generated NvidiaCutlass-config.cmake we wrote.
  include("${rapids-cmake-dir}/export/find_package_root.cmake")
  rapids_export_find_package_root(
    INSTALL NvidiaCutlass [=[${CMAKE_CURRENT_LIST_DIR}/../]=]
    EXPORT_SET cuvs-exports
  )
  rapids_export_find_package_root(
    BUILD NvidiaCutlass [=[${CMAKE_CURRENT_LIST_DIR}]=]
    EXPORT_SET cuvs-exports
  )
endfunction()

# Allow callers to override the CUTLASS source before including this module.
if(NOT RAFT_CUTLASS_GIT_TAG)
  set(RAFT_CUTLASS_GIT_TAG v2.10.0)
endif()

if(NOT RAFT_CUTLASS_GIT_REPOSITORY)
  set(RAFT_CUTLASS_GIT_REPOSITORY https://github.com/NVIDIA/cutlass.git)
endif()

find_and_configure_cutlass(
  VERSION 2.10.0 REPOSITORY ${RAFT_CUTLASS_GIT_REPOSITORY} PINNED_TAG ${RAFT_CUTLASS_GIT_TAG}
)
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/thirdparty/get_raft.cmake | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# Use RAPIDS_VERSION from cmake/thirdparty/fetch_rapids.cmake
set(RAFT_VERSION "${RAPIDS_VERSION}")
set(RAFT_FORK "rapidsai")
set(RAFT_PINNED_TAG "branch-${RAPIDS_VERSION}")
# Fetch (or find) RAFT via CPM and expose it as raft::raft.
#
# Keyword arguments (parsed with the PKG_ prefix):
#   VERSION / FORK / PINNED_TAG   which raft to fetch
#   COMPILE_LIBRARY               also build raft's precompiled ("compiled") component
#   ENABLE_MNMG_DEPENDENCIES      pull in raft's "distributed" component
#   ENABLE_NVTX                   enable NVTX annotations inside raft
function(find_and_configure_raft)
  set(oneValueArgs VERSION FORK PINNED_TAG COMPILE_LIBRARY ENABLE_NVTX ENABLE_MNMG_DEPENDENCIES)
  cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
                        "${multiValueArgs}" ${ARGN} )

  # Accumulate the optional raft components to request (leading spaces are
  # harmless: the unquoted expansion below splits on whitespace).
  set(RAFT_COMPONENTS "")
  if(PKG_COMPILE_LIBRARY)
    string(APPEND RAFT_COMPONENTS " compiled")
  endif()

  if(PKG_ENABLE_MNMG_DEPENDENCIES)
    string(APPEND RAFT_COMPONENTS " distributed")
  endif()

  #-----------------------------------------------------
  # Invoke CPM find_package()
  #-----------------------------------------------------
  rapids_cpm_find(raft ${PKG_VERSION}
          GLOBAL_TARGETS      raft::raft
          BUILD_EXPORT_SET    cuvs-template-exports
          INSTALL_EXPORT_SET  cuvs-template-exports
          COMPONENTS ${RAFT_COMPONENTS}
            CPM_ARGS
            GIT_REPOSITORY https://github.com/${PKG_FORK}/raft.git
            GIT_TAG ${PKG_PINNED_TAG}
            SOURCE_SUBDIR  cpp
            OPTIONS
              "BUILD_TESTS OFF"
              "BUILD_PRIMS_BENCH OFF"
              "BUILD_ANN_BENCH OFF"
              # Bug fix: cmake_parse_arguments stores parsed keywords with the
              # PKG_ prefix, so the NVTX setting must be read as
              # PKG_ENABLE_NVTX. The previous ${ENABLE_NVTX} referenced an
              # undefined variable and always expanded empty, silently
              # ignoring the caller's ENABLE_NVTX argument.
              "RAFT_NVTX ${PKG_ENABLE_NVTX}"
              "RAFT_COMPILE_LIBRARY ${PKG_COMPILE_LIBRARY}"
  )
endfunction()

# Change pinned tag here to test a commit in CI
# To use a different RAFT locally, set the CMake variable
# CPM_raft_SOURCE=/path/to/local/raft
find_and_configure_raft(VERSION ${RAFT_VERSION}.00
        FORK             ${RAFT_FORK}
        PINNED_TAG       ${RAFT_PINNED_TAG}
        COMPILE_LIBRARY  ON
        ENABLE_MNMG_DEPENDENCIES OFF
        ENABLE_NVTX      OFF
)
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/thirdparty/get_hnswlib.cmake | #=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_hnswlib)
set(oneValueArgs VERSION FORK PINNED_TAG EXCLUDE_FROM_ALL)
cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
"${multiValueArgs}" ${ARGN} )
set ( EXTERNAL_INCLUDES_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} )
if( NOT EXISTS ${EXTERNAL_INCLUDES_DIRECTORY}/_deps/hnswlib-src )
execute_process (
COMMAND git clone --branch=v0.6.2 https://github.com/nmslib/hnswlib.git hnswlib-src
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/_deps )
message("SOURCE ${CMAKE_CURRENT_SOURCE_DIR}")
execute_process (
COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/cmake/patches/hnswlib.patch
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/_deps/hnswlib-src
)
endif ()
include(cmake/modules/FindAVX.cmake)
set(HNSW_CXX_FLAGS "")
if(CXX_AVX_FOUND)
set(HNSW_CXX_FLAGS "${HNSW_CXX_FLAGS} ${CXX_AVX_FLAGS}")
elseif(CXX_AVX2_FOUND)
set(HNSW_CXX_FLAGS "${HNSW_CXX_FLAGS} ${CXX_AVX2_FLAGS}")
elseif(CXX_AVX512_FOUND)
set(HNSW_CXX_FLAGS "${HNSW_CXX_FLAGS} ${CXX_AVX512_FLAGS}")
endif()
endfunction()
# Change pinned tag here to test a commit in CI
# To use a different RAFT locally, set the CMake variable
# CPM_cuvs_SOURCE=/path/to/local/cuvs
find_and_configure_hnswlib(VERSION 0.6.2
FORK nmslib
PINNED_TAG v0.6.2
EXCLUDE_FROM_ALL YES)
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/thirdparty/get_gtest.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_gtest )
include(${rapids-cmake-dir}/cpm/gtest.cmake)
rapids_cpm_gtest()
endfunction()
find_and_configure_gtest()
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/thirdparty/get_nlohmann_json.cmake | #=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_nlohmann_json)
set(oneValueArgs VERSION FORK PINNED_TAG EXCLUDE_FROM_ALL)
cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
"${multiValueArgs}" ${ARGN} )
rapids_cpm_find(nlohmann_json ${PKG_VERSION}
GLOBAL_TARGETS nlohmann_json::nlohmann_json
BUILD_EXPORT_SET cuvs-bench-exports
INSTALL_EXPORT_SET cuvs-bench-exports
CPM_ARGS
GIT_REPOSITORY https://github.com/${PKG_FORK}/json.git
GIT_TAG ${PKG_PINNED_TAG}
EXCLUDE_FROM_ALL ${PKG_EXCLUDE_FROM_ALL})
endfunction()
# Change pinned tag here to test a commit in CI
# To use a different RAFT locally, set the CMake variable
# CPM_cuvs_SOURCE=/path/to/local/cuvs
find_and_configure_nlohmann_json(VERSION 3.11.2
FORK nlohmann
PINNED_TAG v3.11.2
EXCLUDE_FROM_ALL YES)
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/thirdparty/get_ggnn.cmake | #=============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_ggnn)
set(oneValueArgs VERSION FORK PINNED_TAG EXCLUDE_FROM_ALL)
cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
"${multiValueArgs}" ${ARGN} )
set ( EXTERNAL_INCLUDES_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ )
if (NOT EXISTS ${EXTERNAL_INCLUDES_DIRECTORY}/_deps/ggnn-src/)
execute_process (
COMMAND git clone "https://github.com/${PKG_FORK}/ggnn" --branch ${PKG_PINNED_TAG} ggnn-src
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/_deps/ )
message("SOURCE ${CMAKE_CURRENT_SOURCE_DIR}")
execute_process (
COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/cmake/patches/ggnn.patch
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/_deps/ggnn-src
)
endif()
endfunction()
# Change pinned tag here to test a commit in CI
# To use a different RAFT locally, set the CMake variable
# CPM_cuvs_SOURCE=/path/to/local/cuvs
find_and_configure_ggnn(VERSION 0.5
FORK cgtuebingen
PINNED_TAG release_0.5
EXCLUDE_FROM_ALL YES)
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/thirdparty/get_rmm.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_rmm)
include(${rapids-cmake-dir}/cpm/rmm.cmake)
rapids_cpm_rmm(BUILD_EXPORT_SET cuvs-exports
INSTALL_EXPORT_SET cuvs-exports)
endfunction()
find_and_configure_rmm()
| 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/thirdparty/get_fmt.cmake | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Use CPM to find or clone fmt
function(find_and_configure_fmt)
include(${rapids-cmake-dir}/cpm/fmt.cmake)
rapids_cpm_fmt(INSTALL_EXPORT_SET rmm-exports BUILD_EXPORT_SET rmm-exports)
endfunction()
find_and_configure_fmt() | 0 |
rapidsai_public_repos/cuvs/cpp/cmake | rapidsai_public_repos/cuvs/cpp/cmake/thirdparty/get_faiss.cmake | #=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_faiss)
set(oneValueArgs VERSION REPOSITORY PINNED_TAG BUILD_STATIC_LIBS EXCLUDE_FROM_ALL ENABLE_GPU)
cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
"${multiValueArgs}" ${ARGN} )
rapids_find_generate_module(faiss
HEADER_NAMES faiss/IndexFlat.h
LIBRARY_NAMES faiss
)
set(BUILD_SHARED_LIBS ON)
if (PKG_BUILD_STATIC_LIBS)
set(BUILD_SHARED_LIBS OFF)
set(CPM_DOWNLOAD_faiss ON)
endif()
include(cmake/modules/FindAVX.cmake)
# Link against AVX CPU lib if it exists
set(RAFT_FAISS_GLOBAL_TARGETS faiss::faiss)
set(RAFT_FAISS_EXPORT_GLOBAL_TARGETS faiss)
set(RAFT_FAISS_OPT_LEVEL "generic")
if(CXX_AVX_FOUND)
set(RAFT_FAISS_OPT_LEVEL "avx2")
list(APPEND RAFT_FAISS_GLOBAL_TARGETS faiss::faiss_avx2)
list(APPEND RAFT_FAISS_EXPORT_GLOBAL_TARGETS faiss_avx2)
endif()
rapids_cpm_find(faiss ${PKG_VERSION}
GLOBAL_TARGETS ${RAFT_FAISS_GLOBAL_TARGETS}
CPM_ARGS
GIT_REPOSITORY ${PKG_REPOSITORY}
GIT_TAG ${PKG_PINNED_TAG}
EXCLUDE_FROM_ALL ${PKG_EXCLUDE_FROM_ALL}
OPTIONS
"FAISS_ENABLE_GPU ${PKG_ENABLE_GPU}"
"FAISS_ENABLE_PYTHON OFF"
"FAISS_OPT_LEVEL ${RAFT_FAISS_OPT_LEVEL}"
"FAISS_USE_CUDA_TOOLKIT_STATIC ${CUDA_STATIC_RUNTIME}"
"BUILD_TESTING OFF"
"CMAKE_MESSAGE_LOG_LEVEL VERBOSE"
)
if(TARGET faiss AND NOT TARGET faiss::faiss)
add_library(faiss::faiss ALIAS faiss)
endif()
if(CXX_AVX_FOUND)
if(TARGET faiss_avx2 AND NOT TARGET faiss::faiss_avx2)
add_library(faiss::faiss_avx2 ALIAS faiss_avx2)
endif()
endif()
if(faiss_ADDED)
rapids_export(BUILD faiss
EXPORT_SET faiss-targets
GLOBAL_TARGETS ${RAFT_FAISS_EXPORT_GLOBAL_TARGETS}
NAMESPACE faiss::)
endif()
# We generate the faiss-config files when we built faiss locally, so always do `find_dependency`
rapids_export_package(BUILD OpenMP cuvs-ann-bench-exports) # faiss uses openMP but doesn't export a need for it
rapids_export_package(BUILD faiss cuvs-ann-bench-exports GLOBAL_TARGETS ${RAFT_FAISS_GLOBAL_TARGETS} ${RAFT_FAISS_EXPORT_GLOBAL_TARGETS})
rapids_export_package(INSTALL faiss cuvs-ann-bench-exports GLOBAL_TARGETS ${RAFT_FAISS_GLOBAL_TARGETS} ${RAFT_FAISS_EXPORT_GLOBAL_TARGETS})
# Tell cmake where it can find the generated faiss-config.cmake we wrote.
include("${rapids-cmake-dir}/export/find_package_root.cmake")
rapids_export_find_package_root(BUILD faiss [=[${CMAKE_CURRENT_LIST_DIR}]=]
EXPORT_SET cuvs-ann-bench-exports)
endfunction()
if(NOT RAFT_FAISS_GIT_TAG)
# TODO: Remove this once faiss supports FAISS_USE_CUDA_TOOLKIT_STATIC
# (https://github.com/facebookresearch/faiss/pull/2446)
set(RAFT_FAISS_GIT_TAG fea/statically-link-ctk)
# set(RAFT_FAISS_GIT_TAG bde7c0027191f29c9dadafe4f6e68ca0ee31fb30)
endif()
if(NOT RAFT_FAISS_GIT_REPOSITORY)
# TODO: Remove this once faiss supports FAISS_USE_CUDA_TOOLKIT_STATIC
# (https://github.com/facebookresearch/faiss/pull/2446)
set(RAFT_FAISS_GIT_REPOSITORY https://github.com/cjnolet/faiss.git)
# set(RAFT_FAISS_GIT_REPOSITORY https://github.com/facebookresearch/faiss.git)
endif()
find_and_configure_faiss(VERSION 1.7.4
REPOSITORY ${RAFT_FAISS_GIT_REPOSITORY}
PINNED_TAG ${RAFT_FAISS_GIT_TAG}
BUILD_STATIC_LIBS ${RAFT_USE_FAISS_STATIC}
EXCLUDE_FROM_ALL ${RAFT_EXCLUDE_FAISS_FROM_ALL}
ENABLE_GPU ${RAFT_FAISS_ENABLE_GPU})
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/fused_l2_knn_int32_t_float.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstddef> // size_t
#include <cstdint> // int_Xt
#include <cuvs/distance/distance_types.hpp> // DistanceType
#include <cuvs/spatial/knn/detail/fused_l2_knn-inl.cuh>
#define instantiate_raft_spatial_knn_detail_fusedL2Knn(Mvalue_idx, Mvalue_t, MusePrevTopKs) \
template void cuvs::spatial::knn::detail::fusedL2Knn<Mvalue_idx, Mvalue_t, MusePrevTopKs>( \
size_t D, \
Mvalue_idx * out_inds, \
Mvalue_t * out_dists, \
const Mvalue_t* index, \
const Mvalue_t* query, \
size_t n_index_rows, \
size_t n_query_rows, \
int k, \
bool rowMajorIndex, \
bool rowMajorQuery, \
cudaStream_t stream, \
cuvs::distance::DistanceType metric, \
const Mvalue_t* index_norms, \
const Mvalue_t* query_norms)
instantiate_raft_spatial_knn_detail_fusedL2Knn(int32_t, float, true);
instantiate_raft_spatial_knn_detail_fusedL2Knn(int32_t, float, false);
#undef instantiate_raft_spatial_knn_detail_fusedL2Knn
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/fused_l2_knn_int64_t_float.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstddef> // size_t
#include <cstdint> // int_Xt
#include <cuvs/distance/distance_types.hpp> // DistanceType
#include <cuvs/spatial/knn/detail/fused_l2_knn-inl.cuh>
#define instantiate_raft_spatial_knn_detail_fusedL2Knn(Mvalue_idx, Mvalue_t, MusePrevTopKs) \
template void cuvs::spatial::knn::detail::fusedL2Knn<Mvalue_idx, Mvalue_t, MusePrevTopKs>( \
size_t D, \
Mvalue_idx * out_inds, \
Mvalue_t * out_dists, \
const Mvalue_t* index, \
const Mvalue_t* query, \
size_t n_index_rows, \
size_t n_query_rows, \
int k, \
bool rowMajorIndex, \
bool rowMajorQuery, \
cudaStream_t stream, \
cuvs::distance::DistanceType metric, \
const Mvalue_t* index_norms, \
const Mvalue_t* query_norms)
instantiate_raft_spatial_knn_detail_fusedL2Knn(int64_t, float, true);
instantiate_raft_spatial_knn_detail_fusedL2Knn(int64_t, float, false);
#undef instantiate_raft_spatial_knn_detail_fusedL2Knn
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/fused_l2_knn_uint32_t_float.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstddef> // size_t
#include <cstdint> // int_Xt
#include <cuvs/distance/distance_types.hpp> // DistanceType
#include <cuvs/spatial/knn/detail/fused_l2_knn-inl.cuh>
#define instantiate_raft_spatial_knn_detail_fusedL2Knn(Mvalue_idx, Mvalue_t, MusePrevTopKs) \
template void cuvs::spatial::knn::detail::fusedL2Knn<Mvalue_idx, Mvalue_t, MusePrevTopKs>( \
size_t D, \
Mvalue_idx * out_inds, \
Mvalue_t * out_dists, \
const Mvalue_t* index, \
const Mvalue_t* query, \
size_t n_index_rows, \
size_t n_query_rows, \
int k, \
bool rowMajorIndex, \
bool rowMajorQuery, \
cudaStream_t stream, \
cuvs::distance::DistanceType metric, \
const Mvalue_t* index_norms, \
const Mvalue_t* query_norms)
// These are used by brute_force_knn:
instantiate_raft_spatial_knn_detail_fusedL2Knn(uint32_t, float, true);
instantiate_raft_spatial_knn_detail_fusedL2Knn(uint32_t, float, false);
#undef instantiate_raft_spatial_knn_detail_fusedL2Knn
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_pass_two_2d_euclidean.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_two<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
std::int64_t, float, std::uint32_t, 2, cuvs::spatial::knn::detail::EuclideanFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_pass_two_3d_dist.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_two<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
std::int64_t, float, std::uint32_t, 3, cuvs::spatial::knn::detail::DistFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_one<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
cuvs::spatial::knn::detail::DistFunc<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_two<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
cuvs::spatial::knn::detail::DistFunc<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(std::int64_t, float, std::uint32_t, 2);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(std::int64_t, float, std::uint32_t, 3);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(std::int64_t, float, std::uint32_t, 2);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(std::int64_t, float, std::uint32_t, 3);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_pass_two_2d_haversine.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_two<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
std::int64_t, float, std::uint32_t, 2, cuvs::spatial::knn::detail::HaversineFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_pass_one_2d_haversine.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_one<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
std::int64_t, float, std::uint32_t, 2, cuvs::spatial::knn::detail::HaversineFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_pass_two_3d_euclidean.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_two<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
std::int64_t, float, std::uint32_t, 3, cuvs::spatial::knn::detail::EuclideanFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_pass_one_3d_euclidean.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_one<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
// Explicit instantiation: int64_t indices, float data, 3-D, Euclidean metric.
// NOTE(review): generated file -- make changes in registers_00_generate.py.
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
  std::int64_t, float, std::uint32_t, 3, cuvs::spatial::knn::detail::EuclideanFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_pass_one_2d_euclidean.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_one<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
// Explicit instantiation: int64_t indices, float data, 2-D, Euclidean metric.
// NOTE(review): generated file -- make changes in registers_00_generate.py.
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
  std::int64_t, float, std::uint32_t, 2, cuvs::spatial::knn::detail::EuclideanFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_pass_one_2d_dist.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_one<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
// Explicit instantiation: int64_t indices, float data, 2-D, generic DistFunc.
// NOTE(review): generated file -- make changes in registers_00_generate.py.
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
  std::int64_t, float, std::uint32_t, 2, cuvs::spatial::knn::detail::DistFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_pass_two_2d_dist.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_two<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
// Explicit instantiation: int64_t indices, float data, 2-D, generic DistFunc.
// NOTE(review): generated file -- make changes in registers_00_generate.py.
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
  std::int64_t, float, std::uint32_t, 2, cuvs::spatial::knn::detail::DistFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_pass_one_3d_dist.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_one<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
// Explicit instantiation: int64_t indices, float data, 3-D, generic DistFunc.
// NOTE(review): generated file -- make changes in registers_00_generate.py.
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
  std::int64_t, float, std::uint32_t, 3, cuvs::spatial::knn::detail::DistFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_pass_one_3d_haversine.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_one<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
// Explicit instantiation: int64_t indices, float data, 3-D, Haversine metric.
// NOTE(review): generated file -- make changes in registers_00_generate.py.
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
  std::int64_t, float, std::uint32_t, 3, cuvs::spatial::knn::detail::HaversineFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_00_generate.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
header = """/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
"""
macro_pass_one = """
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one( \\
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \\
template void \\
cuvs::spatial::knn::detail::rbc_low_dim_pass_one<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \\
raft::resources const& handle, \\
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \\
const Mvalue_t* query, \\
const Mvalue_int n_query_rows, \\
Mvalue_int k, \\
const Mvalue_idx* R_knn_inds, \\
const Mvalue_t* R_knn_dists, \\
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \\
Mvalue_idx* inds, \\
Mvalue_t* dists, \\
float weight, \\
Mvalue_int* dists_counter)
"""
macro_pass_two = """
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two( \\
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \\
template void \\
cuvs::spatial::knn::detail::rbc_low_dim_pass_two<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \\
raft::resources const& handle, \\
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \\
const Mvalue_t* query, \\
const Mvalue_int n_query_rows, \\
Mvalue_int k, \\
const Mvalue_idx* R_knn_inds, \\
const Mvalue_t* R_knn_dists, \\
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \\
Mvalue_idx* inds, \\
Mvalue_t* dists, \\
float weight, \\
Mvalue_int* dists_counter)
"""
# Maps each generated filename suffix to the distance functor it instantiates.
distances = dict(
    haversine="cuvs::spatial::knn::detail::HaversineFunc",
    euclidean="cuvs::spatial::knn::detail::EuclideanFunc",
    dist="cuvs::spatial::knn::detail::DistFunc",
)


def _write_instantiation(pass_name, macro, dim, suffix, functor):
    """Write one generated .cu translation unit and print its repo-relative path.

    pass_name: "pass_one" or "pass_two"; macro: the matching macro template
    string; dim: ball-cover dimensionality (2 or 3); suffix: filename suffix
    for the metric; functor: fully-qualified distance functor to instantiate.
    The printed path is emitted exactly as before (it is consumed downstream,
    e.g. for CMake source lists).
    """
    path = f"registers_{pass_name}_{dim}d_{suffix}.cu"
    macro_name = f"instantiate_raft_spatial_knn_detail_rbc_low_dim_{pass_name}"
    with open(path, "w") as f:
        f.write(header)
        f.write(macro)
        f.write(f"{macro_name}(\n")
        f.write(f"  std::int64_t, float, std::uint32_t, {dim}, {functor});\n")
        f.write(f"#undef {macro_name}\n")
    print(f"src/spatial/knn/detail/ball_cover/{path}")


# Previously two copy-pasted loops; a single driver now covers both passes.
# Iteration order (all pass-one files, then all pass-two files; distances in
# dict order, dims 2 then 3) is preserved so stdout is byte-identical.
for _pass_name, _macro in (("pass_one", macro_pass_one), ("pass_two", macro_pass_two)):
    for _suffix, _functor in distances.items():
        for _dim in [2, 3]:
            _write_instantiation(_pass_name, _macro, _dim, _suffix, _functor)
| 0 |
rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/src/spatial/knn/detail/ball_cover/registers_pass_two_3d_haversine.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by registers_00_generate.py
*
* Make changes there and run in this directory:
*
* > python registers_00_generate.py
*
*/
#include <cstdint> // int64_t
#include <cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh>
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_two<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
// Explicit instantiation: int64_t indices, float data, 3-D, Haversine metric.
// NOTE(review): generated file -- make changes in registers_00_generate.py.
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
  std::int64_t, float, std::uint32_t, 3, cuvs::spatial::knn::detail::HaversineFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/random/rmat_rectangular_generator_int_double.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common.cuh"
namespace cuvs::runtime::random {
// Expand the rmat_rectangular_gen wrapper (see FUNC_DEF in common.cuh) for
// int vertex indices / double quadrant probabilities.
FUNC_DEF(int, double);
} // namespace cuvs::runtime::random
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/random/common.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/rmat_rectangular_generator.cuh>
#include <raft_runtime/random/rmat_rectangular_generator.hpp>
// Stamps out one concrete host wrapper around raft::random::rmat_rectangular_gen
// for a given (IdxT, ProbT) pair; each rmat_rectangular_generator_*.cu in this
// directory expands it exactly once inside namespace cuvs::runtime::random.
//
// NOTE(review): the stream accessor was previously spelled with the unqualified
// `resource::` prefix, which does not resolve from the cuvs::runtime::random
// namespace the macro expands into (no raft using-directive is visible in this
// header); it is now fully qualified as raft::resource::get_cuda_stream.
#define FUNC_DEF(IdxT, ProbT)                                \
  void rmat_rectangular_gen(raft::resources const& handle,   \
                            IdxT* out,                       \
                            IdxT* out_src,                   \
                            IdxT* out_dst,                   \
                            const ProbT* theta,              \
                            IdxT r_scale,                    \
                            IdxT c_scale,                    \
                            IdxT n_edges,                    \
                            raft::random::RngState& r)       \
  {                                                          \
    raft::random::rmat_rectangular_gen<IdxT, ProbT>(         \
      out,                                                   \
      out_src,                                               \
      out_dst,                                               \
      theta,                                                 \
      r_scale,                                               \
      c_scale,                                               \
      n_edges,                                               \
      raft::resource::get_cuda_stream(handle),               \
      r);                                                    \
  }
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/random/rmat_rectangular_generator_int64_float.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common.cuh"
namespace cuvs::runtime::random {
// Expand the rmat_rectangular_gen wrapper (see FUNC_DEF in common.cuh) for
// int64_t vertex indices / float quadrant probabilities.
FUNC_DEF(int64_t, float);
} // namespace cuvs::runtime::random
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/random/rmat_rectangular_generator_int_float.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common.cuh"
namespace cuvs::runtime::random {
// Expand the rmat_rectangular_gen wrapper (see FUNC_DEF in common.cuh) for
// int vertex indices / float quadrant probabilities.
FUNC_DEF(int, float);
} // namespace cuvs::runtime::random
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/random/rmat_rectangular_generator_int64_double.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common.cuh"
namespace cuvs::runtime::random {
// Expand the rmat_rectangular_gen wrapper (see FUNC_DEF in common.cuh) for
// int64_t vertex indices / double quadrant probabilities.
FUNC_DEF(int64_t, double);
} // namespace cuvs::runtime::random
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/distance/fused_l2_min_arg.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/distance/fused_l2_nn.cuh>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/kvp.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/norm.cuh>
#include <thrust/for_each.h>
#include <thrust/tuple.h>
namespace cuvs::runtime::distance {
// Projection functor that reduces a raft::KeyValuePair to its key (the index
// component); used below to strip distances from the fused-L2-NN output.
template <typename IndexT, typename DataT>
struct KeyValueIndexOp {
  // Usable on host or device (thrust::transform runs it on the device here).
  __host__ __device__ __forceinline__ IndexT
  operator()(const raft::KeyValuePair<IndexT, DataT>& a) const
  {
    return a.key;
  }
};
/**
 * For each of the m rows of x, finds the index of its nearest row in y under
 * L2 distance via the fused L2-NN primitive, writing the argmin indices to
 * `min`. Blocks on the handle's stream before returning.
 *
 * @param handle raft resources providing the stream and thrust policy
 * @param min    [out] device array of m argmin indices into y
 * @param x      [in] device matrix data with m rows of dimension k
 * @param y      [in] device matrix data with n rows of dimension k
 * @param sqrt   when true, distances are square-rooted before reduction
 *
 * NOTE(review): the resource accessors were previously spelled with the
 * unqualified `resource::` prefix, which does not resolve from the enclosing
 * cuvs::runtime::distance namespace (no raft using-directive is visible in
 * this TU); they are now fully qualified under raft::resource.
 */
template <typename value_t, typename idx_t>
void compute_fused_l2_nn_min_arg(raft::resources const& handle,
                                 idx_t* min,
                                 const value_t* x,
                                 const value_t* y,
                                 idx_t m,
                                 idx_t n,
                                 idx_t k,
                                 bool sqrt)
{
  auto stream = raft::resource::get_cuda_stream(handle);
  // Scratch buffer required by fusedL2NNMinReduce (one int per row of x).
  rmm::device_uvector<int> workspace(m, stream);
  // The fused kernel emits (index, distance) pairs; distances are dropped below.
  auto kvp = raft::make_device_vector<raft::KeyValuePair<idx_t, value_t>>(handle, m);

  // Precompute squared row norms of both inputs, as the fused kernel expects.
  rmm::device_uvector<value_t> x_norms(m, stream);
  rmm::device_uvector<value_t> y_norms(n, stream);
  raft::linalg::rowNorm(x_norms.data(), x, k, m, raft::linalg::L2Norm, true, stream);
  raft::linalg::rowNorm(y_norms.data(), y, k, n, raft::linalg::L2Norm, true, stream);

  cuvs::distance::fusedL2NNMinReduce(kvp.data_handle(),
                                     x,
                                     y,
                                     x_norms.data(),
                                     y_norms.data(),
                                     m,
                                     n,
                                     k,
                                     (void*)workspace.data(),
                                     sqrt,
                                     true,
                                     stream);

  // Keep only the argmin index of each (index, distance) pair.
  KeyValueIndexOp<idx_t, value_t> conversion_op;
  thrust::transform(raft::resource::get_thrust_policy(handle),
                    kvp.data_handle(),
                    kvp.data_handle() + m,
                    min,
                    conversion_op);
  raft::resource::sync_stream(handle);
}
// Public float/int overload; forwards to the templated implementation above.
void fused_l2_nn_min_arg(raft::resources const& handle,
                         int* min,
                         const float* x,
                         const float* y,
                         int m,
                         int n,
                         int k,
                         bool sqrt)
{
  compute_fused_l2_nn_min_arg<float, int>(handle, min, x, y, m, n, k, sqrt);
}
// Public double/int overload; forwards to the templated implementation above.
void fused_l2_nn_min_arg(raft::resources const& handle,
                         int* min,
                         const double* x,
                         const double* y,
                         int m,
                         int n,
                         int k,
                         bool sqrt)
{
  compute_fused_l2_nn_min_arg<double, int>(handle, min, x, y, m, n, k, sqrt);
}
} // end namespace cuvs::runtime::distance
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/distance/pairwise_distance.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/distance/distance.cuh>
#include <cuvs/distance/distance_types.hpp>
#include <raft/core/resources.hpp>
namespace cuvs::runtime::distance {
// Computes the full m x n matrix of pairwise distances between x and y under
// `metric`, writing results into `dists`. Per the underlying
// cuvs::distance::pairwise_distance contract, x has m rows and y has n rows,
// each of dimension k; `isRowMajor` describes the layout of all three buffers
// and `metric_arg` parameterizes metrics that take one (e.g. Minkowski's p).
void pairwise_distance(raft::resources const& handle,
                       float* x,
                       float* y,
                       float* dists,
                       int m,
                       int n,
                       int k,
                       cuvs::distance::DistanceType metric,
                       bool isRowMajor,
                       float metric_arg)
{
  cuvs::distance::pairwise_distance<float, int>(
    handle, x, y, dists, m, n, k, metric, isRowMajor, metric_arg);
}
// double-precision overload of pairwise_distance; identical semantics to the
// float overload above but keeps distances in double precision (metric_arg
// remains float to match the public interface).
void pairwise_distance(raft::resources const& handle,
                       double* x,
                       double* y,
                       double* dists,
                       int m,
                       int n,
                       int k,
                       cuvs::distance::DistanceType metric,
                       bool isRowMajor,
                       float metric_arg)
{
  cuvs::distance::pairwise_distance<double, int>(
    handle, x, y, dists, m, n, k, metric, isRowMajor, metric_arg);
}
} // namespace cuvs::runtime::distance | 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/neighbors/refine_d_int64_t_int8_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/neighbors/refine.cuh>
namespace cuvs::runtime::neighbors {
// Device-memory refine for int8_t data: forwards to cuvs::neighbors::refine,
// which recomputes distances between each query and its candidate neighbours
// and writes the refined ids/distances into `indices`/`distances`.
// NOTE(review): `row_major` and `distance::DistanceType` are unqualified here
// and rely on name lookup finding the raft/cuvs aliases from the included
// headers -- confirm this TU compiles in the cuvs namespace.
void refine(raft::resources const& handle,
            raft::device_matrix_view<const int8_t, int64_t, row_major> dataset,
            raft::device_matrix_view<const int8_t, int64_t, row_major> queries,
            raft::device_matrix_view<const int64_t, int64_t, row_major> neighbor_candidates,
            raft::device_matrix_view<int64_t, int64_t, row_major> indices,
            raft::device_matrix_view<float, int64_t, row_major> distances,
            distance::DistanceType metric)
{
  cuvs::neighbors::refine<int64_t, int8_t, float, int64_t>(
    handle, dataset, queries, neighbor_candidates, indices, distances, metric);
}
} // namespace cuvs::runtime::neighbors
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/neighbors/brute_force_knn_int64_t_float.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/neighbors/brute_force.cuh>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft_runtime/neighbors/brute_force.hpp>
#include <vector>
namespace cuvs::runtime::neighbors::brute_force {
#define RAFT_INST_BFKNN(IDX_T, DATA_T, MATRIX_IDX_T, INDEX_LAYOUT, SEARCH_LAYOUT) \
void knn(raft::resources const& handle, \
raft::device_matrix_view<const DATA_T, MATRIX_IDX_T, INDEX_LAYOUT> index, \
raft::device_matrix_view<const DATA_T, MATRIX_IDX_T, SEARCH_LAYOUT> search, \
raft::device_matrix_view<IDX_T, MATRIX_IDX_T, row_major> indices, \
raft::device_matrix_view<DATA_T, MATRIX_IDX_T, row_major> distances, \
distance::DistanceType metric, \
std::optional<float> metric_arg, \
std::optional<IDX_T> global_id_offset) \
{ \
std::vector<raft::device_matrix_view<const DATA_T, MATRIX_IDX_T, INDEX_LAYOUT>> vec; \
vec.push_back(index); \
cuvs::neighbors::brute_force::knn( \
handle, vec, search, indices, distances, metric, metric_arg, global_id_offset); \
}
// Single supported combination: float data, int64_t ids, row-major layouts
// for both the index and the search queries.
RAFT_INST_BFKNN(int64_t, float, int64_t, raft::row_major, raft::row_major);
#undef RAFT_INST_BFKNN
} // namespace cuvs::runtime::neighbors::brute_force
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/neighbors/ivfpq_search_float_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/neighbors/ivf_pq.cuh>
#include <raft_runtime/neighbors/ivf_pq.hpp>
namespace cuvs::runtime::neighbors::ivf_pq {
#define RAFT_SEARCH_INST(T, IdxT) \
void search(raft::resources const& handle, \
const cuvs::neighbors::ivf_pq::search_params& params, \
const cuvs::neighbors::ivf_pq::index<IdxT>& idx, \
raft::device_matrix_view<const T, IdxT, row_major> queries, \
raft::device_matrix_view<IdxT, IdxT, row_major> neighbors, \
raft::device_matrix_view<float, IdxT, row_major> distances) \
{ \
cuvs::neighbors::ivf_pq::search<T, IdxT>(handle, params, idx, queries, neighbors, distances); \
}
// Concrete search() wrapper for float queries with int64_t indices.
RAFT_SEARCH_INST(float, int64_t);
// Fixed: this previously read `#undef RAFT_INST_SEARCH` (transposed name),
// which did not remove the macro and leaked RAFT_SEARCH_INST past this TU.
#undef RAFT_SEARCH_INST
} // namespace cuvs::runtime::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/neighbors/ivfpq_serialize.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/neighbors/ivf_pq.cuh>
#include <cuvs/neighbors/ivf_pq_serialize.cuh>
#include <raft_runtime/neighbors/ivf_pq.hpp>
namespace cuvs::runtime::neighbors::ivf_pq {
// Writes an int64_t-indexed IVF-PQ index to `filename` via the cuvs
// serialization routine.
void serialize(raft::resources const& handle,
               const std::string& filename,
               const cuvs::neighbors::ivf_pq::index<int64_t>& index)
{
  cuvs::neighbors::ivf_pq::serialize(handle, filename, index);
}  // fixed: removed the stray ';' that followed the function body (-Wextra-semi)
} // namespace cuvs::runtime::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/neighbors/ivfpq_search_uint8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/neighbors/ivf_pq.cuh>
#include <raft_runtime/neighbors/ivf_pq.hpp>
namespace cuvs::runtime::neighbors::ivf_pq {
#define RAFT_SEARCH_INST(T, IdxT) \
void search(raft::resources const& handle, \
const cuvs::neighbors::ivf_pq::search_params& params, \
const cuvs::neighbors::ivf_pq::index<IdxT>& idx, \
raft::device_matrix_view<const T, IdxT, row_major> queries, \
raft::device_matrix_view<IdxT, IdxT, row_major> neighbors, \
raft::device_matrix_view<float, IdxT, row_major> distances) \
{ \
cuvs::neighbors::ivf_pq::search<T, IdxT>(handle, params, idx, queries, neighbors, distances); \
}
// Concrete search() wrapper for uint8_t queries with int64_t indices.
RAFT_SEARCH_INST(uint8_t, int64_t);
// Fixed: this previously read `#undef RAFT_INST_SEARCH` (transposed name),
// which did not remove the macro and leaked RAFT_SEARCH_INST past this TU.
#undef RAFT_SEARCH_INST
} // namespace cuvs::runtime::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/neighbors/refine_h_int64_t_uint8_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/neighbors/refine.cuh>
namespace cuvs::runtime::neighbors {
/**
 * Runtime (non-templated) `refine` overload for uint8_t data held in host
 * memory. Forwards directly to the templated
 * cuvs::neighbors::refine<int64_t, uint8_t, float, int64_t>, which re-ranks
 * the given neighbor candidates (presumably by exact distance — confirm
 * against the templated implementation's documentation).
 *
 * @param handle               RAFT resources (streams, workspaces, etc.)
 * @param dataset              host matrix of uint8_t vectors, indexed by int64_t
 * @param queries              host matrix of uint8_t query vectors
 * @param neighbor_candidates  host matrix of candidate indices to refine
 * @param indices              output: refined neighbor indices
 * @param distances            output: distances for the refined neighbors
 * @param metric               distance metric used for the refinement
 */
void refine(raft::resources const& handle,
            raft::host_matrix_view<const uint8_t, int64_t, row_major> dataset,
            raft::host_matrix_view<const uint8_t, int64_t, row_major> queries,
            raft::host_matrix_view<const int64_t, int64_t, row_major> neighbor_candidates,
            raft::host_matrix_view<int64_t, int64_t, row_major> indices,
            raft::host_matrix_view<float, int64_t, row_major> distances,
            distance::DistanceType metric)
{
  // Thin delegation: all work happens in the templated implementation.
  cuvs::neighbors::refine<int64_t, uint8_t, float, int64_t>(
    handle, dataset, queries, neighbor_candidates, indices, distances, metric);
}
}  // namespace cuvs::runtime::neighbors
| 0 |
rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/src/cuvs_runtime/neighbors/refine_h_int64_t_float.cu |
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/neighbors/refine.cuh>
namespace cuvs::runtime::neighbors {
/**
 * Runtime (non-templated) `refine` overload for float data held in host
 * memory. Forwards directly to the templated
 * cuvs::neighbors::refine<int64_t, float, float, int64_t>, which re-ranks
 * the given neighbor candidates (presumably by exact distance — confirm
 * against the templated implementation's documentation).
 *
 * @param handle               RAFT resources (streams, workspaces, etc.)
 * @param dataset              host matrix of float vectors, indexed by int64_t
 * @param queries              host matrix of float query vectors
 * @param neighbor_candidates  host matrix of candidate indices to refine
 * @param indices              output: refined neighbor indices
 * @param distances            output: distances for the refined neighbors
 * @param metric               distance metric used for the refinement
 */
void refine(raft::resources const& handle,
            raft::host_matrix_view<const float, int64_t, row_major> dataset,
            raft::host_matrix_view<const float, int64_t, row_major> queries,
            raft::host_matrix_view<const int64_t, int64_t, row_major> neighbor_candidates,
            raft::host_matrix_view<int64_t, int64_t, row_major> indices,
            raft::host_matrix_view<float, int64_t, row_major> distances,
            distance::DistanceType metric)
{
  // Thin delegation: all work happens in the templated implementation.
  cuvs::neighbors::refine<int64_t, float, float, int64_t>(
    handle, dataset, queries, neighbor_candidates, indices, distances, metric);
}
}  // namespace cuvs::runtime::neighbors
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.