repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cuvs/python/cuvs/cuvs | rapidsai_public_repos/cuvs/python/cuvs/cuvs/test/test_cagra.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.common import device_ndarray
from pylibraft.neighbors import cagra
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
# todo (dantegd): consolidate helper utils of ann methods
def generate_data(shape, dtype):
    """Generate a random test array of the given shape and dtype.

    Integer dtypes are filled with uniform random integers; floating
    point dtypes get uniform samples in [0, 1).
    """
    if dtype == np.byte:
        # high bound is exclusive: values span -127..127 (deliberately
        # avoids -128, whose absolute value is not representable in int8)
        x = np.random.randint(-127, 128, size=shape, dtype=np.byte)
    elif dtype == np.ubyte:
        # high bound 256 so the maximum uint8 value 255 can occur
        # (the previous bound of 255 excluded it — off-by-one)
        x = np.random.randint(0, 256, size=shape, dtype=np.ubyte)
    else:
        x = np.random.random_sample(shape).astype(dtype)
    return x
def calc_recall(ann_idx, true_nn_idx):
    """Return recall: the fraction of approximate neighbor ids that also
    appear in the exact nearest-neighbor row for the same query."""
    assert ann_idx.shape == true_nn_idx.shape
    found = sum(
        np.intersect1d(approx_row, exact_row).size
        for approx_row, exact_row in zip(ann_idx, true_nn_idx)
    )
    return found / ann_idx.size
def run_cagra_build_search_test(
    n_rows=10000,
    n_cols=10,
    n_queries=100,
    k=10,
    dtype=np.float32,
    metric="euclidean",
    intermediate_graph_degree=128,
    graph_degree=64,
    build_algo="ivf_pq",
    array_type="device",
    compare=True,
    inplace=True,
    add_data_on_build=True,
    search_params=None,
):
    """Build a CAGRA index, search it, and (optionally) validate recall
    against a brute-force sklearn baseline.

    Parameters mirror ``cagra.IndexParams`` / ``cagra.SearchParams``;
    ``array_type`` selects host vs device input, ``inplace`` selects
    preallocated output buffers vs returned ones, and ``compare=False``
    skips the recall check (smoke test only).
    """
    # Fix: the default used to be a mutable `{}` literal (shared across
    # calls); use None as the sentinel instead.
    if search_params is None:
        search_params = {}
    dataset = generate_data((n_rows, n_cols), dtype)
    if metric == "inner_product":
        # inner_product recall is compared against sklearn's cosine
        # metric below, which only matches for L2-normalized vectors.
        dataset = normalize(dataset, norm="l2", axis=1)
    dataset_device = device_ndarray(dataset)
    build_params = cagra.IndexParams(
        metric=metric,
        intermediate_graph_degree=intermediate_graph_degree,
        graph_degree=graph_degree,
        build_algo=build_algo,
    )
    if array_type == "device":
        index = cagra.build(build_params, dataset_device)
    else:
        index = cagra.build(build_params, dataset)
    assert index.trained
    if not add_data_on_build:
        # Exercise extend(): add the dataset in two halves with explicit
        # uint32 row ids.
        dataset_1 = dataset[: n_rows // 2, :]
        dataset_2 = dataset[n_rows // 2 :, :]
        indices_1 = np.arange(n_rows // 2, dtype=np.uint32)
        indices_2 = np.arange(n_rows // 2, n_rows, dtype=np.uint32)
        if array_type == "device":
            dataset_1_device = device_ndarray(dataset_1)
            dataset_2_device = device_ndarray(dataset_2)
            indices_1_device = device_ndarray(indices_1)
            indices_2_device = device_ndarray(indices_2)
            index = cagra.extend(index, dataset_1_device, indices_1_device)
            index = cagra.extend(index, dataset_2_device, indices_2_device)
        else:
            index = cagra.extend(index, dataset_1, indices_1)
            index = cagra.extend(index, dataset_2, indices_2)
    queries = generate_data((n_queries, n_cols), dtype)
    out_idx = np.zeros((n_queries, k), dtype=np.uint32)
    out_dist = np.zeros((n_queries, k), dtype=np.float32)
    queries_device = device_ndarray(queries)
    # When inplace, pass preallocated device buffers; otherwise let
    # search() allocate and return them.
    out_idx_device = device_ndarray(out_idx) if inplace else None
    out_dist_device = device_ndarray(out_dist) if inplace else None
    search_params = cagra.SearchParams(**search_params)
    ret_output = cagra.search(
        search_params,
        index,
        queries_device,
        k,
        neighbors=out_idx_device,
        distances=out_dist_device,
    )
    if not inplace:
        out_dist_device, out_idx_device = ret_output
    if not compare:
        return
    out_idx = out_idx_device.copy_to_host()
    out_dist = out_dist_device.copy_to_host()
    # Calculate reference values with sklearn
    skl_metric = {
        "sqeuclidean": "sqeuclidean",
        "inner_product": "cosine",
        "euclidean": "euclidean",
    }[metric]
    nn_skl = NearestNeighbors(
        n_neighbors=k, algorithm="brute", metric=skl_metric
    )
    nn_skl.fit(dataset)
    skl_idx = nn_skl.kneighbors(queries, return_distance=False)
    recall = calc_recall(out_idx, skl_idx)
    assert recall > 0.7
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.uint8])
@pytest.mark.parametrize("array_type", ["device", "host"])
@pytest.mark.parametrize("build_algo", ["ivf_pq", "nn_descent"])
def test_cagra_dataset_dtype_host_device(
    dtype, array_type, inplace, build_algo
):
    """Smoke-test CAGRA build/search across dtypes, host/device inputs,
    in-place vs returned outputs, and both build algorithms (using the
    helper's default metric)."""
    test_kwargs = {
        "dtype": dtype,
        "inplace": inplace,
        "array_type": array_type,
        "build_algo": build_algo,
    }
    run_cagra_build_search_test(**test_kwargs)
@pytest.mark.parametrize(
    "params",
    [
        {
            "intermediate_graph_degree": 64,
            "graph_degree": 32,
            "add_data_on_build": True,
            "k": 1,
            "metric": "euclidean",
            "build_algo": "ivf_pq",
        },
        {
            "intermediate_graph_degree": 32,
            "graph_degree": 16,
            "add_data_on_build": False,
            "k": 5,
            "metric": "sqeuclidean",
            "build_algo": "ivf_pq",
        },
        {
            "intermediate_graph_degree": 128,
            "graph_degree": 32,
            "add_data_on_build": True,
            "k": 10,
            "metric": "inner_product",
            "build_algo": "nn_descent",
        },
    ],
)
def test_cagra_index_params(params):
    """Smoke-test CAGRA build with assorted IndexParams combinations.

    Recall is not validated (compare=False); the test only checks that
    build and search complete for these parameter sets.
    """
    run_cagra_build_search_test(
        k=params["k"],
        metric=params["metric"],
        graph_degree=params["graph_degree"],
        intermediate_graph_degree=params["intermediate_graph_degree"],
        compare=False,
        build_algo=params["build_algo"],
        # Fix: this key was present in every params dict above but was
        # never forwarded, so the add_data_on_build=False (extend) path
        # was silently untested here.
        add_data_on_build=params["add_data_on_build"],
    )
@pytest.mark.parametrize(
    "params",
    [
        {
            "max_queries": 100,
            "itopk_size": 32,
            "max_iterations": 100,
            "algo": "single_cta",
            "team_size": 0,
            "search_width": 1,
            "min_iterations": 1,
            "thread_block_size": 64,
            "hashmap_mode": "hash",
            "hashmap_min_bitlen": 0.2,
            "hashmap_max_fill_rate": 0.5,
            "num_random_samplings": 1,
        },
        {
            "max_queries": 10,
            "itopk_size": 128,
            "max_iterations": 0,
            "algo": "multi_cta",
            "team_size": 8,
            "search_width": 2,
            "min_iterations": 10,
            "thread_block_size": 0,
            "hashmap_mode": "auto",
            "hashmap_min_bitlen": 0.9,
            "hashmap_max_fill_rate": 0.5,
            "num_random_samplings": 10,
        },
        {
            "max_queries": 0,
            "itopk_size": 64,
            "max_iterations": 0,
            "algo": "multi_kernel",
            "team_size": 16,
            "search_width": 1,
            "min_iterations": 0,
            "thread_block_size": 0,
            "hashmap_mode": "auto",
            "hashmap_min_bitlen": 0,
            "hashmap_max_fill_rate": 0.5,
            "num_random_samplings": 1,
        },
        {
            "max_queries": 0,
            "itopk_size": 64,
            "max_iterations": 0,
            "algo": "auto",
            "team_size": 32,
            "search_width": 4,
            "min_iterations": 0,
            "thread_block_size": 0,
            "hashmap_mode": "auto",
            "hashmap_min_bitlen": 0,
            "hashmap_max_fill_rate": 0.5,
            "num_random_samplings": 1,
        },
    ],
)
def test_cagra_search_params(params):
    """Run a full build+search+recall check for each CAGRA search
    algorithm with assorted SearchParams (forwarded as kwargs to
    cagra.SearchParams by the helper)."""
    # NOTE(review): the comment that used to sit here ("we test only
    # sqeuclidean metric") looked copy-pasted — the helper's default
    # metric ("euclidean") is what actually runs; confirm intent.
    run_cagra_build_search_test(search_params=params)
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.ubyte])
@pytest.mark.parametrize("include_dataset", [True, False])
def test_save_load(dtype, include_dataset):
    """Round-trip a CAGRA index through save/load and verify the loaded
    index returns results identical to the original."""
    # Local imports: only this test needs them.
    import os
    import tempfile

    n_rows = 10000
    n_cols = 50
    n_queries = 1000
    dataset = generate_data((n_rows, n_cols), dtype)
    dataset_device = device_ndarray(dataset)
    build_params = cagra.IndexParams()
    index = cagra.build(build_params, dataset_device)
    assert index.trained
    # Fix: serialize into a temporary directory instead of the current
    # working directory, so the test does not leave "my_index.bin"
    # behind or race with parallel test runs.
    with tempfile.TemporaryDirectory() as tmpdir:
        filename = os.path.join(tmpdir, "my_index.bin")
        cagra.save(filename, index, include_dataset=include_dataset)
        loaded_index = cagra.load(filename)
    # if we didn't save the dataset with the index, we need to update the
    # index with an already loaded copy
    if not include_dataset:
        loaded_index.update_dataset(dataset)
    queries = generate_data((n_queries, n_cols), dtype)
    queries_device = device_ndarray(queries)
    search_params = cagra.SearchParams()
    k = 10
    distance_dev, neighbors_dev = cagra.search(
        search_params, index, queries_device, k
    )
    neighbors = neighbors_dev.copy_to_host()
    dist = distance_dev.copy_to_host()
    del index
    # The loaded index must produce exactly the same neighbors and
    # (numerically close) distances.
    distance_dev, neighbors_dev = cagra.search(
        search_params, loaded_index, queries_device, k
    )
    neighbors2 = neighbors_dev.copy_to_host()
    dist2 = distance_dev.copy_to_host()
    assert np.all(neighbors == neighbors2)
    assert np.allclose(dist, dist2, rtol=1e-6)
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs | rapidsai_public_repos/cuvs/python/cuvs/cuvs/test/test_doctests.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import doctest
import inspect
import io
import pylibraft.cluster
import pylibraft.distance
import pylibraft.matrix
import pylibraft.neighbors
import pylibraft.random
import pytest
# Code adapted from https://github.com/rapidsai/cudf/blob/branch-23.02/python/cudf/cudf/tests/test_doctests.py # noqa
def _name_in_all(parent, name):
return name in getattr(parent, "__all__", [])
def _is_public_name(parent, name):
return not name.startswith("_")
def _find_doctests_in_obj(obj, finder=None, criteria=None):
"""Find all doctests in an object.
Parameters
----------
obj : module or class
The object to search for docstring examples.
finder : doctest.DocTestFinder, optional
The DocTestFinder object to use. If not provided, a DocTestFinder is
constructed.
criteria : callable, optional
Callable indicating whether to recurse over members of the provided
object. If not provided, names not defined in the object's ``__all__``
property are ignored.
Yields
------
doctest.DocTest
The next doctest found in the object.
"""
if finder is None:
finder = doctest.DocTestFinder()
if criteria is None:
criteria = _name_in_all
for docstring in finder.find(obj):
if docstring.examples:
yield docstring
for name, member in inspect.getmembers(obj):
# Only recurse over members matching the criteria
if not criteria(obj, name):
continue
# Recurse over the public API of modules (objects defined in the
# module's __all__)
if inspect.ismodule(member):
yield from _find_doctests_in_obj(
member, finder, criteria=_name_in_all
)
# Recurse over the public API of classes (attributes not prefixed with
# an underscore)
if inspect.isclass(member):
yield from _find_doctests_in_obj(
member, finder, criteria=_is_public_name
)
# doctest finder seems to dislike cython functions, since
# `inspect.isfunction` doesn't return true for them. hack around this
if callable(member) and not inspect.isfunction(member):
for docstring in finder.find(member):
if docstring.examples:
yield docstring
# since the root pylibraft module doesn't import submodules (or define an
# __all__) we are explicitly adding all the submodules we want to run
# doctests for here
# NOTE: collection runs at module import time so that pytest can
# parametrize test_docstring over every discovered doctest.
DOC_STRINGS = list(_find_doctests_in_obj(pylibraft.cluster))
DOC_STRINGS.extend(_find_doctests_in_obj(pylibraft.common))
DOC_STRINGS.extend(_find_doctests_in_obj(pylibraft.distance))
DOC_STRINGS.extend(_find_doctests_in_obj(pylibraft.matrix.select_k))
DOC_STRINGS.extend(_find_doctests_in_obj(pylibraft.neighbors))
DOC_STRINGS.extend(_find_doctests_in_obj(pylibraft.neighbors.brute_force))
DOC_STRINGS.extend(_find_doctests_in_obj(pylibraft.neighbors.cagra))
DOC_STRINGS.extend(_find_doctests_in_obj(pylibraft.neighbors.ivf_flat))
DOC_STRINGS.extend(_find_doctests_in_obj(pylibraft.neighbors.ivf_pq))
DOC_STRINGS.extend(_find_doctests_in_obj(pylibraft.neighbors.refine))
DOC_STRINGS.extend(_find_doctests_in_obj(pylibraft.random))
@pytest.mark.parametrize(
    "docstring",
    DOC_STRINGS,
    ids=lambda docstring: docstring.name,
)
def test_docstring(docstring):
    """Execute one collected doctest; fail with its captured output."""
    # ELLIPSIS lets examples elide memory addresses / imprecise floats
    # with "..."; NORMALIZE_WHITESPACE ignores whitespace-only diffs.
    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    runner = doctest.DocTestRunner(optionflags=flags)
    # Capture stdout and include failing outputs in the traceback.
    captured = io.StringIO()
    with contextlib.redirect_stdout(captured):
        runner.run(docstring)
    summary = runner.summarize()
    assert not summary.failed, (
        f"{summary.failed} of {summary.attempted} doctests failed for "
        f"{docstring.name}:\n{captured.getvalue()}"
    )
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs | rapidsai_public_repos/cuvs/python/cuvs/cuvs/test/test_kmeans.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.cluster.kmeans import (
KMeansParams,
cluster_cost,
compute_new_centroids,
fit,
init_plus_plus,
)
from pylibraft.common import DeviceResources, device_ndarray
from pylibraft.distance import pairwise_distance
@pytest.mark.parametrize("n_rows", [100])
@pytest.mark.parametrize("n_cols", [5, 25])
@pytest.mark.parametrize("n_clusters", [5, 15])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_kmeans_fit(n_rows, n_cols, n_clusters, dtype):
    """Fitting k-means must reduce inertia relative to the initial
    centroids, and the returned inertia must agree with cluster_cost."""
    # generate some random input points / centroids
    X_host = np.random.random_sample((n_rows, n_cols)).astype(dtype)
    # seed the centroids with the first n_clusters points
    centroids = device_ndarray(X_host[:n_clusters])
    X = device_ndarray(X_host)
    # compute the inertia, before fitting centroids
    original_inertia = cluster_cost(X, centroids)
    params = KMeansParams(n_clusters=n_clusters, seed=42)
    # fit the centroids, make sure inertia has gone down
    # TODO: once we have make_blobs exposed to python
    # (https://github.com/rapidsai/raft/issues/1059)
    # we should use that to test out the kmeans fit, like the C++
    # tests do right now
    centroids, inertia, n_iter = fit(params, X, centroids)
    assert inertia < original_inertia
    assert n_iter >= 1
    assert np.allclose(cluster_cost(X, centroids), inertia, rtol=1e-6)
@pytest.mark.parametrize("n_rows", [100])
@pytest.mark.parametrize("n_cols", [5, 25])
@pytest.mark.parametrize("n_clusters", [5, 15])
@pytest.mark.parametrize("metric", ["euclidean", "sqeuclidean"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("additional_args", [True, False])
def test_compute_new_centroids(
    n_rows, n_cols, metric, n_clusters, dtype, additional_args
):
    """compute_new_centroids must match a NumPy reference computing the
    weighted mean of each cluster's samples (optionally exercising the
    sample_weights / weight_per_cluster arguments)."""
    # A single RAFT handle can optionally be reused across
    # pylibraft functions.
    handle = DeviceResources()
    X = np.random.random_sample((n_rows, n_cols)).astype(dtype)
    X_device = device_ndarray(X)
    # seed centroids with the first n_clusters samples
    centroids = X[:n_clusters]
    centroids_device = device_ndarray(centroids)
    weight_per_cluster = np.zeros((n_clusters,), dtype=dtype)
    weight_per_cluster_device = (
        device_ndarray(weight_per_cluster) if additional_args else None
    )
    new_centroids = np.zeros((n_clusters, n_cols), dtype=dtype)
    new_centroids_device = device_ndarray(new_centroids)
    # uniform sample weights summing to 1
    sample_weights = np.ones((n_rows,)).astype(dtype) / n_rows
    sample_weights_device = (
        device_ndarray(sample_weights) if additional_args else None
    )
    # Compute new centroids naively
    dists = np.zeros((n_rows, n_clusters), dtype=dtype)
    dists_device = device_ndarray(dists)
    pairwise_distance(X_device, centroids_device, dists_device, metric=metric)
    handle.sync()
    # assign each sample to its nearest centroid
    labels = np.argmin(dists_device.copy_to_host(), axis=1).astype(np.int32)
    labels_device = device_ndarray(labels)
    expected_centers = np.empty((n_clusters, n_cols), dtype=dtype)
    expected_wX = X * sample_weights.reshape((-1, 1))
    for i in range(n_clusters):
        # weighted mean of the samples assigned to cluster i
        # NOTE(review): assumes every cluster receives at least one
        # sample, otherwise g == 0 divides by zero — verify for this data
        j = expected_wX[labels == i]
        j = j.sum(axis=0)
        g = sample_weights[labels == i].sum()
        expected_centers[i, :] = j / g
    compute_new_centroids(
        X_device,
        centroids_device,
        labels_device,
        new_centroids_device,
        sample_weights=sample_weights_device,
        weight_per_cluster=weight_per_cluster_device,
        handle=handle,
    )
    # pylibraft functions are often asynchronous so the
    # handle needs to be explicitly synchronized
    handle.sync()
    actual_centers = new_centroids_device.copy_to_host()
    assert np.allclose(expected_centers, actual_centers, rtol=1e-6)
@pytest.mark.parametrize("n_rows", [100])
@pytest.mark.parametrize("n_cols", [5, 25])
@pytest.mark.parametrize("n_clusters", [4, 15])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_cluster_cost(n_rows, n_cols, n_clusters, dtype):
    """cluster_cost must equal the sum of squared distances from each
    sample to its nearest centroid (computed here with pairwise_distance
    as a reference)."""
    X = np.random.random_sample((n_rows, n_cols)).astype(dtype)
    X_device = device_ndarray(X)
    # seed centroids with the first n_clusters samples
    centroids = X[:n_clusters]
    centroids_device = device_ndarray(centroids)
    inertia = cluster_cost(X_device, centroids_device)
    # compute the nearest centroid to each sample
    distances = pairwise_distance(
        X_device, centroids_device, metric="sqeuclidean"
    ).copy_to_host()
    cluster_ids = np.argmin(distances, axis=1)
    # distance of each sample to its assigned (nearest) centroid
    cluster_distances = np.take_along_axis(
        distances, cluster_ids[:, None], axis=1
    )
    # need reduced tolerance for float32
    tol = 1e-3 if dtype == np.float32 else 1e-6
    assert np.allclose(inertia, sum(cluster_distances), rtol=tol, atol=tol)
@pytest.mark.parametrize("n_rows", [100])
@pytest.mark.parametrize("n_cols", [5, 25])
@pytest.mark.parametrize("n_clusters", [4, 15])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_init_plus_plus(n_rows, n_cols, n_clusters, dtype):
    """k-means++ init must return n_clusters centroids, each of which is
    an actual row of the input data."""
    X = np.random.random_sample((n_rows, n_cols)).astype(dtype)
    X_device = device_ndarray(X)
    centroids = init_plus_plus(X_device, n_clusters, seed=1)
    centroids_ = centroids.copy_to_host()
    assert centroids_.shape == (n_clusters, X.shape[1])
    # Centroids are selected from the existing points
    for centroid in centroids_:
        assert (centroid == X).all(axis=1).any()
@pytest.mark.parametrize("n_rows", [100])
@pytest.mark.parametrize("n_cols", [5, 25])
@pytest.mark.parametrize("n_clusters", [4, 15])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_init_plus_plus_preallocated_output(n_rows, n_cols, n_clusters, dtype):
    """k-means++ init must also work when writing into a caller-supplied
    centroids buffer (shape preserved, rows drawn from the input)."""
    X = np.random.random_sample((n_rows, n_cols)).astype(dtype)
    X_device = device_ndarray(X)
    # preallocated output buffer passed via the centroids= argument
    centroids = device_ndarray.empty((n_clusters, n_cols), dtype=dtype)
    new_centroids = init_plus_plus(X_device, centroids=centroids, seed=1)
    new_centroids_ = new_centroids.copy_to_host()
    # The shape should not have changed
    assert new_centroids_.shape == centroids.shape
    # Centroids are selected from the existing points
    for centroid in new_centroids_:
        assert (centroid == X).all(axis=1).any()
def test_init_plus_plus_exclusive_arguments():
    """Passing both n_clusters and a centroids buffer whose shape
    disagrees with it must raise RuntimeError."""
    # Check an exception is raised when n_clusters and centroids shape
    # are inconsistent.
    X = np.random.random_sample((10, 5)).astype(np.float64)
    X = device_ndarray(X)
    n_clusters = 3
    # centroids deliberately sized n_clusters + 1 rows to trigger the check
    centroids = np.random.random_sample((n_clusters + 1, 5)).astype(np.float64)
    centroids = device_ndarray(centroids)
    with pytest.raises(
        RuntimeError, match="Parameters 'n_clusters' and 'centroids'"
    ):
        init_plus_plus(X, n_clusters, centroids=centroids)
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs | rapidsai_public_repos/cuvs/python/cuvs/cuvs/test/test_ivf_pq.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.common import device_ndarray
from pylibraft.neighbors import ivf_pq
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
def generate_data(shape, dtype):
    """Generate a random test array of the given shape and dtype.

    Integer dtypes are filled with uniform random integers; floating
    point dtypes get uniform samples in [0, 1).
    """
    if dtype == np.byte:
        # high bound is exclusive: values span -127..127 (deliberately
        # avoids -128, whose absolute value is not representable in int8)
        x = np.random.randint(-127, 128, size=shape, dtype=np.byte)
    elif dtype == np.ubyte:
        # high bound 256 so the maximum uint8 value 255 can occur
        # (the previous bound of 255 excluded it — off-by-one)
        x = np.random.randint(0, 256, size=shape, dtype=np.ubyte)
    else:
        x = np.random.random_sample(shape).astype(dtype)
    return x
def calc_recall(ann_idx, true_nn_idx):
    """Return recall: the fraction of approximate neighbor ids that also
    appear in the exact nearest-neighbor row for the same query."""
    assert ann_idx.shape == true_nn_idx.shape
    found = sum(
        np.intersect1d(approx_row, exact_row).size
        for approx_row, exact_row in zip(ann_idx, true_nn_idx)
    )
    return found / ann_idx.size
def check_distances(dataset, queries, metric, out_idx, out_dist, eps=None):
    """
    Calculate the real distance between queries and dataset[out_idx],
    and compare it to out_dist.
    """
    if eps is None:
        # Quantization leads to errors in the distance calculation.
        # The aim of this test is not to test precision, but to catch obvious
        # errors.
        eps = 0.1
    expected = np.empty(out_dist.shape, out_dist.dtype)
    for q in range(queries.shape[0]):
        query_row = queries[np.newaxis, q, :]
        selected = dataset[out_idx[q, :], :]
        if metric in ("sqeuclidean", "euclidean"):
            expected[q, :] = pairwise_distances(query_row, selected, metric)
        elif metric == "inner_product":
            expected[q, :] = np.matmul(query_row, selected.T)
        else:
            raise ValueError("Invalid metric")
    # Relative error with the denominator floored at 1e-3, so tiny or
    # zero reference distances do not blow up the ratio.
    denom = abs(expected)
    denom[expected < 1e-3] = 1e-3
    diff = abs(out_dist - expected) / denom
    assert np.mean(diff) < eps
def run_ivf_pq_build_search_test(
    n_rows,
    n_cols,
    n_queries,
    k,
    n_lists,
    metric,
    dtype,
    pq_bits=8,
    pq_dim=0,
    codebook_kind="subspace",
    add_data_on_build=True,
    n_probes=100,
    lut_dtype=np.float32,
    internal_distance_dtype=np.float32,
    force_random_rotation=False,
    kmeans_trainset_fraction=1,
    kmeans_n_iters=20,
    compare=True,
    inplace=True,
    array_type="device",
):
    """Build an IVF-PQ index, search it, and (optionally) validate
    recall and distances against a brute-force sklearn baseline.

    Parameters mirror ``ivf_pq.IndexParams`` / ``ivf_pq.SearchParams``;
    ``array_type`` selects host vs device input, ``inplace`` selects
    preallocated output buffers vs returned ones, and ``compare=False``
    skips the accuracy checks (smoke test only).

    Note: the ``add_data_on_build`` default used to be the *string*
    ``"True"`` — always truthy, but the wrong type; fixed to a boolean.
    """
    dataset = generate_data((n_rows, n_cols), dtype)
    if metric == "inner_product":
        # inner_product recall is compared against sklearn's cosine
        # metric below, which only matches for L2-normalized vectors.
        dataset = normalize(dataset, norm="l2", axis=1)
    dataset_device = device_ndarray(dataset)
    build_params = ivf_pq.IndexParams(
        n_lists=n_lists,
        metric=metric,
        kmeans_n_iters=kmeans_n_iters,
        kmeans_trainset_fraction=kmeans_trainset_fraction,
        pq_bits=pq_bits,
        pq_dim=pq_dim,
        codebook_kind=codebook_kind,
        force_random_rotation=force_random_rotation,
        add_data_on_build=add_data_on_build,
    )
    if array_type == "device":
        index = ivf_pq.build(build_params, dataset_device)
    else:
        index = ivf_pq.build(build_params, dataset)
    assert index.trained
    if pq_dim != 0:
        assert index.pq_dim == build_params.pq_dim
    assert index.pq_bits == build_params.pq_bits
    assert index.metric == build_params.metric
    assert index.n_lists == build_params.n_lists
    if not add_data_on_build:
        # Exercise extend(): add the dataset in two halves with explicit
        # int64 row ids.
        dataset_1 = dataset[: n_rows // 2, :]
        dataset_2 = dataset[n_rows // 2 :, :]
        indices_1 = np.arange(n_rows // 2, dtype=np.int64)
        indices_2 = np.arange(n_rows // 2, n_rows, dtype=np.int64)
        if array_type == "device":
            dataset_1_device = device_ndarray(dataset_1)
            dataset_2_device = device_ndarray(dataset_2)
            indices_1_device = device_ndarray(indices_1)
            indices_2_device = device_ndarray(indices_2)
            index = ivf_pq.extend(index, dataset_1_device, indices_1_device)
            index = ivf_pq.extend(index, dataset_2_device, indices_2_device)
        else:
            index = ivf_pq.extend(index, dataset_1, indices_1)
            index = ivf_pq.extend(index, dataset_2, indices_2)
        assert index.size >= n_rows
    queries = generate_data((n_queries, n_cols), dtype)
    out_idx = np.zeros((n_queries, k), dtype=np.int64)
    out_dist = np.zeros((n_queries, k), dtype=np.float32)
    queries_device = device_ndarray(queries)
    # When inplace, pass preallocated device buffers; otherwise let
    # search() allocate and return them.
    out_idx_device = device_ndarray(out_idx) if inplace else None
    out_dist_device = device_ndarray(out_dist) if inplace else None
    search_params = ivf_pq.SearchParams(
        n_probes=n_probes,
        lut_dtype=lut_dtype,
        internal_distance_dtype=internal_distance_dtype,
    )
    ret_output = ivf_pq.search(
        search_params,
        index,
        queries_device,
        k,
        neighbors=out_idx_device,
        distances=out_dist_device,
    )
    if not inplace:
        out_dist_device, out_idx_device = ret_output
    if not compare:
        return
    out_idx = out_idx_device.copy_to_host()
    out_dist = out_dist_device.copy_to_host()
    # Calculate reference values with sklearn
    skl_metric = {
        "sqeuclidean": "sqeuclidean",
        "inner_product": "cosine",
        "euclidean": "euclidean",
    }[metric]
    nn_skl = NearestNeighbors(
        n_neighbors=k, algorithm="brute", metric=skl_metric
    )
    nn_skl.fit(dataset)
    skl_idx = nn_skl.kneighbors(queries, return_distance=False)
    recall = calc_recall(out_idx, skl_idx)
    assert recall > 0.7
    check_distances(dataset, queries, metric, out_idx, out_dist)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("n_rows", [10000])
@pytest.mark.parametrize("n_cols", [10])
@pytest.mark.parametrize("n_queries", [100])
@pytest.mark.parametrize("n_lists", [100])
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.uint8])
@pytest.mark.parametrize("array_type", ["host", "device"])
def test_ivf_pq_dtypes(
    n_rows, n_cols, n_queries, n_lists, dtype, inplace, array_type
):
    """Full build+search+recall check across the supported input dtypes
    and host/device inputs."""
    # Note that inner_product tests use normalized input which we cannot
    # represent in int8, therefore we test only sqeuclidean metric here.
    run_ivf_pq_build_search_test(
        n_rows=n_rows,
        n_cols=n_cols,
        n_queries=n_queries,
        k=10,
        n_lists=n_lists,
        metric="sqeuclidean",
        dtype=dtype,
        inplace=inplace,
        array_type=array_type,
    )
@pytest.mark.parametrize(
    "params",
    [
        # an empty dataset is expected to fail at build time
        pytest.param(
            {
                "n_rows": 0,
                "n_cols": 10,
                "n_queries": 10,
                "k": 1,
                "n_lists": 10,
            },
            marks=pytest.mark.xfail(reason="empty dataset"),
        ),
        {"n_rows": 1, "n_cols": 10, "n_queries": 10, "k": 1, "n_lists": 1},
        {"n_rows": 10, "n_cols": 1, "n_queries": 10, "k": 10, "n_lists": 10},
        # {"n_rows": 999, "n_cols": 42, "n_queries": 453, "k": 137,
        # "n_lists": 53},
    ],
)
def test_ivf_pq_n(params):
    # We do not test recall, just confirm that we can handle edge cases for
    # certain parameters
    run_ivf_pq_build_search_test(
        n_rows=params["n_rows"],
        n_cols=params["n_cols"],
        n_queries=params["n_queries"],
        k=params["k"],
        n_lists=params["n_lists"],
        metric="sqeuclidean",
        dtype=np.float32,
        compare=False,
    )
@pytest.mark.parametrize(
    "metric", ["sqeuclidean", "inner_product", "euclidean"]
)
@pytest.mark.parametrize("dtype", [np.float32])
@pytest.mark.parametrize("codebook_kind", ["subspace", "cluster"])
@pytest.mark.parametrize("rotation", [True, False])
def test_ivf_pq_build_params(metric, dtype, codebook_kind, rotation):
    """Recall check across metrics, codebook kinds, and the
    force_random_rotation flag (float32 only — inner_product needs
    normalized input)."""
    run_ivf_pq_build_search_test(
        n_rows=10000,
        n_cols=10,
        n_queries=1000,
        k=10,
        n_lists=100,
        metric=metric,
        dtype=dtype,
        pq_bits=8,
        pq_dim=0,
        codebook_kind=codebook_kind,
        add_data_on_build=True,
        n_probes=100,
        force_random_rotation=rotation,
    )
@pytest.mark.parametrize(
    "params",
    [
        {"pq_dims": 10, "pq_bits": 8, "n_lists": 100},
        {"pq_dims": 16, "pq_bits": 7, "n_lists": 100},
        # pq_dims=0 lets the library pick the PQ dimensionality
        {"pq_dims": 0, "pq_bits": 8, "n_lists": 90},
        {
            "pq_dims": 0,
            "pq_bits": 8,
            "n_lists": 100,
            "trainset_fraction": 0.9,
            "n_iters": 30,
        },
    ],
)
def test_ivf_pq_params(params):
    """Recall check across PQ dimensionality/bit-width and k-means
    training parameters (trainset_fraction / n_iters default when
    absent from the params dict)."""
    run_ivf_pq_build_search_test(
        n_rows=10000,
        n_cols=16,
        n_queries=1000,
        k=10,
        n_lists=params["n_lists"],
        metric="sqeuclidean",
        dtype=np.float32,
        pq_bits=params["pq_bits"],
        pq_dim=params["pq_dims"],
        kmeans_trainset_fraction=params.get("trainset_fraction", 1.0),
        kmeans_n_iters=params.get("n_iters", 20),
    )
@pytest.mark.parametrize(
    "params",
    [
        {
            "k": 10,
            "n_probes": 100,
            "lut": np.float16,
            "idd": np.float32,
        },
        {
            "k": 10,
            "n_probes": 99,
            "lut": np.uint8,
            "idd": np.float32,
        },
        {
            "k": 10,
            "n_probes": 100,
            "lut": np.float16,
            "idd": np.float16,
        },
        {
            "k": 129,
            "n_probes": 100,
            "lut": np.float32,
            "idd": np.float32,
        },
    ],
)
def test_ivf_pq_search_params(params):
    """Recall check across search-time parameters: k, n_probes,
    lookup-table dtype (lut) and internal distance dtype (idd)."""
    run_ivf_pq_build_search_test(
        n_rows=10000,
        n_cols=16,
        n_queries=1000,
        k=params["k"],
        n_lists=100,
        n_probes=params["n_probes"],
        metric="sqeuclidean",
        dtype=np.float32,
        lut_dtype=params["lut"],
        internal_distance_dtype=params["idd"],
    )
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.uint8])
@pytest.mark.parametrize("array_type", ["host", "device"])
def test_extend(dtype, array_type):
    """Exercise the extend() path: build with add_data_on_build=False,
    add the vectors afterwards, then validate recall."""
    run_ivf_pq_build_search_test(
        n_rows=10000,
        n_cols=10,
        n_queries=100,
        k=10,
        n_lists=100,
        metric="sqeuclidean",
        dtype=dtype,
        add_data_on_build=False,
        array_type=array_type,
    )
def test_build_assertions():
    """Error paths: unsupported dtype at build time, searching an
    untrained index, and extend() dimension mismatches."""
    # float64 input is expected to be rejected by build
    with pytest.raises(TypeError):
        run_ivf_pq_build_search_test(
            n_rows=1000,
            n_cols=10,
            n_queries=100,
            k=10,
            n_lists=100,
            metric="sqeuclidean",
            dtype=np.float64,
        )
    n_rows = 1000
    n_cols = 100
    n_queries = 212
    k = 10
    dataset = generate_data((n_rows, n_cols), np.float32)
    dataset_device = device_ndarray(dataset)
    index_params = ivf_pq.IndexParams(
        n_lists=50,
        metric="sqeuclidean",
        kmeans_n_iters=20,
        kmeans_trainset_fraction=1,
        add_data_on_build=False,
    )
    # default-constructed, untrained index
    index = ivf_pq.Index()
    queries = generate_data((n_queries, n_cols), np.float32)
    out_idx = np.zeros((n_queries, k), dtype=np.int64)
    out_dist = np.zeros((n_queries, k), dtype=np.float32)
    queries_device = device_ndarray(queries)
    out_idx_device = device_ndarray(out_idx)
    out_dist_device = device_ndarray(out_dist)
    search_params = ivf_pq.SearchParams(n_probes=50)
    with pytest.raises(ValueError):
        # Index must be built before search
        ivf_pq.search(
            search_params,
            index,
            queries_device,
            k,
            out_idx_device,
            out_dist_device,
        )
    index = ivf_pq.build(index_params, dataset_device)
    assert index.trained
    # n_rows + 1 indices: cannot match the row count of either array below
    indices = np.arange(n_rows + 1, dtype=np.int64)
    indices_device = device_ndarray(indices)
    with pytest.raises(ValueError):
        # Dataset dimension mismatch
        ivf_pq.extend(index, queries_device, indices_device)
    with pytest.raises(ValueError):
        # indices dimension mismatch
        ivf_pq.extend(index, dataset_device, indices_device)
@pytest.mark.parametrize(
    "params",
    [
        {"q_dt": np.float64},
        {"q_order": "F"},
        {"q_cols": 101},
        {"idx_dt": np.uint32},
        {"idx_order": "F"},
        {"idx_rows": 42},
        {"idx_cols": 137},
        {"dist_dt": np.float64},
        {"dist_order": "F"},
        {"dist_rows": 42},
        {"dist_cols": 137},
    ],
)
def test_search_inputs(params):
    """Test with invalid input dtype, order, or dimension."""
    # Each params dict overrides exactly one property of the queries /
    # neighbors / distances arrays; everything else stays valid.
    n_rows = 1000
    n_cols = 100
    n_queries = 256
    k = 10
    dtype = np.float32
    q_dt = params.get("q_dt", np.float32)
    q_order = params.get("q_order", "C")
    queries = generate_data(
        (n_queries, params.get("q_cols", n_cols)), q_dt
    ).astype(q_dt, order=q_order)
    queries_device = device_ndarray(queries)
    idx_dt = params.get("idx_dt", np.int64)
    idx_order = params.get("idx_order", "C")
    out_idx = np.zeros(
        (params.get("idx_rows", n_queries), params.get("idx_cols", k)),
        dtype=idx_dt,
        order=idx_order,
    )
    out_idx_device = device_ndarray(out_idx)
    dist_dt = params.get("dist_dt", np.float32)
    dist_order = params.get("dist_order", "C")
    out_dist = np.zeros(
        (params.get("dist_rows", n_queries), params.get("dist_cols", k)),
        dtype=dist_dt,
        order=dist_order,
    )
    out_dist_device = device_ndarray(out_dist)
    index_params = ivf_pq.IndexParams(
        n_lists=50, metric="sqeuclidean", add_data_on_build=True
    )
    dataset = generate_data((n_rows, n_cols), dtype)
    dataset_device = device_ndarray(dataset)
    index = ivf_pq.build(index_params, dataset_device)
    assert index.trained
    # the single invalid property above must make search() raise
    with pytest.raises(Exception):
        search_params = ivf_pq.SearchParams(n_probes=50)
        ivf_pq.search(
            search_params,
            index,
            queries_device,
            k,
            out_idx_device,
            out_dist_device,
        )
def test_save_load():
    """Round-trip an IVF-PQ index through save/load and verify the
    loaded index matches the original's metadata and search results."""
    # Local imports: only this test needs them.
    import os
    import tempfile

    n_rows = 10000
    n_cols = 50
    n_queries = 1000
    dtype = np.float32
    dataset = generate_data((n_rows, n_cols), dtype)
    dataset_device = device_ndarray(dataset)
    build_params = ivf_pq.IndexParams(n_lists=100, metric="sqeuclidean")
    index = ivf_pq.build(build_params, dataset_device)
    assert index.trained
    # Fix: serialize into a temporary directory instead of the current
    # working directory, so the test does not leave "my_index.bin"
    # behind or race with parallel test runs.
    with tempfile.TemporaryDirectory() as tmpdir:
        filename = os.path.join(tmpdir, "my_index.bin")
        ivf_pq.save(filename, index)
        loaded_index = ivf_pq.load(filename)
    # metadata must survive the round trip
    assert index.pq_dim == loaded_index.pq_dim
    assert index.pq_bits == loaded_index.pq_bits
    assert index.metric == loaded_index.metric
    assert index.n_lists == loaded_index.n_lists
    assert index.size == loaded_index.size
    queries = generate_data((n_queries, n_cols), dtype)
    queries_device = device_ndarray(queries)
    search_params = ivf_pq.SearchParams(n_probes=100)
    k = 10
    distance_dev, neighbors_dev = ivf_pq.search(
        search_params, index, queries_device, k
    )
    neighbors = neighbors_dev.copy_to_host()
    dist = distance_dev.copy_to_host()
    del index
    # The loaded index must produce exactly the same neighbors and
    # (numerically close) distances.
    distance_dev, neighbors_dev = ivf_pq.search(
        search_params, loaded_index, queries_device, k
    )
    neighbors2 = neighbors_dev.copy_to_host()
    dist2 = distance_dev.copy_to_host()
    assert np.all(neighbors == neighbors2)
    assert np.allclose(dist, dist2, rtol=1e-6)
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs | rapidsai_public_repos/cuvs/python/cuvs/cuvs/test/test_distance.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.common import DeviceResources, Stream, device_ndarray
from pylibraft.distance import pairwise_distance
from scipy.spatial.distance import cdist
@pytest.mark.parametrize("n_rows", [50, 100])
@pytest.mark.parametrize("n_cols", [10, 50])
@pytest.mark.parametrize(
"metric",
[
"euclidean",
"cityblock",
"chebyshev",
"canberra",
"correlation",
"hamming",
"jensenshannon",
"russellrao",
"cosine",
"sqeuclidean",
"inner_product",
],
)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_distance(n_rows, n_cols, inplace, metric, order, dtype):
input1 = np.random.random_sample((n_rows, n_cols))
input1 = np.asarray(input1, order=order).astype(dtype)
# RussellRao expects boolean arrays
if metric == "russellrao":
input1[input1 < 0.5] = 0
input1[input1 >= 0.5] = 1
# JensenShannon expects probability arrays
elif metric == "jensenshannon":
norm = np.sum(input1, axis=1)
input1 = (input1.T / norm).T
output = np.zeros((n_rows, n_rows), dtype=dtype)
if metric == "inner_product":
expected = np.matmul(input1, input1.T)
else:
expected = cdist(input1, input1, metric)
input1_device = device_ndarray(input1)
output_device = device_ndarray(output) if inplace else None
s2 = Stream()
handle = DeviceResources(stream=s2)
ret_output = pairwise_distance(
input1_device, input1_device, output_device, metric, handle=handle
)
handle.sync()
output_device = ret_output if not inplace else output_device
actual = output_device.copy_to_host()
assert np.allclose(expected, actual, atol=1e-3, rtol=1e-3)
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs | rapidsai_public_repos/cuvs/python/cuvs/cuvs/test/test_brute_force.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.common import DeviceResources, Stream, device_ndarray
from pylibraft.neighbors.brute_force import knn
from scipy.spatial.distance import cdist
@pytest.mark.parametrize("n_index_rows", [32, 100])
@pytest.mark.parametrize("n_query_rows", [32, 100])
@pytest.mark.parametrize("n_cols", [40, 100])
@pytest.mark.parametrize("k", [1, 5, 32])
@pytest.mark.parametrize(
"metric",
[
"euclidean",
"cityblock",
"chebyshev",
"canberra",
"correlation",
"russellrao",
"cosine",
"sqeuclidean",
# "inner_product",
],
)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("dtype", [np.float32])
def test_knn(n_index_rows, n_query_rows, n_cols, k, inplace, metric, dtype):
index = np.random.random_sample((n_index_rows, n_cols)).astype(dtype)
queries = np.random.random_sample((n_query_rows, n_cols)).astype(dtype)
# RussellRao expects boolean arrays
if metric == "russellrao":
index[index < 0.5] = 0.0
index[index >= 0.5] = 1.0
queries[queries < 0.5] = 0.0
queries[queries >= 0.5] = 1.0
indices = np.zeros((n_query_rows, k), dtype="int64")
distances = np.zeros((n_query_rows, k), dtype=dtype)
index_device = device_ndarray(index)
queries_device = device_ndarray(queries)
indices_device = device_ndarray(indices)
distances_device = device_ndarray(distances)
s2 = Stream()
handle = DeviceResources(stream=s2)
ret_distances, ret_indices = knn(
index_device,
queries_device,
k,
indices=indices_device,
distances=distances_device,
metric=metric,
handle=handle,
)
handle.sync()
pw_dists = cdist(queries, index, metric=metric)
distances_device = ret_distances if not inplace else distances_device
actual_distances = distances_device.copy_to_host()
actual_distances[actual_distances <= 1e-5] = 0.0
argsort = np.argsort(pw_dists, axis=1)
for i in range(pw_dists.shape[0]):
expected_indices = argsort[i]
gpu_dists = actual_distances[i]
cpu_ordered = pw_dists[i, expected_indices]
np.testing.assert_allclose(
cpu_ordered[:k], gpu_dists, atol=1e-3, rtol=1e-3
)
def test_knn_check_col_major_inputs():
    """Column-major (Fortran-ordered) inputs must raise ValueError rather
    than silently producing wrong neighbors."""
    cp = pytest.importorskip("cupy")
    rows_index, rows_query, dim = 128, 16, 32
    index = cp.random.random_sample((rows_index, dim), dtype="float32")
    queries = cp.random.random_sample((rows_query, dim), dtype="float32")
    # Either argument being F-ordered is an error.
    for bad_index, bad_queries in (
        (cp.asarray(index, order="F"), queries),
        (index, cp.asarray(queries, order="F")),
    ):
        with pytest.raises(ValueError):
            knn(bad_index, bad_queries, k=4)
    # shouldn't throw an exception with c-contiguous inputs
    knn(index, queries, k=4)
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs | rapidsai_public_repos/cuvs/python/cuvs/cuvs/test/test_ivf_flat.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.common import device_ndarray
from pylibraft.neighbors import ivf_flat
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
def generate_data(shape, dtype):
    """Return a random array of the given shape and dtype.

    Integer byte types draw uniform ints inside the type's range
    (np.random.randint's upper bound is exclusive); every other dtype
    gets uniform floats in [0, 1) cast to the requested dtype.
    """
    if dtype == np.byte:
        return np.random.randint(-127, 128, size=shape, dtype=np.byte)
    if dtype == np.ubyte:
        return np.random.randint(0, 255, size=shape, dtype=np.ubyte)
    return np.random.random_sample(shape).astype(dtype)
def calc_recall(ann_idx, true_nn_idx):
    """Fraction of approximate-NN ids that also appear in the exact
    neighbor list of the same query row (both arrays are (n_queries, k))."""
    assert ann_idx.shape == true_nn_idx.shape
    hits = sum(
        np.intersect1d(approx_row, exact_row).size
        for approx_row, exact_row in zip(ann_idx, true_nn_idx)
    )
    return hits / ann_idx.size
def check_distances(dataset, queries, metric, out_idx, out_dist, eps=None):
    """
    Calculate the real distance between queries and dataset[out_idx],
    and compare it to out_dist.
    """
    if eps is None:
        # Quantization leads to errors in the distance calculation.
        # The aim of this test is not to test precision, but to catch obvious
        # errors.
        eps = 0.1
    recomputed = np.empty(out_dist.shape, out_dist.dtype)
    for row, query in enumerate(queries):
        q = query[np.newaxis, :]
        neighbors = dataset[out_idx[row, :], :]
        if metric in ("sqeuclidean", "euclidean"):
            recomputed[row, :] = pairwise_distances(q, neighbors, metric)
        elif metric == "inner_product":
            recomputed[row, :] = np.matmul(q, neighbors.T)
        else:
            raise ValueError("Invalid metric")
    # Relative error with the denominator clamped away from zero.
    scale = abs(recomputed)
    scale[recomputed < 1e-3] = 1e-3
    relative_error = abs(out_dist - recomputed) / scale
    assert np.mean(relative_error) < eps
def run_ivf_flat_build_search_test(
    n_rows,
    n_cols,
    n_queries,
    k,
    n_lists,
    metric,
    dtype,
    add_data_on_build=True,
    n_probes=100,
    kmeans_trainset_fraction=1,
    kmeans_n_iters=20,
    compare=True,
    inplace=True,
    array_type="device",
):
    """Build an IVF-Flat index, search it, and (optionally) compare recall
    and distances against an sklearn brute-force reference.

    Parameters mirror ivf_flat.IndexParams/SearchParams; `array_type`
    selects host vs device input arrays, `inplace` selects preallocated
    vs returned output buffers, and `compare=False` skips verification
    (used for edge-case smoke tests).
    """
    dataset = generate_data((n_rows, n_cols), dtype)
    # inner_product recall is checked against sklearn's cosine metric, so
    # rows must be L2-normalized for the two to agree.
    if metric == "inner_product":
        dataset = normalize(dataset, norm="l2", axis=1)
    dataset_device = device_ndarray(dataset)
    build_params = ivf_flat.IndexParams(
        n_lists=n_lists,
        metric=metric,
        kmeans_n_iters=kmeans_n_iters,
        kmeans_trainset_fraction=kmeans_trainset_fraction,
        add_data_on_build=add_data_on_build,
    )
    if array_type == "device":
        index = ivf_flat.build(build_params, dataset_device)
    else:
        index = ivf_flat.build(build_params, dataset)
    assert index.trained
    assert index.metric == build_params.metric
    assert index.n_lists == build_params.n_lists
    if not add_data_on_build:
        # Exercise extend(): add the data in two halves with explicit ids.
        dataset_1 = dataset[: n_rows // 2, :]
        dataset_2 = dataset[n_rows // 2 :, :]
        indices_1 = np.arange(n_rows // 2, dtype=np.int64)
        indices_2 = np.arange(n_rows // 2, n_rows, dtype=np.int64)
        if array_type == "device":
            dataset_1_device = device_ndarray(dataset_1)
            dataset_2_device = device_ndarray(dataset_2)
            indices_1_device = device_ndarray(indices_1)
            indices_2_device = device_ndarray(indices_2)
            index = ivf_flat.extend(index, dataset_1_device, indices_1_device)
            index = ivf_flat.extend(index, dataset_2_device, indices_2_device)
        else:
            index = ivf_flat.extend(index, dataset_1, indices_1)
            index = ivf_flat.extend(index, dataset_2, indices_2)
    assert index.size >= n_rows
    queries = generate_data((n_queries, n_cols), dtype)
    out_idx = np.zeros((n_queries, k), dtype=np.int64)
    out_dist = np.zeros((n_queries, k), dtype=np.float32)
    queries_device = device_ndarray(queries)
    # inplace=True passes preallocated outputs; otherwise search allocates.
    out_idx_device = device_ndarray(out_idx) if inplace else None
    out_dist_device = device_ndarray(out_dist) if inplace else None
    search_params = ivf_flat.SearchParams(n_probes=n_probes)
    ret_output = ivf_flat.search(
        search_params,
        index,
        queries_device,
        k,
        neighbors=out_idx_device,
        distances=out_dist_device,
    )
    if not inplace:
        out_dist_device, out_idx_device = ret_output
    if not compare:
        return
    out_idx = out_idx_device.copy_to_host()
    out_dist = out_dist_device.copy_to_host()
    # Calculate reference values with sklearn
    skl_metric = {
        "sqeuclidean": "sqeuclidean",
        "inner_product": "cosine",
        "euclidean": "euclidean",
    }[metric]
    nn_skl = NearestNeighbors(
        n_neighbors=k, algorithm="brute", metric=skl_metric
    )
    nn_skl.fit(dataset)
    skl_idx = nn_skl.kneighbors(queries, return_distance=False)
    # ANN recall threshold is deliberately loose; this is a sanity check,
    # not a precision benchmark.
    recall = calc_recall(out_idx, skl_idx)
    assert recall > 0.7
    check_distances(dataset, queries, metric, out_idx, out_dist)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("n_rows", [10000])
@pytest.mark.parametrize("n_cols", [10])
@pytest.mark.parametrize("n_queries", [100])
@pytest.mark.parametrize("n_lists", [100])
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.uint8])
@pytest.mark.parametrize("array_type", ["device"])
def test_ivf_pq_dtypes(
n_rows, n_cols, n_queries, n_lists, dtype, inplace, array_type
):
# Note that inner_product tests use normalized input which we cannot
# represent in int8, therefore we test only sqeuclidean metric here.
run_ivf_flat_build_search_test(
n_rows=n_rows,
n_cols=n_cols,
n_queries=n_queries,
k=10,
n_lists=n_lists,
metric="sqeuclidean",
dtype=dtype,
inplace=inplace,
array_type=array_type,
)
@pytest.mark.parametrize(
    "params",
    [
        pytest.param(
            {
                "n_rows": 0,
                "n_cols": 10,
                "n_queries": 10,
                "k": 1,
                "n_lists": 10,
            },
            marks=pytest.mark.xfail(reason="empty dataset"),
        ),
        {"n_rows": 1, "n_cols": 10, "n_queries": 10, "k": 1, "n_lists": 1},
        {"n_rows": 10, "n_cols": 1, "n_queries": 10, "k": 10, "n_lists": 10},
        # {"n_rows": 999, "n_cols": 42, "n_queries": 453, "k": 137,
        #  "n_lists": 53},
    ],
)
def test_ivf_flat_n(params):
    """Edge-case sizes: empty dataset (expected failure), single row,
    single column. Recall is not checked (compare=False)."""
    # We do not test recall, just confirm that we can handle edge cases for
    # certain parameters
    run_ivf_flat_build_search_test(
        n_rows=params["n_rows"],
        n_cols=params["n_cols"],
        n_queries=params["n_queries"],
        k=params["k"],
        n_lists=params["n_lists"],
        metric="sqeuclidean",
        dtype=np.float32,
        compare=False,
    )
@pytest.mark.parametrize(
    "metric", ["sqeuclidean", "inner_product", "euclidean"]
)
@pytest.mark.parametrize("dtype", [np.float32])
def test_ivf_flat_build_params(metric, dtype):
    """Build-and-search smoke test across all supported metrics."""
    run_ivf_flat_build_search_test(
        n_rows=10000,
        n_cols=10,
        n_queries=1000,
        k=10,
        n_lists=100,
        metric=metric,
        dtype=dtype,
        add_data_on_build=True,
        n_probes=100,
    )
@pytest.mark.parametrize(
    "params",
    [
        {
            "n_lists": 100,
            "trainset_fraction": 0.9,
            "n_iters": 30,
        },
    ],
)
def test_ivf_flat_params(params):
    """Exercise non-default kmeans training parameters
    (trainset fraction and iteration count)."""
    run_ivf_flat_build_search_test(
        n_rows=10000,
        n_cols=16,
        n_queries=1000,
        k=10,
        n_lists=params["n_lists"],
        metric="sqeuclidean",
        dtype=np.float32,
        # Fall back to the helper's defaults when a key is absent.
        kmeans_trainset_fraction=params.get("trainset_fraction", 1.0),
        kmeans_n_iters=params.get("n_iters", 20),
    )
@pytest.mark.parametrize(
    "params",
    [
        {
            "k": 10,
            "n_probes": 100,
        },
        {
            "k": 10,
            "n_probes": 99,
        },
        # NOTE(review): this entry duplicates the first {"k": 10,
        # "n_probes": 100} case — probably meant to vary one value.
        {
            "k": 10,
            "n_probes": 100,
        },
        {
            "k": 129,
            "n_probes": 100,
        },
    ],
)
def test_ivf_pq_search_params(params):
    """Vary k and n_probes (including k > 128).

    NOTE(review): despite the name, this exercises ivf_flat (this is the
    ivf_flat test module); confirm before renaming.
    """
    run_ivf_flat_build_search_test(
        n_rows=10000,
        n_cols=16,
        n_queries=1000,
        k=params["k"],
        n_lists=100,
        n_probes=params["n_probes"],
        metric="sqeuclidean",
        dtype=np.float32,
    )
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.uint8])
@pytest.mark.parametrize("array_type", ["device"])
def test_extend(dtype, array_type):
run_ivf_flat_build_search_test(
n_rows=10000,
n_cols=10,
n_queries=100,
k=10,
n_lists=100,
metric="sqeuclidean",
dtype=dtype,
add_data_on_build=False,
array_type=array_type,
)
def test_build_assertions():
    """Verify the error paths of build/search/extend: unsupported dtype,
    searching an untrained index, and shape-mismatched extend calls."""
    # float64 datasets are not supported by ivf_flat.build.
    with pytest.raises(TypeError):
        run_ivf_flat_build_search_test(
            n_rows=1000,
            n_cols=10,
            n_queries=100,
            k=10,
            n_lists=100,
            metric="sqeuclidean",
            dtype=np.float64,
        )
    n_rows = 1000
    n_cols = 100
    n_queries = 212
    k = 10
    dataset = generate_data((n_rows, n_cols), np.float32)
    dataset_device = device_ndarray(dataset)
    index_params = ivf_flat.IndexParams(
        n_lists=50,
        metric="sqeuclidean",
        kmeans_n_iters=20,
        kmeans_trainset_fraction=1,
        add_data_on_build=False,
    )
    # Default-constructed index: not trained yet.
    index = ivf_flat.Index()
    queries = generate_data((n_queries, n_cols), np.float32)
    out_idx = np.zeros((n_queries, k), dtype=np.int64)
    out_dist = np.zeros((n_queries, k), dtype=np.float32)
    queries_device = device_ndarray(queries)
    out_idx_device = device_ndarray(out_idx)
    out_dist_device = device_ndarray(out_dist)
    search_params = ivf_flat.SearchParams(n_probes=50)
    with pytest.raises(ValueError):
        # Index must be built before search
        ivf_flat.search(
            search_params,
            index,
            queries_device,
            k,
            out_idx_device,
            out_dist_device,
        )
    index = ivf_flat.build(index_params, dataset_device)
    assert index.trained
    # n_rows + 1 ids: deliberately one longer than any dataset below.
    indices = np.arange(n_rows + 1, dtype=np.int64)
    indices_device = device_ndarray(indices)
    with pytest.raises(ValueError):
        # Dataset dimension mismatch
        ivf_flat.extend(index, queries_device, indices_device)
    with pytest.raises(ValueError):
        # indices dimension mismatch
        ivf_flat.extend(index, dataset_device, indices_device)
@pytest.mark.parametrize(
    "params",
    [
        {"q_dt": np.float64},
        {"q_order": "F"},
        {"q_cols": 101},
        {"idx_dt": np.uint32},
        {"idx_order": "F"},
        {"idx_rows": 42},
        {"idx_cols": 137},
        {"dist_dt": np.float64},
        {"dist_order": "F"},
        {"dist_rows": 42},
        {"dist_cols": 137},
    ],
)
def test_search_inputs(params):
    """Test with invalid input dtype, order, or dimension.

    Each params entry perturbs exactly one property of one of the three
    search arrays (queries / neighbor ids / distances) away from its
    valid default; search must reject it.
    """
    n_rows = 1000
    n_cols = 100
    n_queries = 256
    k = 10
    dtype = np.float32
    # Queries: possibly wrong dtype, order, or column count.
    q_dt = params.get("q_dt", np.float32)
    q_order = params.get("q_order", "C")
    queries = generate_data(
        (n_queries, params.get("q_cols", n_cols)), q_dt
    ).astype(q_dt, order=q_order)
    queries_device = device_ndarray(queries)
    # Output neighbor ids: possibly wrong dtype, order, or shape.
    idx_dt = params.get("idx_dt", np.int64)
    idx_order = params.get("idx_order", "C")
    out_idx = np.zeros(
        (params.get("idx_rows", n_queries), params.get("idx_cols", k)),
        dtype=idx_dt,
        order=idx_order,
    )
    out_idx_device = device_ndarray(out_idx)
    # Output distances: possibly wrong dtype, order, or shape.
    dist_dt = params.get("dist_dt", np.float32)
    dist_order = params.get("dist_order", "C")
    out_dist = np.zeros(
        (params.get("dist_rows", n_queries), params.get("dist_cols", k)),
        dtype=dist_dt,
        order=dist_order,
    )
    out_dist_device = device_ndarray(out_dist)
    index_params = ivf_flat.IndexParams(
        n_lists=50, metric="sqeuclidean", add_data_on_build=True
    )
    dataset = generate_data((n_rows, n_cols), dtype)
    dataset_device = device_ndarray(dataset)
    index = ivf_flat.build(index_params, dataset_device)
    assert index.trained
    # Every parametrized perturbation must make search raise.
    with pytest.raises(Exception):
        search_params = ivf_flat.SearchParams(n_probes=50)
        ivf_flat.search(
            search_params,
            index,
            queries_device,
            k,
            out_idx_device,
            out_dist_device,
        )
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.ubyte])
def test_save_load(dtype):
n_rows = 10000
n_cols = 50
n_queries = 1000
dataset = generate_data((n_rows, n_cols), dtype)
dataset_device = device_ndarray(dataset)
build_params = ivf_flat.IndexParams(n_lists=100, metric="sqeuclidean")
index = ivf_flat.build(build_params, dataset_device)
assert index.trained
filename = "my_index.bin"
ivf_flat.save(filename, index)
loaded_index = ivf_flat.load(filename)
assert index.metric == loaded_index.metric
assert index.n_lists == loaded_index.n_lists
assert index.dim == loaded_index.dim
assert index.adaptive_centers == loaded_index.adaptive_centers
queries = generate_data((n_queries, n_cols), dtype)
queries_device = device_ndarray(queries)
search_params = ivf_flat.SearchParams(n_probes=100)
k = 10
distance_dev, neighbors_dev = ivf_flat.search(
search_params, index, queries_device, k
)
neighbors = neighbors_dev.copy_to_host()
dist = distance_dev.copy_to_host()
del index
distance_dev, neighbors_dev = ivf_flat.search(
search_params, loaded_index, queries_device, k
)
neighbors2 = neighbors_dev.copy_to_host()
dist2 = distance_dev.copy_to_host()
assert np.all(neighbors == neighbors2)
assert np.allclose(dist, dist2, rtol=1e-6)
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs | rapidsai_public_repos/cuvs/python/cuvs/cuvs/test/test_fused_l2_argmin.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.common import DeviceResources, device_ndarray
from pylibraft.distance import fused_l2_nn_argmin
from scipy.spatial.distance import cdist
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("n_rows", [10, 100])
@pytest.mark.parametrize("n_clusters", [5, 10])
@pytest.mark.parametrize("n_cols", [3, 5])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_fused_l2_nn_minarg(n_rows, n_cols, n_clusters, dtype, inplace):
input1 = np.random.random_sample((n_rows, n_cols))
input1 = np.asarray(input1, order="C").astype(dtype)
input2 = np.random.random_sample((n_clusters, n_cols))
input2 = np.asarray(input2, order="C").astype(dtype)
output = np.zeros((n_rows), dtype="int32")
expected = cdist(input1, input2, metric="euclidean")
expected = expected.argmin(axis=1)
input1_device = device_ndarray(input1)
input2_device = device_ndarray(input2)
output_device = device_ndarray(output) if inplace else None
handle = DeviceResources()
ret_output = fused_l2_nn_argmin(
input1_device, input2_device, output_device, True, handle=handle
)
handle.sync()
output_device = ret_output if not inplace else output_device
actual = output_device.copy_to_host()
assert np.allclose(expected, actual, rtol=1e-4)
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs | rapidsai_public_repos/cuvs/python/cuvs/cuvs/cluster/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Set the list of Cython files to build
set(cython_sources kmeans.pyx)
# Every extension module in this directory links against the compiled cuVS
# library target.
set(linked_libraries cuvs::compiled)
# Build all of the Cython targets
# (C++ sources, prefixed "cluster_" so module names don't collide with other
# subpackages; targets are associated with cuvs for install/export handling).
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" ASSOCIATED_TARGETS cuvs MODULE_PREFIX cluster_
)
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs | rapidsai_public_repos/cuvs/python/cuvs/cuvs/cluster/__init__.pxd | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs | rapidsai_public_repos/cuvs/python/cuvs/cuvs/cluster/__init__.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .kmeans import (
KMeansParams,
cluster_cost,
compute_new_centroids,
fit,
init_plus_plus,
)
__all__ = [
"KMeansParams",
"cluster_cost",
"compute_new_centroids",
"fit",
"init_plus_plus",
]
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs | rapidsai_public_repos/cuvs/python/cuvs/cuvs/cluster/kmeans.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
from cython.operator cimport dereference as deref
from libc.stdint cimport uintptr_t
from libcpp cimport nullptr
from collections import namedtuple
from enum import IntEnum
from pylibraft.common import Handle, cai_wrapper, device_ndarray
from pylibraft.common.handle import auto_sync_handle
from pylibraft.common.handle cimport device_resources
from pylibraft.random.cpp.rng_state cimport RngState
from pylibraft.common.input_validation import *
from pylibraft.distance import DISTANCE_TYPES
from pylibraft.cluster.cpp cimport kmeans as cpp_kmeans, kmeans_types
from pylibraft.cluster.cpp.kmeans cimport (
cluster_cost as cpp_cluster_cost,
init_plus_plus as cpp_init_plus_plus,
update_centroids,
)
from pylibraft.common.cpp.mdspan cimport *
from pylibraft.common.cpp.optional cimport optional
from pylibraft.common.handle cimport device_resources
from pylibraft.common import auto_convert_output
@auto_sync_handle
@auto_convert_output
def compute_new_centroids(X,
                          centroids,
                          labels,
                          new_centroids,
                          sample_weights=None,
                          weight_per_cluster=None,
                          handle=None):
    """
    Compute new centroids given an input matrix and existing centroids

    Parameters
    ----------
    X : Input CUDA array interface compliant matrix shape (m, k)
    centroids : Input CUDA array interface compliant matrix shape
                (n_clusters, k)
    labels : Input CUDA array interface compliant matrix shape
             (m, 1)
    new_centroids : Writable CUDA array interface compliant matrix shape
                    (n_clusters, k)
    sample_weights : Optional input CUDA array interface compliant matrix,
                     one weight per sample — presumably shape (m, 1);
                     earlier docs said (n_clusters, 1), verify against the
                     C++ update_centroids signature. default: None
    weight_per_cluster : Optional writable CUDA array interface compliant
                         matrix shape (n_clusters, 1) default: None
    {handle_docstring}

    .. note:: earlier versions of this docstring also listed
       ``batch_samples`` / ``batch_centroids`` parameters; the function
       does not accept them.

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import Handle
    >>> from pylibraft.cluster.kmeans import compute_new_centroids
    >>> # A single RAFT handle can optionally be reused across
    >>> # pylibraft functions.
    >>> handle = Handle()
    >>> n_samples = 5000
    >>> n_features = 50
    >>> n_clusters = 3
    >>> X = cp.random.random_sample((n_samples, n_features),
    ...                             dtype=cp.float32)
    >>> centroids = cp.random.random_sample((n_clusters, n_features),
    ...                                     dtype=cp.float32)
    ...
    >>> labels = cp.random.randint(0, high=n_clusters, size=n_samples,
    ...                            dtype=cp.int32)
    >>> new_centroids = cp.empty((n_clusters, n_features),
    ...                          dtype=cp.float32)
    >>> compute_new_centroids(
    ...     X, centroids, labels, new_centroids, handle=handle
    ... )
    >>> # pylibraft functions are often asynchronous so the
    >>> # handle needs to be explicitly synchronized
    >>> handle.sync()
    """
    # Pull raw CUDA array interface dicts to get pointers/shapes/dtypes.
    x_cai = X.__cuda_array_interface__
    centroids_cai = centroids.__cuda_array_interface__
    new_centroids_cai = new_centroids.__cuda_array_interface__
    labels_cai = labels.__cuda_array_interface__
    m = x_cai["shape"][0]
    x_k = x_cai["shape"][1]
    n_clusters = centroids_cai["shape"][0]
    centroids_k = centroids_cai["shape"][1]
    new_centroids_k = centroids_cai["shape"][1]
    x_dt = np.dtype(x_cai["typestr"])
    centroids_dt = np.dtype(centroids_cai["typestr"])
    new_centroids_dt = np.dtype(new_centroids_cai["typestr"])
    labels_dt = np.dtype(labels_cai["typestr"])
    if not do_cols_match(X, centroids):
        raise ValueError("X and centroids must have same number of columns.")
    if not do_rows_match(X, labels):
        raise ValueError("X and labels must have same number of rows")
    x_ptr = <uintptr_t>x_cai["data"][0]
    centroids_ptr = <uintptr_t>centroids_cai["data"][0]
    new_centroids_ptr = <uintptr_t>new_centroids_cai["data"][0]
    labels_ptr = <uintptr_t>labels_cai["data"][0]
    # Optional arrays: pass a null pointer when the caller omitted them.
    if sample_weights is not None:
        sample_weights_cai = sample_weights.__cuda_array_interface__
        sample_weights_ptr = <uintptr_t>sample_weights_cai["data"][0]
        sample_weights_dt = np.dtype(sample_weights_cai["typestr"])
    else:
        sample_weights_ptr = <uintptr_t>nullptr
    if weight_per_cluster is not None:
        weight_per_cluster_cai = weight_per_cluster.__cuda_array_interface__
        weight_per_cluster_ptr = <uintptr_t>weight_per_cluster_cai["data"][0]
        weight_per_cluster_dt = np.dtype(weight_per_cluster_cai["typestr"])
    else:
        weight_per_cluster_ptr = <uintptr_t>nullptr
    handle = handle if handle is not None else Handle()
    cdef device_resources *h = <device_resources*><size_t>handle.getHandle()
    # The C++ kernel assumes row-major contiguous storage throughout.
    x_c_contiguous = is_c_contiguous(x_cai)
    centroids_c_contiguous = is_c_contiguous(centroids_cai)
    new_centroids_c_contiguous = is_c_contiguous(new_centroids_cai)
    if not x_c_contiguous or not centroids_c_contiguous \
            or not new_centroids_c_contiguous:
        raise ValueError("Inputs must all be c contiguous")
    if not do_dtypes_match(X, centroids, new_centroids):
        raise ValueError("Inputs must all have the same dtypes "
                         "(float32 or float64)")
    # Dispatch on dtype to the matching C++ instantiation.
    if x_dt == np.float32:
        update_centroids(deref(h),
                         <float*> x_ptr,
                         <int> m,
                         <int> x_k,
                         <int> n_clusters,
                         <float*> sample_weights_ptr,
                         <float*> centroids_ptr,
                         <int*> labels_ptr,
                         <float*> new_centroids_ptr,
                         <float*> weight_per_cluster_ptr)
    elif x_dt == np.float64:
        update_centroids(deref(h),
                         <double*> x_ptr,
                         <int> m,
                         <int> x_k,
                         <int> n_clusters,
                         <double*> sample_weights_ptr,
                         <double*> centroids_ptr,
                         <int*> labels_ptr,
                         <double*> new_centroids_ptr,
                         <double*> weight_per_cluster_ptr)
    else:
        raise ValueError("dtype %s not supported" % x_dt)
@auto_sync_handle
@auto_convert_output
def init_plus_plus(X, n_clusters=None, seed=None, handle=None, centroids=None):
    """
    Compute initial centroids using the "kmeans++" algorithm.

    Parameters
    ----------
    X : Input CUDA array interface compliant matrix shape (m, k)
    n_clusters : Number of clusters to select
    seed : Controls the random sampling of centroids
    centroids : Optional writable CUDA array interface compliant matrix shape
                (n_clusters, k). Use instead of passing `n_clusters`.
    {handle_docstring}

    Returns
    -------
    The ``centroids`` array (newly allocated on device when not supplied).

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.cluster.kmeans import init_plus_plus
    >>> n_samples = 5000
    >>> n_features = 50
    >>> n_clusters = 3
    >>> X = cp.random.random_sample((n_samples, n_features),
    ...                             dtype=cp.float32)
    >>> centroids = init_plus_plus(X, n_clusters)
    """
    # NOTE(review): the message claims the two parameters are mutually
    # exclusive, but the guard only fires when both are passed AND
    # n_clusters disagrees with centroids' row count — confirm intent.
    if (n_clusters is not None and
            centroids is not None and n_clusters != centroids.shape[0]):
        msg = ("Parameters 'n_clusters' and 'centroids' "
               "are exclusive. Only pass one at a time.")
        raise RuntimeError(msg)
    cdef device_resources *h = <device_resources*><size_t>handle.getHandle()
    X_cai = cai_wrapper(X)
    X_cai.validate_shape_dtype(expected_dims=2)
    dtype = X_cai.dtype
    # Allocate the output on device unless the caller provided it.
    if centroids is not None:
        n_clusters = centroids.shape[0]
    else:
        centroids_shape = (n_clusters, X_cai.shape[1])
        centroids = device_ndarray.empty(centroids_shape, dtype=dtype)
    centroids_cai = cai_wrapper(centroids)
    # Can't set attributes of KMeansParameters after creating it, so taking
    # a detour via a dict to collect the possible constructor arguments
    params_ = dict(n_clusters=n_clusters)
    if seed is not None:
        params_["seed"] = seed
    params = KMeansParams(**params_)
    # Dispatch on dtype to the matching C++ instantiation.
    if dtype == np.float64:
        cpp_init_plus_plus(
            deref(h), params.c_obj,
            make_device_matrix_view[double, int, row_major](
                <double *><uintptr_t>X_cai.data,
                <int>X_cai.shape[0], <int>X_cai.shape[1]),
            make_device_matrix_view[double, int, row_major](
                <double *><uintptr_t>centroids_cai.data,
                <int>centroids_cai.shape[0], <int>centroids_cai.shape[1]),
        )
    elif dtype == np.float32:
        cpp_init_plus_plus(
            deref(h), params.c_obj,
            make_device_matrix_view[float, int, row_major](
                <float *><uintptr_t>X_cai.data,
                <int>X_cai.shape[0], <int>X_cai.shape[1]),
            make_device_matrix_view[float, int, row_major](
                <float *><uintptr_t>centroids_cai.data,
                <int>centroids_cai.shape[0], <int>centroids_cai.shape[1]),
        )
    else:
        raise ValueError(f"Unhandled dtype ({dtype}) for X.")
    return centroids
@auto_sync_handle
@auto_convert_output
def cluster_cost(X, centroids, handle=None):
    """
    Compute cluster cost (inertia) given an input matrix and existing
    centroids.

    Parameters
    ----------
    X : Input CUDA array interface compliant matrix shape (m, k)
    centroids : Input CUDA array interface compliant matrix shape
                (n_clusters, k)
    {handle_docstring}

    Returns
    -------
    The cost as a Python float (float32 or float64, matching the inputs).

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.cluster.kmeans import cluster_cost
    >>> n_samples = 5000
    >>> n_features = 50
    >>> n_clusters = 3
    >>> X = cp.random.random_sample((n_samples, n_features),
    ...                             dtype=cp.float32)
    >>> centroids = cp.random.random_sample((n_clusters, n_features),
    ...                                     dtype=cp.float32)
    >>> inertia = cluster_cost(X, centroids)
    """
    # Pull raw CUDA array interface dicts to get pointers/shapes/dtypes.
    x_cai = X.__cuda_array_interface__
    centroids_cai = centroids.__cuda_array_interface__
    m = x_cai["shape"][0]
    x_k = x_cai["shape"][1]
    n_clusters = centroids_cai["shape"][0]
    centroids_k = centroids_cai["shape"][1]
    x_dt = np.dtype(x_cai["typestr"])
    centroids_dt = np.dtype(centroids_cai["typestr"])
    if not do_cols_match(X, centroids):
        raise ValueError("X and centroids must have same number of columns.")
    x_ptr = <uintptr_t>x_cai["data"][0]
    centroids_ptr = <uintptr_t>centroids_cai["data"][0]
    handle = handle if handle is not None else Handle()
    cdef device_resources *h = <device_resources*><size_t>handle.getHandle()
    # The C++ kernel assumes row-major contiguous storage.
    x_c_contiguous = is_c_contiguous(x_cai)
    centroids_c_contiguous = is_c_contiguous(centroids_cai)
    if not x_c_contiguous or not centroids_c_contiguous:
        raise ValueError("Inputs must all be c contiguous")
    if not do_dtypes_match(X, centroids):
        raise ValueError("Inputs must all have the same dtypes "
                         "(float32 or float64)")
    # The C++ API writes the result through an out-pointer; one stack
    # variable per supported precision.
    cdef float f_cost = 0
    cdef double d_cost = 0
    if x_dt == np.float32:
        cpp_cluster_cost(deref(h),
                         <float*> x_ptr,
                         <int> m,
                         <int> x_k,
                         <int> n_clusters,
                         <float*> centroids_ptr,
                         <float*> &f_cost)
        return f_cost
    elif x_dt == np.float64:
        cpp_cluster_cost(deref(h),
                         <double*> x_ptr,
                         <int> m,
                         <int> x_k,
                         <int> n_clusters,
                         <double*> centroids_ptr,
                         <double*> &d_cost)
        return d_cost
    else:
        raise ValueError("dtype %s not supported" % x_dt)
class InitMethod(IntEnum):
    """Method for initializing kmeans centroids.

    Values mirror the C++ enum raft::cluster::KMeansParams::InitMethod
    so they can be passed straight through to the native layer.
    """

    KMeansPlusPlus = <int> kmeans_types.InitMethod.KMeansPlusPlus
    Random = <int> kmeans_types.InitMethod.Random
    Array = <int> kmeans_types.InitMethod.Array
cdef class KMeansParams:
    """ Specifies hyper-parameters for the kmeans algorithm.

    Each constructor argument, when given, is copied onto the underlying
    C++ KMeansParams struct; arguments left as None keep the C++
    defaults.

    Parameters
    ----------
    n_clusters : int, optional
        The number of clusters to form as well as the number of centroids
        to generate
    max_iter : int, optional
        Maximum number of iterations of the k-means algorithm for a single run
    tol : float, optional
        Relative tolerance with regards to inertia to declare convergence
    verbosity : int, optional
    seed: int, optional
        Seed to the random number generator.
    metric : str, optional
        Metric names to use for distance computation, see
        :func:`pylibraft.distance.pairwise_distance` for valid values.
    init : InitMethod, optional
    n_init : int, optional
        Number of instance k-means algorithm will be run with different seeds.
    oversampling_factor : float, optional
        Oversampling factor for use in the k-means algorithm
    batch_samples : int, optional
    batch_centroids : int, optional
    inertia_check : bool, optional
    """
    # The wrapped C++ parameter struct, passed by reference to fit().
    cdef kmeans_types.KMeansParams c_obj

    def __init__(self,
                 n_clusters: Optional[int] = None,
                 max_iter: Optional[int] = None,
                 tol: Optional[float] = None,
                 verbosity: Optional[int] = None,
                 seed: Optional[int] = None,
                 metric: Optional[str] = None,
                 init: Optional[InitMethod] = None,
                 n_init: Optional[int] = None,
                 oversampling_factor: Optional[float] = None,
                 batch_samples: Optional[int] = None,
                 batch_centroids: Optional[int] = None,
                 inertia_check: Optional[bool] = None):
        if n_clusters is not None:
            self.c_obj.n_clusters = n_clusters
        if max_iter is not None:
            self.c_obj.max_iter = max_iter
        if tol is not None:
            self.c_obj.tol = tol
        if verbosity is not None:
            self.c_obj.verbosity = verbosity
        if seed is not None:
            # The seed lives on the embedded RngState, not on the
            # params struct directly.
            self.c_obj.rng_state.seed = seed
        if metric is not None:
            # Translate the string metric name to the DistanceType enum
            # expected by the C++ layer.
            distance = DISTANCE_TYPES.get(metric)
            if distance is None:
                valid_metrics = list(DISTANCE_TYPES.keys())
                raise ValueError(f"Unknown metric '{metric}'. Valid values "
                                 f"are: {valid_metrics}")
            self.c_obj.metric = distance
        if init is not None:
            self.c_obj.init = init
        if n_init is not None:
            self.c_obj.n_init = n_init
        if oversampling_factor is not None:
            self.c_obj.oversampling_factor = oversampling_factor
        if batch_samples is not None:
            self.c_obj.batch_samples = batch_samples
        if batch_centroids is not None:
            self.c_obj.batch_centroids = batch_centroids
        if inertia_check is not None:
            self.c_obj.inertia_check = inertia_check

    # Read-only views onto the underlying C++ struct fields.
    @property
    def n_clusters(self):
        return self.c_obj.n_clusters

    @property
    def max_iter(self):
        return self.c_obj.max_iter

    @property
    def tol(self):
        return self.c_obj.tol

    @property
    def verbosity(self):
        return self.c_obj.verbosity

    @property
    def seed(self):
        return self.c_obj.rng_state.seed

    @property
    def init(self):
        return InitMethod(self.c_obj.init)

    @property
    def oversampling_factor(self):
        return self.c_obj.oversampling_factor

    @property
    def batch_samples(self):
        return self.c_obj.batch_samples

    @property
    def batch_centroids(self):
        return self.c_obj.batch_centroids

    @property
    def inertia_check(self):
        return self.c_obj.inertia_check
# Result triple returned by fit(): the final centroids, the inertia
# (sum of squared distances of samples to their closest cluster center)
# and the number of iterations performed.
FitOutput = namedtuple("FitOutput", "centroids inertia n_iter")
@auto_sync_handle
@auto_convert_output
def fit(
    KMeansParams params, X, centroids=None, sample_weights=None, handle=None
):
    """
    Find clusters with the k-means algorithm

    Parameters
    ----------
    params : KMeansParams
        Parameters to use to fit KMeans model
    X : Input CUDA array interface compliant matrix shape (m, k)
    centroids : Optional writable CUDA array interface compliant matrix
                shape (n_clusters, k)
    sample_weights : Optional input CUDA array interface compliant matrix shape
                     (n_clusters, 1) default: None
    {handle_docstring}

    Returns
    -------
    centroids : raft.device_ndarray
        The computed centroids for each cluster
    inertia : float
        Sum of squared distances of samples to their closest cluster center
    n_iter : int
        The number of iterations used to fit the model

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.cluster.kmeans import fit, KMeansParams
    >>> n_samples = 5000
    >>> n_features = 50
    >>> n_clusters = 3
    >>> X = cp.random.random_sample((n_samples, n_features),
    ...                             dtype=cp.float32)
    >>> params = KMeansParams(n_clusters=n_clusters)
    >>> centroids, inertia, n_iter = fit(params, X)
    """
    cdef device_resources *h = <device_resources*><size_t>handle.getHandle()

    # Output scalars filled in by the C++ call; one per supported dtype,
    # only the one matching the input dtype is used.
    cdef float f_inertia = 0.0
    cdef double d_inertia = 0.0
    cdef int n_iter = 0

    # Optional sample-weight views; left unset (nullopt) when the
    # caller provides no weights.
    cdef optional[device_vector_view[const double, int]] d_sample_weights
    cdef optional[device_vector_view[const float, int]] f_sample_weights

    X_cai = cai_wrapper(X)
    dtype = X_cai.dtype

    # Allocate the output centroids buffer when the caller did not
    # supply a writable one.
    if centroids is None:
        centroids_shape = (params.n_clusters, X_cai.shape[1])
        centroids = device_ndarray.empty(centroids_shape, dtype=dtype)
    centroids_cai = cai_wrapper(centroids)

    # validate inputs are all c-contiguous, and have a consistent dtype
    # and expected shape
    X_cai.validate_shape_dtype(2)
    centroids_cai.validate_shape_dtype(2, dtype)
    if sample_weights is not None:
        sample_weights_cai = cai_wrapper(sample_weights)
        sample_weights_cai.validate_shape_dtype(1, dtype)

    # Dispatch on dtype to the matching C++ overload.
    if dtype == np.float64:
        if sample_weights is not None:
            d_sample_weights = make_device_vector_view(
                <const double *><uintptr_t>sample_weights_cai.data,
                <int>sample_weights_cai.shape[0])
        cpp_kmeans.fit(
            deref(h),
            params.c_obj,
            make_device_matrix_view[double, int, row_major](
                <double *><uintptr_t>X_cai.data,
                <int>X_cai.shape[0], <int>X_cai.shape[1]),
            d_sample_weights,
            make_device_matrix_view[double, int, row_major](
                <double *><uintptr_t>centroids_cai.data,
                <int>centroids_cai.shape[0], <int>centroids_cai.shape[1]),
            make_host_scalar_view[double, int](&d_inertia),
            make_host_scalar_view[int, int](&n_iter))
        return FitOutput(centroids, d_inertia, n_iter)

    elif dtype == np.float32:
        if sample_weights is not None:
            f_sample_weights = make_device_vector_view(
                <const float *><uintptr_t>sample_weights_cai.data,
                <int>sample_weights_cai.shape[0])
        cpp_kmeans.fit(
            deref(h),
            params.c_obj,
            make_device_matrix_view[float, int, row_major](
                <float *><uintptr_t>X_cai.data,
                <int>X_cai.shape[0], <int>X_cai.shape[1]),
            f_sample_weights,
            make_device_matrix_view[float, int, row_major](
                <float *><uintptr_t>centroids_cai.data,
                <int>centroids_cai.shape[0], <int>centroids_cai.shape[1]),
            make_host_scalar_view[float, int](&f_inertia),
            make_host_scalar_view[int, int](&n_iter))
        return FitOutput(centroids, f_inertia, n_iter)

    else:
        raise ValueError(f"unhandled dtype {dtype}")
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs/cluster | rapidsai_public_repos/cuvs/python/cuvs/cuvs/cluster/cpp/kmeans_types.pxd | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcpp cimport bool
from pylibraft.distance.distance_type cimport DistanceType
from pylibraft.random.cpp.rng_state cimport RngState
cdef extern from "raft/cluster/kmeans_types.hpp" \
        namespace "raft::cluster::kmeans":

    # Centroid initialization strategies, mirrored from the C++ enum.
    ctypedef enum InitMethod 'raft::cluster::KMeansParams::InitMethod':
        KMeansPlusPlus 'raft::cluster::kmeans::KMeansParams::InitMethod::KMeansPlusPlus'  # noqa
        Random 'raft::cluster::kmeans::KMeansParams::InitMethod::Random'
        Array 'raft::cluster::kmeans::KMeansParams::InitMethod::Array'

    # Hyper-parameter struct forwarded to the C++ kmeans implementation;
    # field layout must match raft::cluster::kmeans::KMeansParams.
    cdef cppclass KMeansParams:
        KMeansParams() except +
        int n_clusters
        InitMethod init
        int max_iter
        double tol
        int verbosity
        RngState rng_state
        DistanceType metric
        int n_init
        double oversampling_factor
        int batch_samples
        int batch_centroids
        bool inertia_check
| 0 |
rapidsai_public_repos/cuvs/python/cuvs/cuvs/cluster | rapidsai_public_repos/cuvs/python/cuvs/cuvs/cluster/cpp/kmeans.pxd | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
from cython.operator cimport dereference as deref
from libc.stdint cimport uintptr_t
from libcpp cimport bool, nullptr
from pylibraft.cluster.cpp.kmeans_types cimport KMeansParams
from pylibraft.common.cpp.mdspan cimport *
from pylibraft.common.cpp.optional cimport optional
from pylibraft.common.handle cimport device_resources
cdef extern from "raft_runtime/cluster/kmeans.hpp" \
        namespace "raft::runtime::cluster::kmeans" nogil:

    # Single centroid-update step; overloaded for float64/float32.
    cdef void update_centroids(
        const device_resources& handle,
        const double *X,
        int n_samples,
        int n_features,
        int n_clusters,
        const double *sample_weights,
        const double *centroids,
        const int* labels,
        double *new_centroids,
        double *weight_per_cluster) except +

    cdef void update_centroids(
        const device_resources& handle,
        const float *X,
        int n_samples,
        int n_features,
        int n_clusters,
        const float *sample_weights,
        const float *centroids,
        const int* labels,
        float *new_centroids,
        float *weight_per_cluster) except +

    # Cluster cost for a fixed set of centroids; the result is written
    # through `cost`.
    cdef void cluster_cost(
        const device_resources& handle,
        const float* X,
        int n_samples,
        int n_features,
        int n_clusters,
        const float * centroids,
        float * cost) except +

    cdef void cluster_cost(
        const device_resources& handle,
        const double* X,
        int n_samples,
        int n_features,
        int n_clusters,
        const double * centroids,
        double * cost) except +

    # kmeans++ centroid initialization.
    cdef void init_plus_plus(
        const device_resources & handle,
        const KMeansParams& params,
        device_matrix_view[float, int, row_major] X,
        device_matrix_view[float, int, row_major] centroids) except +

    cdef void init_plus_plus(
        const device_resources & handle,
        const KMeansParams& params,
        device_matrix_view[double, int, row_major] X,
        device_matrix_view[double, int, row_major] centroids) except +

    # Full kmeans fit.
    # BUGFIX: both overloads previously declared two parameters named
    # `inertia` — the device_matrix_view one is the output centroids
    # matrix (see how the wrapper's fit() passes the centroids view in
    # that position). sample_weight is declared with const elements to
    # match the device_vector_view[const float/double, int] values the
    # callers construct.
    cdef void fit(
        const device_resources & handle,
        const KMeansParams& params,
        device_matrix_view[float, int, row_major] X,
        optional[device_vector_view[const float, int]] sample_weight,
        device_matrix_view[float, int, row_major] centroids,
        host_scalar_view[float, int] inertia,
        host_scalar_view[int, int] n_iter) except +

    cdef void fit(
        const device_resources & handle,
        const KMeansParams& params,
        device_matrix_view[double, int, row_major] X,
        optional[device_vector_view[const double, int]] sample_weight,
        device_matrix_view[double, int, row_major] centroids,
        host_scalar_view[double, int] inertia,
        host_scalar_view[int, int] n_iter) except +
| 0 |
rapidsai_public_repos/cuvs/python | rapidsai_public_repos/cuvs/python/cuvs-bench/pyproject.toml | # Copyright (c) 2023, NVIDIA CORPORATION.
[build-system]
build-backend = "setuptools.build_meta"
requires = [
"setuptools",
"wheel",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
[project]
name = "cuvs-ann-bench"
version = "24.02.00"
description = "cuVS benchmarks"
authors = [
{ name = "NVIDIA Corporation" },
]
license = { text = "Apache 2.0" }
requires-python = ">=3.9"
dependencies = [
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
classifiers = [
"Intended Audience :: Developers",
"Topic :: Database",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
[project.urls]
Homepage = "https://github.com/rapidsai/cuvs"
[tool.setuptools.packages.find]
where = ["src"]
[tool.setuptools.package-data]
"*" = ["*.*"]
[tool.isort]
line_length = 79
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
combine_as_imports = true
order_by_type = true
skip = [
"thirdparty",
".eggs",
".git",
".hg",
".mypy_cache",
".tox",
".venv",
"_build",
"buck-out",
"build",
"dist",
]
| 0 |
rapidsai_public_repos/cuvs/python | rapidsai_public_repos/cuvs/python/cuvs-bench/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2020 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/__init__.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/algos.yaml | faiss_gpu_flat:
executable: FAISS_GPU_FLAT_ANN_BENCH
requires_gpu: true
faiss_gpu_ivf_flat:
executable: FAISS_GPU_IVF_FLAT_ANN_BENCH
requires_gpu: true
faiss_gpu_ivf_pq:
executable: FAISS_GPU_IVF_PQ_ANN_BENCH
requires_gpu: true
faiss_gpu_ivf_sq:
executable: FAISS_GPU_IVF_PQ_ANN_BENCH
requires_gpu: true
faiss_cpu_flat:
executable: FAISS_CPU_FLAT_ANN_BENCH
requires_gpu: false
faiss_cpu_ivf_flat:
executable: FAISS_CPU_IVF_FLAT_ANN_BENCH
requires_gpu: false
faiss_cpu_ivf_pq:
executable: FAISS_CPU_IVF_PQ_ANN_BENCH
requires_gpu: false
raft_ivf_flat:
executable: RAFT_IVF_FLAT_ANN_BENCH
requires_gpu: true
raft_ivf_pq:
executable: RAFT_IVF_PQ_ANN_BENCH
requires_gpu: true
raft_cagra:
executable: RAFT_CAGRA_ANN_BENCH
requires_gpu: true
raft_brute_force:
executable: RAFT_BRUTE_FORCE_ANN_BENCH
requires_gpu: true
ggnn:
executable: GGNN_ANN_BENCH
requires_gpu: true
hnswlib:
executable: HNSWLIB_ANN_BENCH
requires_gpu: false
raft_cagra_hnswlib:
executable: RAFT_CAGRA_HNSWLIB_ANN_BENCH
requires_gpu: true
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/__main__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import itertools
import json
import os
import subprocess
import sys
import uuid
import warnings
from importlib import import_module
import yaml
# Mapping from textual RAFT log-level names to the numeric values
# accepted by the benchmark executables' --raft_log_level flag.
log_levels = {
    "off": 0,
    "error": 1,
    "warn": 2,
    "info": 3,
    "debug": 4,
    "trace": 5,
}


def parse_log_level(level_str):
    """Translate a (case-insensitive) log-level name to its numeric value.

    Parameters
    ----------
    level_str : str
        One of the keys in ``log_levels``, in any case.

    Raises
    ------
    ValueError
        If ``level_str`` is not a recognized log level.
    """
    # Normalize once so the membership check and the lookup agree.
    # BUGFIX: the original tested the raw string but looked up the
    # lowered one, so valid upper-case input like "INFO" was rejected.
    level = level_str.lower()
    if level not in log_levels:
        raise ValueError("Invalid log level: %s" % level_str)
    return log_levels[level]
def positive_int(input_str: str) -> int:
    """argparse type-checker: parse ``input_str`` as a strictly positive int.

    Raises argparse.ArgumentTypeError when the string is not an integer
    or is less than 1.
    """
    try:
        value = int(input_str)
    except ValueError:
        value = 0  # force the failure path below
    if value < 1:
        raise argparse.ArgumentTypeError(
            f"{input_str} is not a positive integer"
        )
    return value
def merge_build_files(build_dir, build_file, temp_build_file):
    """Merge fresh benchmark results into the persistent build file.

    Benchmark records are keyed by their ``name``; records from the
    temporary file override older records with the same name. Records
    whose ``real_time`` is not positive are dropped (a zero build time
    indicates the run errored out). The merged document — the temporary
    file's content with the combined benchmark list — is written back
    to ``build_file``.

    Parameters
    ----------
    build_dir : str
        Directory containing both files.
    build_file : str
        Name of the persistent build-results JSON file (created or
        overwritten by this call).
    temp_build_file : str
        Name of the JSON file produced by the latest benchmark run.

    Raises
    ------
    ValueError
        If ``temp_build_file`` does not exist in ``build_dir``.
    """
    build_json_path = os.path.join(build_dir, build_file)
    tmp_build_json_path = os.path.join(build_dir, temp_build_file)

    # Read the existing build file if present; a corrupt or unreadable
    # file is reported but otherwise treated as empty.
    build_dict = {}
    if os.path.isfile(build_json_path):
        try:
            with open(build_json_path, "r") as f:
                build_dict = json.load(f)
        except Exception as e:
            print(
                "Error loading existing build file: %s (%s)"
                % (build_json_path, e)
            )

    if not os.path.isfile(tmp_build_json_path):
        raise ValueError("Temp build file not found: %s" % tmp_build_json_path)
    with open(tmp_build_json_path, "r") as f:
        temp_build_dict = json.load(f)

    # BUGFIX: default to empty *lists*, not dicts — "benchmarks" is a
    # list of result records in google-benchmark JSON output.
    benchmarks = build_dict.get("benchmarks", [])
    tmp_benchmarks = temp_build_dict.get("benchmarks", [])

    # If the build time is absolute 0 then an error occurred.
    final_bench_dict = {}
    for b in benchmarks:
        if b["real_time"] > 0:
            final_bench_dict[b["name"]] = b
    for tmp_bench in tmp_benchmarks:
        if tmp_bench["real_time"] > 0:
            final_bench_dict[tmp_bench["name"]] = tmp_bench

    temp_build_dict["benchmarks"] = list(final_bench_dict.values())
    with open(build_json_path, "w") as f:
        f.write(json.dumps(temp_build_dict, indent=2))
def validate_algorithm(algos_conf, algo, gpu_present):
    """Return True when ``algo`` is configured and runnable on this machine.

    Without a GPU, only algorithms whose configuration explicitly sets
    ``requires_gpu`` to False are considered runnable.
    """
    if algo not in algos_conf:
        return False
    # On a GPU machine every configured algorithm is allowed.
    if gpu_present:
        return True
    return algos_conf[algo]["requires_gpu"] is False
def find_executable(algos_conf, algo, group, k, batch_size):
    """Locate the benchmark binary for ``algo``.

    The RAFT_HOME build tree (``$RAFT_HOME/cpp/build/<executable>``) is
    checked first, then the conda environment
    (``$CONDA_PREFIX/bin/ann/<executable>``).

    Returns
    -------
    tuple
        ``(executable_name, full_path, tag)`` where ``tag`` is
        ``"<algo>_<group>-<k>-<batch_size>"``, identifying this run
        configuration.

    Raises
    ------
    FileNotFoundError
        If the executable is found in neither location.
    """
    executable = algos_conf[algo]["executable"]
    return_str = f"{algo}_{group}-{k}-{batch_size}"

    build_path = os.getenv("RAFT_HOME")
    if build_path is not None:
        build_path = os.path.join(build_path, "cpp", "build", executable)
        if os.path.exists(build_path):
            print(f"-- Using RAFT bench from repository in {build_path}. ")
            return (executable, build_path, return_str)

    # if there is no build folder present, we look in the conda environment
    conda_path = os.getenv("CONDA_PREFIX")
    if conda_path is not None:
        conda_path = os.path.join(conda_path, "bin", "ann", executable)
        if os.path.exists(conda_path):
            print("-- Using RAFT bench found in conda environment. ")
            return (executable, conda_path, return_str)

    # BUGFIX: previously the function silently returned None when
    # CONDA_PREFIX was unset (or the RAFT_HOME path was missing and no
    # conda env existed); always fail loudly when nothing is found.
    raise FileNotFoundError(executable)
def run_build_and_search(
    conf_file,
    conf_filename,
    conf_filedir,
    executables_to_run,
    dataset_path,
    force,
    build,
    search,
    dry_run,
    k,
    batch_size,
    search_threads,
    mode="throughput",
    raft_log_level="info",
):
    """Invoke the ANN benchmark executables for build and/or search.

    For each ``(executable, path, algo)`` key in ``executables_to_run``,
    a temporary JSON config (dataset + search params + that algorithm's
    index definitions) is written and the benchmark binary is run as a
    subprocess with ``--build`` and/or ``--search``. With ``dry_run``
    the commands are only printed. Results land under
    ``<dataset_path>/<dataset name>/result/{build,search}/<algo>.json``.

    NOTE(review): ``conf_filedir`` is accepted but unused in this
    function — confirm against callers before removing.
    """
    for executable, ann_executable_path, algo in executables_to_run.keys():
        # Need to write temporary configuration
        temp_conf_filename = f"{conf_filename}_{algo}_{uuid.uuid1()}.json"
        with open(temp_conf_filename, "w") as f:
            temp_conf = dict()
            temp_conf["dataset"] = conf_file["dataset"]
            temp_conf["search_basic_param"] = conf_file["search_basic_param"]
            temp_conf["index"] = executables_to_run[
                (executable, ann_executable_path, algo)
            ]["index"]
            json_str = json.dumps(temp_conf, indent=2)
            f.write(json_str)

        legacy_result_folder = os.path.join(
            dataset_path, conf_file["dataset"]["name"], "result"
        )
        os.makedirs(legacy_result_folder, exist_ok=True)

        if build:
            build_folder = os.path.join(legacy_result_folder, "build")
            os.makedirs(build_folder, exist_ok=True)
            build_file = f"{algo}.json"
            # Fresh results go to a .lock file first, then get merged
            # into the persistent per-algorithm build file.
            temp_build_file = f"{build_file}.lock"
            cmd = [
                ann_executable_path,
                "--build",
                "--data_prefix=" + dataset_path,
                "--benchmark_out_format=json",
                "--benchmark_counters_tabular=true",
                "--benchmark_out="
                + f"{os.path.join(build_folder, temp_build_file)}",
                "--raft_log_level=" + f"{parse_log_level(raft_log_level)}",
            ]
            if force:
                cmd = cmd + ["--force"]
            cmd = cmd + [temp_conf_filename]

            if dry_run:
                print(
                    "Benchmark command for %s:\n%s\n" % (algo, " ".join(cmd))
                )
            else:
                try:
                    subprocess.run(cmd, check=True)
                    merge_build_files(
                        build_folder, build_file, temp_build_file
                    )
                except Exception as e:
                    print("Error occurred running benchmark: %s" % e)
                finally:
                    os.remove(os.path.join(build_folder, temp_build_file))
                    # Keep the temp config around if a search pass will
                    # still need it.
                    if not search:
                        os.remove(temp_conf_filename)

        if search:
            search_folder = os.path.join(legacy_result_folder, "search")
            os.makedirs(search_folder, exist_ok=True)
            cmd = [
                ann_executable_path,
                "--search",
                "--data_prefix=" + dataset_path,
                "--benchmark_counters_tabular=true",
                "--override_kv=k:%s" % k,
                "--override_kv=n_queries:%s" % batch_size,
                "--benchmark_min_warmup_time=1",
                "--benchmark_out_format=json",
                "--mode=%s" % mode,
                "--benchmark_out="
                + f"{os.path.join(search_folder, f'{algo}.json')}",
                "--raft_log_level=" + f"{parse_log_level(raft_log_level)}",
            ]
            if force:
                cmd = cmd + ["--force"]
            if search_threads:
                cmd = cmd + ["--threads=%s" % search_threads]
            cmd = cmd + [temp_conf_filename]

            if dry_run:
                print(
                    "Benchmark command for %s:\n%s\n" % (algo, " ".join(cmd))
                )
            else:
                try:
                    subprocess.run(cmd, check=True)
                except Exception as e:
                    print("Error occurred running benchmark: %s" % e)
                finally:
                    os.remove(temp_conf_filename)
def main():
    """Entry point for the ANN benchmark runner.

    Parses command-line arguments, loads the dataset and algorithm YAML
    configurations, filters algorithms/groups according to the CLI flags,
    expands the cartesian product of build and search parameters (applying
    optional per-algorithm constraint validators), and hands the resulting
    configuration to ``run_build_and_search``.

    Raises:
        ValueError: if the requested dataset has no configuration entry, or
            if build constraints are requested but the dataset config lacks
            a ``dims`` field.
    """
    scripts_path = os.path.dirname(os.path.realpath(__file__))
    call_path = os.getcwd()

    # GPU availability is inferred from whether RMM is importable; it gates
    # which algorithms pass validation below.
    try:
        import rmm  # noqa: F401

        gpu_present = True
    except ImportError:
        gpu_present = False

    # Read list of allowed algorithms
    with open(f"{scripts_path}/algos.yaml", "r") as f:
        algos_yaml = yaml.safe_load(f)

    if "RAPIDS_DATASET_ROOT_DIR" in os.environ:
        default_dataset_path = os.getenv("RAPIDS_DATASET_ROOT_DIR")
    else:
        default_dataset_path = os.path.join(call_path, "datasets/")

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--subset-size",
        type=positive_int,
        help="the number of subset rows of the dataset to build the index",
    )
    parser.add_argument(
        "-k",
        "--count",
        default=10,
        type=positive_int,
        help="the number of nearest neighbors to search for",
    )
    parser.add_argument(
        "-bs",
        "--batch-size",
        default=10000,
        type=positive_int,
        help="number of query vectors to use in each query trial",
    )
    parser.add_argument(
        "--dataset-configuration",
        help="path to YAML configuration file for datasets",
    )
    parser.add_argument(
        "--configuration",
        help="path to YAML configuration file or directory for algorithms\
              Any run groups found in the specified file/directory will \
              automatically override groups of the same name present in the \
              default configurations, including `base`",
    )
    parser.add_argument(
        "--dataset",
        help="name of dataset",
        default="glove-100-inner",
    )
    parser.add_argument(
        "--dataset-path",
        help="path to dataset folder, by default will look in "
        "RAPIDS_DATASET_ROOT_DIR if defined, otherwise a datasets "
        "subdirectory from the calling directory",
        default=default_dataset_path,
    )
    parser.add_argument("--build", action="store_true")
    parser.add_argument("--search", action="store_true")
    parser.add_argument(
        "--algorithms",
        help="run only comma separated list of named \
              algorithms. If parameters `groups` and `algo-groups \
              are both undefined, then group `base` is run by default",
        default=None,
    )
    parser.add_argument(
        "--groups",
        help="run only comma separated groups of parameters",
        default="base",
    )
    parser.add_argument(
        "--algo-groups",
        help='add comma separated <algorithm>.<group> to run. \
              Example usage: "--algo-groups=cuvs_cagra.large,hnswlib.large"',
    )
    parser.add_argument(
        "-f",
        "--force",
        help="re-run algorithms even if their results \
              already exist",
        action="store_true",
    )
    parser.add_argument(
        "-m",
        "--search-mode",
        help="run search in 'latency' (measure individual batches) or "
        "'throughput' (pipeline batches and measure end-to-end) mode",
        default="latency",
    )
    parser.add_argument(
        "-t",
        "--search-threads",
        help="specify the number threads to use for throughput benchmark."
        " Single value or a pair of min and max separated by ':'. "
        "Example: --search-threads=1:4. Power of 2 values between 'min' "
        "and 'max' will be used. If only 'min' is specified, then a "
        "single test is run with 'min' threads. By default min=1, "
        "max=<num hyper threads>.",
        default=None,
    )
    parser.add_argument(
        "-r",
        "--dry-run",
        help="dry-run mode will convert the yaml config for the specified "
        "algorithms and datasets to the json format that's consumed "
        "by the lower-level c++ binaries and then print the command "
        "to run execute the benchmarks but will not actually execute "
        "the command.",
        action="store_true",
    )
    parser.add_argument(
        "--raft-log-level",
        help="Log level, possible values are "
        "[off, error, warn, info, debug, trace]. "
        "Default: 'info'. Note that 'debug' or more detailed "
        "logging level requires that the library is compiled with "
        "-DRAFT_ACTIVE_LEVEL=<L> where <L> >= <requested log level>",
        default="info",
    )

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()

    # If both build and search are not provided,
    # run both
    if not args.build and not args.search:
        build = True
        search = True
    else:
        build = args.build
        search = args.search

    dry_run = args.dry_run
    mode = args.search_mode
    k = args.count
    batch_size = args.batch_size

    # Read configuration file associated to datasets
    if args.dataset_configuration:
        dataset_conf_f = args.dataset_configuration
    else:
        dataset_conf_f = os.path.join(scripts_path, "conf", "datasets.yaml")
    with open(dataset_conf_f, "r") as f:
        dataset_conf_all = yaml.safe_load(f)

    dataset_conf = None
    for dataset in dataset_conf_all:
        if args.dataset == dataset["name"]:
            dataset_conf = dataset
            break
    if not dataset_conf:
        raise ValueError("Could not find a dataset configuration")

    conf_file = dict()
    conf_file["dataset"] = dataset_conf
    if args.subset_size:
        conf_file["dataset"]["subset_size"] = args.subset_size

    conf_file["search_basic_param"] = {}
    conf_file["search_basic_param"]["k"] = k
    conf_file["search_basic_param"]["batch_size"] = batch_size

    # Collect the default algorithm configuration files (YAML only; any
    # leftover .json files in the directory are skipped).
    algos_conf_fs = os.listdir(os.path.join(scripts_path, "conf", "algos"))
    algos_conf_fs = [
        os.path.join(scripts_path, "conf", "algos", f)
        for f in algos_conf_fs
        if ".json" not in f
    ]
    conf_filedir = os.path.join(scripts_path, "conf", "algos")
    if args.configuration:
        if os.path.isdir(args.configuration):
            conf_filedir = args.configuration
            algos_conf_fs = algos_conf_fs + [
                os.path.join(args.configuration, f)
                for f in os.listdir(args.configuration)
                if ".json" not in f
            ]
        elif os.path.isfile(args.configuration):
            # BUG FIX: the previous split(os.sep)/join(*parts[:-1])
            # reconstruction dropped the leading separator of absolute
            # paths; os.path.dirname preserves it.
            conf_filedir = os.path.dirname(
                os.path.normpath(args.configuration)
            )
            algos_conf_fs = algos_conf_fs + [args.configuration]

    filter_algos = bool(args.algorithms)
    if filter_algos:
        allowed_algos = args.algorithms.split(",")
    named_groups = args.groups.split(",")
    filter_algo_groups = bool(args.algo_groups)
    allowed_algo_groups = None
    if filter_algo_groups:
        # Transpose [(algo, group), ...] into ([algos...], [groups...]).
        allowed_algo_groups = [
            algo_group.split(".") for algo_group in args.algo_groups.split(",")
        ]
        allowed_algo_groups = list(zip(*allowed_algo_groups))
    algos_conf = dict()
    for algo_f in algos_conf_fs:
        with open(algo_f, "r") as f:
            try:
                algo = yaml.safe_load(f)
            except Exception as e:
                # BUG FIX: `e.with_traceback()` raised a TypeError (it
                # requires a traceback argument) and is not a string;
                # format the exception into the message instead.
                warnings.warn(
                    f"Could not load YAML config {algo_f} due to {e}"
                )
                continue
        insert_algo = True
        insert_algo_group = False
        if filter_algos:
            if algo["name"] not in allowed_algos:
                insert_algo = False
        if filter_algo_groups:
            if algo["name"] in allowed_algo_groups[0]:
                insert_algo_group = True

        def add_algo_group(group_list):
            # Merge the requested run groups (and any constraints) of this
            # algorithm into the aggregated configuration.
            if algo["name"] not in algos_conf:
                algos_conf[algo["name"]] = {"groups": {}}
            for group in algo["groups"].keys():
                if group in group_list:
                    algos_conf[algo["name"]]["groups"][group] = algo[
                        "groups"
                    ][group]
            if "constraints" in algo:
                algos_conf[algo["name"]]["constraints"] = algo[
                    "constraints"
                ]

        if insert_algo:
            add_algo_group(named_groups)
        if insert_algo_group:
            add_algo_group(allowed_algo_groups[1])

    executables_to_run = dict()
    for algo in algos_conf.keys():
        validate_algorithm(algos_yaml, algo, gpu_present)
        for group in algos_conf[algo]["groups"].keys():
            executable = find_executable(
                algos_yaml, algo, group, k, batch_size
            )
            if executable not in executables_to_run:
                executables_to_run[executable] = {"index": []}
            build_params = algos_conf[algo]["groups"][group]["build"]
            search_params = algos_conf[algo]["groups"][group]["search"]

            param_names = []
            param_lists = []
            for param in build_params.keys():
                param_names.append(param)
                param_lists.append(build_params[param])

            all_build_params = itertools.product(*param_lists)

            search_param_names = []
            search_param_lists = []
            for search_param in search_params.keys():
                search_param_names.append(search_param)
                search_param_lists.append(search_params[search_param])

            for params in all_build_params:
                index = {"algo": algo, "build_param": {}}
                if group != "base":
                    index_name = f"{algo}_{group}"
                else:
                    index_name = f"{algo}"
                for i in range(len(params)):
                    index["build_param"][param_names[i]] = params[i]
                    index_name += "." + f"{param_names[i]}{params[i]}"

                if "constraints" in algos_conf[algo]:
                    if "build" in algos_conf[algo]["constraints"]:
                        # The constraint is given as a dotted import path to
                        # a validator callable; resolve and invoke it.
                        importable = algos_conf[algo]["constraints"]["build"]
                        importable = importable.split(".")
                        module = ".".join(importable[:-1])
                        func = importable[-1]
                        validator = import_module(module)
                        build_constraints = getattr(validator, func)
                        if "dims" not in conf_file["dataset"]:
                            raise ValueError(
                                "`dims` needed for build constraints but not "
                                "specified in datasets.yaml"
                            )
                        if not build_constraints(
                            index["build_param"], conf_file["dataset"]["dims"]
                        ):
                            continue

                index["name"] = index_name
                index["file"] = os.path.join(
                    args.dataset_path, args.dataset, "index", index_name
                )
                index["search_params"] = []
                all_search_params = itertools.product(*search_param_lists)
                # NOTE: fresh loop variable so the `search_params` dict read
                # above is not clobbered (the original shadowed it).
                for sp_values in all_search_params:
                    search_dict = dict()
                    for i in range(len(sp_values)):
                        search_dict[search_param_names[i]] = sp_values[i]
                    if "constraints" in algos_conf[algo]:
                        if "search" in algos_conf[algo]["constraints"]:
                            importable = algos_conf[algo]["constraints"][
                                "search"
                            ]
                            importable = importable.split(".")
                            module = ".".join(importable[:-1])
                            func = importable[-1]
                            validator = import_module(module)
                            search_constraints = getattr(validator, func)
                            if search_constraints(
                                search_dict,
                                index["build_param"],
                                k,
                                batch_size,
                            ):
                                index["search_params"].append(search_dict)
                    else:
                        index["search_params"].append(search_dict)
                executables_to_run[executable]["index"].append(index)

                if len(index["search_params"]) == 0:
                    print("No search parameters were added to configuration")

    run_build_and_search(
        conf_file,
        f"{args.dataset}",
        conf_filedir,
        executables_to_run,
        args.dataset_path,
        args.force,
        build,
        search,
        dry_run,
        k,
        batch_size,
        args.search_threads,
        mode,
        args.raft_log_level,
    )


if __name__ == "__main__":
    main()
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/glove-50-inner.json | {
"dataset": {
"name": "glove-50-inner",
"base_file": "glove-50-inner/base.fbin",
"query_file": "glove-50-inner/query.fbin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 5000,
"k": 10,
"run_count": 3
},
"index": [
{
"name" : "hnswlib.M12",
"algo" : "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file" : "index/glove-50-inner/hnswlib/M12",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-50-inner/hnswlib/M12"
},
{
"name" : "hnswlib.M16",
"algo" : "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file" : "index/glove-50-inner/hnswlib/M16",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-50-inner/hnswlib/M16"
},
{
"name" : "hnswlib.M24",
"algo" : "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file" : "index/glove-50-inner/hnswlib/M24",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-50-inner/hnswlib/M24"
},
{
"name" : "hnswlib.M36",
"algo" : "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file" : "index/glove-50-inner/hnswlib/M36",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-50-inner/hnswlib/M36"
},
{
"name": "raft_bfknn",
"algo": "raft_bfknn",
"build_param": {},
"file": "index/glove-50-inner/raft_bfknn/bfknn",
"search_params": [
{
"probe": 1
}
],
"search_result_file": "result/glove-50-inner/raft_bfknn/bfknn"
},
{
"name": "faiss_ivf_flat.nlist1024",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 1024
},
"file": "index/glove-50-inner/faiss_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_flat/nlist1024"
},
{
"name": "faiss_ivf_flat.nlist2048",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 2048
},
"file": "index/glove-50-inner/faiss_ivf_flat/nlist2048",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_flat/nlist2048"
},
{
"name": "faiss_ivf_flat.nlist4096",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 4096
},
"file": "index/glove-50-inner/faiss_ivf_flat/nlist4096",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_flat/nlist4096"
},
{
"name": "faiss_ivf_flat.nlist8192",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 8192
},
"file": "index/glove-50-inner/faiss_ivf_flat/nlist8192",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_flat/nlist8192"
},
{
"name": "faiss_ivf_flat.nlist16384",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 16384
},
"file": "index/glove-50-inner/faiss_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_flat/nlist16384"
},
{
"name": "faiss_ivf_pq.M64-nlist1024",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": true
},
"file": "index/glove-50-inner/faiss_ivf_pq/M64-nlist1024",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_ivf_pq.M64-nlist1024.noprecomp",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": false
},
"file": "index/glove-50-inner/faiss_ivf_pq/M64-nlist1024.noprecomp",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_ivf_sq.nlist1024-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "fp16"
},
"file": "index/glove-50-inner/faiss_ivf_sq/nlist1024-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_sq/nlist1024-fp16"
},
{
"name": "faiss_ivf_sq.nlist2048-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "fp16"
},
"file": "index/glove-50-inner/faiss_ivf_sq/nlist2048-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_sq/nlist2048-fp16"
},
{
"name": "faiss_ivf_sq.nlist4096-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "fp16"
},
"file": "index/glove-50-inner/faiss_ivf_sq/nlist4096-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_sq/nlist4096-fp16"
},
{
"name": "faiss_ivf_sq.nlist8192-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "fp16"
},
"file": "index/glove-50-inner/faiss_ivf_sq/nlist8192-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_sq/nlist8192-fp16"
},
{
"name": "faiss_ivf_sq.nlist16384-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "fp16"
},
"file": "index/glove-50-inner/faiss_ivf_sq/nlist16384-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_sq/nlist16384-fp16"
},
{
"name": "faiss_ivf_sq.nlist1024-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "int8"
},
"file": "index/glove-50-inner/faiss_ivf_sq/nlist1024-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_sq/nlist1024-int8"
},
{
"name": "faiss_ivf_sq.nlist2048-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "int8"
},
"file": "index/glove-50-inner/faiss_ivf_sq/nlist2048-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_sq/nlist2048-int8"
},
{
"name": "faiss_ivf_sq.nlist4096-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "int8"
},
"file": "index/glove-50-inner/faiss_ivf_sq/nlist4096-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_sq/nlist4096-int8"
},
{
"name": "faiss_ivf_sq.nlist8192-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "int8"
},
"file": "index/glove-50-inner/faiss_ivf_sq/nlist8192-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_sq/nlist8192-int8"
},
{
"name": "faiss_ivf_sq.nlist16384-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "int8"
},
"file": "index/glove-50-inner/faiss_ivf_sq/nlist16384-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-50-inner/faiss_ivf_sq/nlist16384-int8"
},
{
"name": "faiss_flat",
"algo": "faiss_gpu_flat",
"build_param": {},
"file": "index/glove-50-inner/faiss_flat/flat",
"search_params": [
{}
],
"search_result_file": "result/glove-50-inner/faiss_flat/flat"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-inner/raft_ivf_pq/dimpq128-cluster1024",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
}
],
"search_result_file": "result/glove-50-inner/raft_ivf_pq/dimpq128-cluster1024"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-inner/raft_ivf_pq/dimpq128-cluster1024-float-float",
"search_params": [
{
"k": 10,
"nprobe": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 5,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/glove-50-inner/raft_ivf_pq/dimpq128-cluster1024-float-float"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-inner/raft_ivf_pq/dimpq128-cluster1024-float-half",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/glove-50-inner/raft_ivf_pq/dimpq128-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-inner/raft_ivf_pq/dimpq128-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-50-inner/raft_ivf_pq/dimpq128-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-inner/raft_ivf_pq/dimpq64-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-50-inner/raft_ivf_pq/dimpq64-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-inner/raft_ivf_pq/dimpq64-cluster1024-float-half",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/glove-50-inner/raft_ivf_pq/dimpq64-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq32-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 32,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-inner/raft_ivf_pq/dimpq32-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-50-inner/raft_ivf_pq/dimpq32-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq16-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 16,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-inner/raft_ivf_pq/dimpq16-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-50-inner/raft_ivf_pq/dimpq16-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-half-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-inner/raft_ivf_pq/dimpq128-cluster1024-half-float",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
}
],
"search_result_file": "result/glove-50-inner/raft_ivf_pq/dimpq128-cluster1024-half-float"
},
{
"name": "raft_ivf_pq.dimpq512-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 512,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-inner/raft_ivf_pq/dimpq512-cluster1024-float-float",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/glove-50-inner/raft_ivf_pq/dimpq512-cluster1024-float-float"
},
{
"name": "raft_ivf_flat.nlist1024",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 1024,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-inner/raft_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-inner/raft_ivf_flat/nlist1024"
},
{
"name": "raft_ivf_flat.nlist16384",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 16384,
"ratio": 2,
"niter": 20
},
"file": "index/glove-50-inner/raft_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-50-inner/raft_ivf_flat/nlist16384"
},
{
"name" : "raft_cagra.dim32",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 32
},
"file" : "index/glove-50-inner/raft_cagra/dim32",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/glove-50-inner/raft_cagra/dim32"
},
{
"name" : "raft_cagra.dim64",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 64
},
"file" : "index/glove-50-inner/raft_cagra/dim64",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/glove-50-inner/raft_cagra/dim64"
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/datasets.yaml | - name: bigann-1B
base_file: bigann-1B/base.1B.u8bin
subset_size: 100000000
dims: 128
query_file: bigann-1B/query.public.10K.u8bin
groundtruth_neighbors_file: bigann-1B/groundtruth.neighbors.ibin
distance: euclidean
- name: deep-1B
base_file: deep-1B/base.1B.fbin
query_file: deep-1B/query.public.10K.fbin
dims: 96
groundtruth_neighbors_file: deep-1B/groundtruth.neighbors.ibin
distance: inner_product
- name: bigann-100M
base_file: bigann-100M/base.1B.u8bin
subset_size: 100000000
dims: 128
query_file: bigann-100M/query.public.10K.u8bin
groundtruth_neighbors_file: bigann-100M/groundtruth.neighbors.ibin
distance: euclidean
- name: deep-image-96-inner
base_file: deep-image-96-inner/base.fbin
query_file: deep-image-96-inner/query.fbin
dims: 96
groundtruth_neighbors_file: deep-image-96-inner/groundtruth.neighbors.ibin
distance: euclidean
- name: fashion-mnist-784-euclidean
dims: 784
base_file: fashion-mnist-784-euclidean/base.fbin
query_file: fashion-mnist-784-euclidean/query.fbin
groundtruth_neighbors_file: fashion-mnist-784-euclidean/groundtruth.neighbors.ibin
distance: euclidean
- name: gist-960-euclidean
dims: 960
base_file: gist-960-euclidean/base.fbin
query_file: gist-960-euclidean/query.fbin
groundtruth_neighbors_file: gist-960-euclidean/groundtruth.neighbors.ibin
distance: euclidean
- name: glove-50-angular
dims: 50
base_file: glove-50-angular/base.fbin
query_file: glove-50-angular/query.fbin
groundtruth_neighbors_file: glove-50-angular/groundtruth.neighbors.ibin
distance: euclidean
- name: glove-50-inner
dims: 50
base_file: glove-50-inner/base.fbin
query_file: glove-50-inner/query.fbin
groundtruth_neighbors_file: glove-50-inner/groundtruth.neighbors.ibin
distance: euclidean
- name: glove-100-angular
dims: 100
base_file: glove-100-angular/base.fbin
query_file: glove-100-angular/query.fbin
groundtruth_neighbors_file: glove-100-angular/groundtruth.neighbors.ibin
distance: euclidean
- name: glove-100-inner
dims: 100
base_file: glove-100-inner/base.fbin
query_file: glove-100-inner/query.fbin
groundtruth_neighbors_file: glove-100-inner/groundtruth.neighbors.ibin
distance: euclidean
- name: lastfm-65-angular
dims: 65
base_file: lastfm-65-angular/base.fbin
query_file: lastfm-65-angular/query.fbin
groundtruth_neighbors_file: lastfm-65-angular/groundtruth.neighbors.ibin
distance: euclidean
- name: mnist-784-euclidean
dims: 784
base_file: mnist-784-euclidean/base.fbin
query_file: mnist-784-euclidean/query.fbin
groundtruth_neighbors_file: mnist-784-euclidean/groundtruth.neighbors.ibin
distance: euclidean
- name: nytimes-256-angular
dims: 256
base_file: nytimes-256-angular/base.fbin
query_file: nytimes-256-angular/query.fbin
groundtruth_neighbors_file: nytimes-256-angular/groundtruth.neighbors.ibin
distance: euclidean
- name: nytimes-256-inner
dims: 256
base_file: nytimes-256-inner/base.fbin
query_file: nytimes-256-inner/query.fbin
groundtruth_neighbors_file: nytimes-256-inner/groundtruth.neighbors.ibin
distance: euclidean
- name: sift-128-euclidean
dims: 128
base_file: sift-128-euclidean/base.fbin
query_file: sift-128-euclidean/query.fbin
groundtruth_neighbors_file: sift-128-euclidean/groundtruth.neighbors.ibin
distance: euclidean
- name: wiki_all_1M
dims: 768
base_file: wiki_all_1M/base.1M.fbin
query_file: wiki_all_1M/queries.fbin
groundtruth_neighbors_file: wiki_all_1M/groundtruth.1M.neighbors.ibin
distance: euclidean
- name: wiki_all_10M
dims: 768
base_file: wiki_all_10M/base.10M.fbin
query_file: wiki_all_10M/queries.fbin
groundtruth_neighbors_file: wiki_all_10M/groundtruth.10M.neighbors.ibin
distance: euclidean
- name: wiki_all_88M
dims: 768
base_file: wiki_all_88M/base.88M.fbin
query_file: wiki_all_88M/queries.fbin
groundtruth_neighbors_file: wiki_all_88M/groundtruth.88M.neighbors.ibin
distance: euclidean
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/mnist-784-euclidean.json | {
"dataset": {
"name": "mnist-784-euclidean",
"base_file": "mnist-784-euclidean/base.fbin",
"query_file": "mnist-784-euclidean/query.fbin",
"groundtruth_neighbors_file": "mnist-784-euclidean/groundtruth.neighbors.ibin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 5000,
"k": 10,
"run_count": 3
},
"index": [
{
"name" : "hnswlib.M12",
"algo" : "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file" : "index/mnist-784-euclidean/hnswlib/M12",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/mnist-784-euclidean/hnswlib/M12"
},
{
"name" : "hnswlib.M16",
"algo" : "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file" : "index/mnist-784-euclidean/hnswlib/M16",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/mnist-784-euclidean/hnswlib/M16"
},
{
"name" : "hnswlib.M24",
"algo" : "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file" : "index/mnist-784-euclidean/hnswlib/M24",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/mnist-784-euclidean/hnswlib/M24"
},
{
"name" : "hnswlib.M36",
"algo" : "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file" : "index/mnist-784-euclidean/hnswlib/M36",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/mnist-784-euclidean/hnswlib/M36"
},
{
"name": "raft_bfknn",
"algo": "raft_bfknn",
"build_param": {},
"file": "index/mnist-784-euclidean/raft_bfknn/bfknn",
"search_params": [
{
"probe": 1
}
],
"search_result_file": "result/mnist-784-euclidean/raft_bfknn/bfknn"
},
{
"name": "faiss_gpu_ivf_flat.nlist1024",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 1024
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_flat/nlist1024"
},
{
"name": "faiss_gpu_ivf_flat.nlist2048",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 2048
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_flat/nlist2048",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_flat/nlist2048"
},
{
"name": "faiss_gpu_ivf_flat.nlist4096",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 4096
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_flat/nlist4096",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_flat/nlist4096"
},
{
"name": "faiss_gpu_ivf_flat.nlist8192",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 8192
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_flat/nlist8192",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_flat/nlist8192"
},
{
"name": "faiss_gpu_ivf_flat.nlist16384",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 16384
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_flat/nlist16384"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": true
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_pq/M64-nlist1024",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024.noprecomp",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": false
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
      "search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "fp16"
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist1024-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist1024-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "fp16"
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist2048-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist2048-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "fp16"
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist4096-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist4096-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "fp16"
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist8192-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist8192-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "fp16"
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist16384-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist16384-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "int8"
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist1024-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist1024-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "int8"
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist2048-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist2048-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "int8"
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist4096-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist4096-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "int8"
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist8192-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist8192-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "int8"
},
"file": "index/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist16384-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_ivf_sq/nlist16384-int8"
},
{
"name": "faiss_gpu_flat",
"algo": "faiss_gpu_flat",
"build_param": {},
"file": "index/mnist-784-euclidean/faiss_gpu_flat/flat",
"search_params": [
{}
],
"search_result_file": "result/mnist-784-euclidean/faiss_gpu_flat/flat"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
}
],
"search_result_file": "result/mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-float",
"search_params": [
{
"k": 10,
"numProbes": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 5,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-float"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-half",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/mnist-784-euclidean/raft_ivf_pq/dimpq64-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/mnist-784-euclidean/raft_ivf_pq/dimpq64-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/mnist-784-euclidean/raft_ivf_pq/dimpq64-cluster1024-float-half",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/mnist-784-euclidean/raft_ivf_pq/dimpq64-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq32-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 32,
"ratio": 1,
"niter": 25
},
"file": "index/mnist-784-euclidean/raft_ivf_pq/dimpq32-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/mnist-784-euclidean/raft_ivf_pq/dimpq32-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq16-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 16,
"ratio": 1,
"niter": 25
},
"file": "index/mnist-784-euclidean/raft_ivf_pq/dimpq16-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/mnist-784-euclidean/raft_ivf_pq/dimpq16-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-half-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-half-float",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
}
],
"search_result_file": "result/mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-half-float"
},
{
"name": "raft_ivf_pq.dimpq512-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 512,
"ratio": 1,
"niter": 25
},
"file": "index/mnist-784-euclidean/raft_ivf_pq/dimpq512-cluster1024-float-float",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/mnist-784-euclidean/raft_ivf_pq/dimpq512-cluster1024-float-float"
},
{
"name": "raft_ivf_flat.nlist1024",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 1024,
"ratio": 1,
"niter": 25
},
"file": "index/mnist-784-euclidean/raft_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/mnist-784-euclidean/raft_ivf_flat/nlist1024"
},
{
"name": "raft_ivf_flat.nlist16384",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 16384,
"ratio": 2,
"niter": 20
},
"file": "index/mnist-784-euclidean/raft_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/mnist-784-euclidean/raft_ivf_flat/nlist16384"
},
{
"name" : "raft_cagra.dim32",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 32
},
"file" : "index/mnist-784-euclidean/raft_cagra/dim32",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/mnist-784-euclidean/raft_cagra/dim32"
},
{
"name" : "raft_cagra.dim64",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 64
},
"file" : "index/mnist-784-euclidean/raft_cagra/dim64",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/mnist-784-euclidean/raft_cagra/dim64"
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/glove-50-angular.json | {
"dataset": {
"name": "glove-50-angular",
"base_file": "glove-50-angular/base.fbin",
        "query_file": "glove-50-angular/query.fbin",
        "groundtruth_neighbors_file": "glove-50-angular/groundtruth.neighbors.ibin",
        "distance": "euclidean"
},
"search_basic_param": {
"batch_size": 5000,
"k": 10,
"run_count": 3
},
"index": [
{
"name" : "hnswlib.M12",
"algo" : "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file" : "index/glove-50-angular/hnswlib/M12",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-50-angular/hnswlib/M12"
},
{
"name" : "hnswlib.M16",
"algo" : "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file" : "index/glove-50-angular/hnswlib/M16",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-50-angular/hnswlib/M16"
},
{
"name" : "hnswlib.M24",
"algo" : "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file" : "index/glove-50-angular/hnswlib/M24",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-50-angular/hnswlib/M24"
},
{
"name" : "hnswlib.M36",
"algo" : "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file" : "index/glove-50-angular/hnswlib/M36",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-50-angular/hnswlib/M36"
},
{
"name": "raft_bfknn",
"algo": "raft_bfknn",
"build_param": {},
"file": "index/glove-50-angular/raft_bfknn/bfknn",
"search_params": [
{
"probe": 1
}
],
"search_result_file": "result/glove-50-angular/raft_bfknn/bfknn"
},
{
"name": "faiss_gpu_ivf_flat.nlist1024",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 1024
},
"file": "index/glove-50-angular/faiss_gpu_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_flat/nlist1024"
},
{
"name": "faiss_gpu_ivf_flat.nlist2048",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 2048
},
"file": "index/glove-50-angular/faiss_gpu_ivf_flat/nlist2048",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_flat/nlist2048"
},
{
"name": "faiss_gpu_ivf_flat.nlist4096",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 4096
},
"file": "index/glove-50-angular/faiss_gpu_ivf_flat/nlist4096",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_flat/nlist4096"
},
{
"name": "faiss_gpu_ivf_flat.nlist8192",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 8192
},
"file": "index/glove-50-angular/faiss_gpu_ivf_flat/nlist8192",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_flat/nlist8192"
},
{
"name": "faiss_gpu_ivf_flat.nlist16384",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 16384
},
"file": "index/glove-50-angular/faiss_gpu_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_flat/nlist16384"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": true
},
"file": "index/glove-50-angular/faiss_gpu_ivf_pq/M64-nlist1024",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024.noprecomp",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": false
},
"file": "index/glove-50-angular/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
      "search_result_file": "result/glove-50-angular/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "fp16"
},
"file": "index/glove-50-angular/faiss_gpu_ivf_sq/nlist1024-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_sq/nlist1024-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "fp16"
},
"file": "index/glove-50-angular/faiss_gpu_ivf_sq/nlist2048-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_sq/nlist2048-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "fp16"
},
"file": "index/glove-50-angular/faiss_gpu_ivf_sq/nlist4096-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_sq/nlist4096-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "fp16"
},
"file": "index/glove-50-angular/faiss_gpu_ivf_sq/nlist8192-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_sq/nlist8192-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "fp16"
},
"file": "index/glove-50-angular/faiss_gpu_ivf_sq/nlist16384-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_sq/nlist16384-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "int8"
},
"file": "index/glove-50-angular/faiss_gpu_ivf_sq/nlist1024-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_sq/nlist1024-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "int8"
},
"file": "index/glove-50-angular/faiss_gpu_ivf_sq/nlist2048-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_sq/nlist2048-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "int8"
},
"file": "index/glove-50-angular/faiss_gpu_ivf_sq/nlist4096-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_sq/nlist4096-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "int8"
},
"file": "index/glove-50-angular/faiss_gpu_ivf_sq/nlist8192-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_sq/nlist8192-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "int8"
},
"file": "index/glove-50-angular/faiss_gpu_ivf_sq/nlist16384-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_ivf_sq/nlist16384-int8"
},
{
"name": "faiss_gpu_flat",
"algo": "faiss_gpu_flat",
"build_param": {},
"file": "index/glove-50-angular/faiss_gpu_flat/flat",
"search_params": [
{}
],
"search_result_file": "result/glove-50-angular/faiss_gpu_flat/flat"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-angular/raft_ivf_pq/dimpq128-cluster1024",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
}
],
"search_result_file": "result/glove-50-angular/raft_ivf_pq/dimpq128-cluster1024"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-angular/raft_ivf_pq/dimpq128-cluster1024-float-float",
"search_params": [
{
"k": 10,
"nprobe": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 5,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/glove-50-angular/raft_ivf_pq/dimpq128-cluster1024-float-float"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-angular/raft_ivf_pq/dimpq128-cluster1024-float-half",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/glove-50-angular/raft_ivf_pq/dimpq128-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-angular/raft_ivf_pq/dimpq128-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-50-angular/raft_ivf_pq/dimpq128-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-angular/raft_ivf_pq/dimpq64-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-50-angular/raft_ivf_pq/dimpq64-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-angular/raft_ivf_pq/dimpq64-cluster1024-float-half",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/glove-50-angular/raft_ivf_pq/dimpq64-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq32-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 32,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-angular/raft_ivf_pq/dimpq32-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-50-angular/raft_ivf_pq/dimpq32-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq16-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 16,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-angular/raft_ivf_pq/dimpq16-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-50-angular/raft_ivf_pq/dimpq16-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-half-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-angular/raft_ivf_pq/dimpq128-cluster1024-half-float",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
}
],
"search_result_file": "result/glove-50-angular/raft_ivf_pq/dimpq128-cluster1024-half-float"
},
{
"name": "raft_ivf_pq.dimpq512-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 512,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-angular/raft_ivf_pq/dimpq512-cluster1024-float-float",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/glove-50-angular/raft_ivf_pq/dimpq512-cluster1024-float-float"
},
{
"name": "raft_ivf_flat.nlist1024",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 1024,
"ratio": 1,
"niter": 25
},
"file": "index/glove-50-angular/raft_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-50-angular/raft_ivf_flat/nlist1024"
},
{
"name": "raft_ivf_flat.nlist16384",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 16384,
"ratio": 2,
"niter": 20
},
"file": "index/glove-50-angular/raft_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-50-angular/raft_ivf_flat/nlist16384"
},
{
"name" : "raft_cagra.dim32",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 32
},
"file" : "index/glove-50-angular/raft_cagra/dim32",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/glove-50-angular/raft_cagra/dim32"
},
{
"name" : "raft_cagra.dim64",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 64
},
"file" : "index/glove-50-angular/raft_cagra/dim64",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/glove-50-angular/raft_cagra/dim64"
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/nytimes-256-inner.json | {
"dataset": {
"name": "nytimes-256-inner",
"base_file": "nytimes-256-inner/base.fbin",
"query_file": "nytimes-256-inner/query.fbin",
"groundtruth_neighbors_file": "nytimes-256-inner/groundtruth.neighbors.ibin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 5000,
"k": 10,
"run_count": 3
},
"index": [
{
"name" : "hnswlib.M12",
"algo" : "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file" : "index/nytimes-256-inner/hnswlib/M12",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/nytimes-256-inner/hnswlib/M12"
},
{
"name" : "hnswlib.M16",
"algo" : "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file" : "index/nytimes-256-inner/hnswlib/M16",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/nytimes-256-inner/hnswlib/M16"
},
{
"name" : "hnswlib.M24",
"algo" : "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file" : "index/nytimes-256-inner/hnswlib/M24",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/nytimes-256-inner/hnswlib/M24"
},
{
"name" : "hnswlib.M36",
"algo" : "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file" : "index/nytimes-256-inner/hnswlib/M36",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/nytimes-256-inner/hnswlib/M36"
},
{
"name": "raft_bfknn",
"algo": "raft_bfknn",
"build_param": {},
"file": "index/nytimes-256-inner/raft_bfknn/bfknn",
"search_params": [
{
"probe": 1
}
],
"search_result_file": "result/nytimes-256-inner/raft_bfknn/bfknn"
},
{
"name": "faiss_ivf_flat.nlist1024",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 1024
},
"file": "index/nytimes-256-inner/faiss_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_flat/nlist1024"
},
{
"name": "faiss_ivf_flat.nlist2048",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 2048
},
"file": "index/nytimes-256-inner/faiss_ivf_flat/nlist2048",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_flat/nlist2048"
},
{
"name": "faiss_ivf_flat.nlist4096",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 4096
},
"file": "index/nytimes-256-inner/faiss_ivf_flat/nlist4096",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_flat/nlist4096"
},
{
"name": "faiss_ivf_flat.nlist8192",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 8192
},
"file": "index/nytimes-256-inner/faiss_ivf_flat/nlist8192",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_flat/nlist8192"
},
{
"name": "faiss_ivf_flat.nlist16384",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 16384
},
"file": "index/nytimes-256-inner/faiss_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_flat/nlist16384"
},
{
"name": "faiss_ivf_pq.M64-nlist1024",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": true
},
"file": "index/nytimes-256-inner/faiss_ivf_pq/M64-nlist1024",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_ivf_pq.M64-nlist1024.noprecomp",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": false
},
"file": "index/nytimes-256-inner/faiss_ivf_pq/M64-nlist1024.noprecomp",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_ivf_sq.nlist1024-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "fp16"
},
"file": "index/nytimes-256-inner/faiss_ivf_sq/nlist1024-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_sq/nlist1024-fp16"
},
{
"name": "faiss_ivf_sq.nlist2048-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "fp16"
},
"file": "index/nytimes-256-inner/faiss_ivf_sq/nlist2048-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_sq/nlist2048-fp16"
},
{
"name": "faiss_ivf_sq.nlist4096-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "fp16"
},
"file": "index/nytimes-256-inner/faiss_ivf_sq/nlist4096-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_sq/nlist4096-fp16"
},
{
"name": "faiss_ivf_sq.nlist8192-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "fp16"
},
"file": "index/nytimes-256-inner/faiss_ivf_sq/nlist8192-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_sq/nlist8192-fp16"
},
{
"name": "faiss_ivf_sq.nlist16384-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "fp16"
},
"file": "index/nytimes-256-inner/faiss_ivf_sq/nlist16384-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_sq/nlist16384-fp16"
},
{
"name": "faiss_ivf_sq.nlist1024-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "int8"
},
"file": "index/nytimes-256-inner/faiss_ivf_sq/nlist1024-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_sq/nlist1024-int8"
},
{
"name": "faiss_ivf_sq.nlist2048-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "int8"
},
"file": "index/nytimes-256-inner/faiss_ivf_sq/nlist2048-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_sq/nlist2048-int8"
},
{
"name": "faiss_ivf_sq.nlist4096-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "int8"
},
"file": "index/nytimes-256-inner/faiss_ivf_sq/nlist4096-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_sq/nlist4096-int8"
},
{
"name": "faiss_ivf_sq.nlist8192-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "int8"
},
"file": "index/nytimes-256-inner/faiss_ivf_sq/nlist8192-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_sq/nlist8192-int8"
},
{
"name": "faiss_ivf_sq.nlist16384-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "int8"
},
"file": "index/nytimes-256-inner/faiss_ivf_sq/nlist16384-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/nytimes-256-inner/faiss_ivf_sq/nlist16384-int8"
},
{
"name": "faiss_flat",
"algo": "faiss_gpu_flat",
"build_param": {},
"file": "index/nytimes-256-inner/faiss_flat/flat",
"search_params": [
{}
],
"search_result_file": "result/nytimes-256-inner/faiss_flat/flat"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-inner/raft_ivf_pq/dimpq128-cluster1024",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
}
],
"search_result_file": "result/nytimes-256-inner/raft_ivf_pq/dimpq128-cluster1024"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-inner/raft_ivf_pq/dimpq128-cluster1024-float-float",
"search_params": [
{
"k": 10,
"numProbes": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 5,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/nytimes-256-inner/raft_ivf_pq/dimpq128-cluster1024-float-float"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-inner/raft_ivf_pq/dimpq128-cluster1024-float-half",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/nytimes-256-inner/raft_ivf_pq/dimpq128-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-inner/raft_ivf_pq/dimpq128-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/nytimes-256-inner/raft_ivf_pq/dimpq128-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-inner/raft_ivf_pq/dimpq64-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/nytimes-256-inner/raft_ivf_pq/dimpq64-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-inner/raft_ivf_pq/dimpq64-cluster1024-float-half",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/nytimes-256-inner/raft_ivf_pq/dimpq64-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq32-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 32,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-inner/raft_ivf_pq/dimpq32-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/nytimes-256-inner/raft_ivf_pq/dimpq32-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq16-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 16,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-inner/raft_ivf_pq/dimpq16-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/nytimes-256-inner/raft_ivf_pq/dimpq16-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-half-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-inner/raft_ivf_pq/dimpq128-cluster1024-half-float",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
}
],
"search_result_file": "result/nytimes-256-inner/raft_ivf_pq/dimpq128-cluster1024-half-float"
},
{
"name": "raft_ivf_pq.dimpq512-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 512,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-inner/raft_ivf_pq/dimpq512-cluster1024-float-float",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/nytimes-256-inner/raft_ivf_pq/dimpq512-cluster1024-float-float"
},
{
"name": "raft_ivf_flat.nlist1024",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 1024,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-inner/raft_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-inner/raft_ivf_flat/nlist1024"
},
{
"name": "raft_ivf_flat.nlist16384",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 16384,
"ratio": 2,
"niter": 20
},
"file": "index/nytimes-256-inner/raft_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/nytimes-256-inner/raft_ivf_flat/nlist16384"
},
{
"name" : "raft_cagra.dim32",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 32
},
"file" : "index/nytimes-256-inner/raft_cagra/dim32",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/nytimes-256-inner/raft_cagra/dim32"
},
{
"name" : "raft_cagra.dim64",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 64
},
"file" : "index/nytimes-256-inner/raft_cagra/dim64",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/nytimes-256-inner/raft_cagra/dim64"
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/lastfm-65-angular.json | {
"dataset": {
"name": "lastfm-65-angular",
"base_file": "lastfm-65-angular/base.fbin",
"query_file": "lastfm-65-angular/query.fbin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 5000,
"k": 10,
"run_count": 3
},
"index": [
{
"name" : "hnswlib.M12",
"algo" : "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file" : "index/lastfm-65-angular/hnswlib/M12",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/lastfm-65-angular/hnswlib/M12"
},
{
"name" : "hnswlib.M16",
"algo" : "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file" : "index/lastfm-65-angular/hnswlib/M16",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/lastfm-65-angular/hnswlib/M16"
},
{
"name" : "hnswlib.M24",
"algo" : "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file" : "index/lastfm-65-angular/hnswlib/M24",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/lastfm-65-angular/hnswlib/M24"
},
{
"name" : "hnswlib.M36",
"algo" : "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file" : "index/lastfm-65-angular/hnswlib/M36",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/lastfm-65-angular/hnswlib/M36"
},
{
"name": "raft_bfknn",
"algo": "raft_bfknn",
"build_param": {},
"file": "index/lastfm-65-angular/raft_bfknn/bfknn",
"search_params": [
{
"probe": 1
}
],
"search_result_file": "result/lastfm-65-angular/raft_bfknn/bfknn"
},
{
"name": "faiss_gpu_ivf_flat.nlist1024",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 1024
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_flat/nlist1024"
},
{
"name": "faiss_gpu_ivf_flat.nlist2048",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 2048
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_flat/nlist2048",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_flat/nlist2048"
},
{
"name": "faiss_gpu_ivf_flat.nlist4096",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 4096
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_flat/nlist4096",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_flat/nlist4096"
},
{
"name": "faiss_gpu_ivf_flat.nlist8192",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 8192
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_flat/nlist8192",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_flat/nlist8192"
},
{
"name": "faiss_gpu_ivf_flat.nlist16384",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 16384
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_flat/nlist16384"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": true
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_pq/M64-nlist1024",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024.noprecomp",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": false
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
        "search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "fp16"
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_sq/nlist1024-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_sq/nlist1024-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "fp16"
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_sq/nlist2048-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_sq/nlist2048-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "fp16"
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_sq/nlist4096-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_sq/nlist4096-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "fp16"
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_sq/nlist8192-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_sq/nlist8192-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "fp16"
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_sq/nlist16384-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_sq/nlist16384-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "int8"
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_sq/nlist1024-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_sq/nlist1024-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "int8"
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_sq/nlist2048-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_sq/nlist2048-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "int8"
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_sq/nlist4096-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_sq/nlist4096-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "int8"
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_sq/nlist8192-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_sq/nlist8192-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "int8"
},
"file": "index/lastfm-65-angular/faiss_gpu_ivf_sq/nlist16384-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_ivf_sq/nlist16384-int8"
},
{
"name": "faiss_gpu_flat",
"algo": "faiss_gpu_flat",
"build_param": {},
"file": "index/lastfm-65-angular/faiss_gpu_flat/flat",
"search_params": [
{}
],
"search_result_file": "result/lastfm-65-angular/faiss_gpu_flat/flat"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/lastfm-65-angular/raft_ivf_pq/dimpq128-cluster1024",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
}
],
"search_result_file": "result/lastfm-65-angular/raft_ivf_pq/dimpq128-cluster1024"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/lastfm-65-angular/raft_ivf_pq/dimpq128-cluster1024-float-float",
"search_params": [
{
"k": 10,
"numProbes": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 5,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/lastfm-65-angular/raft_ivf_pq/dimpq128-cluster1024-float-float"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/lastfm-65-angular/raft_ivf_pq/dimpq128-cluster1024-float-half",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/lastfm-65-angular/raft_ivf_pq/dimpq128-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/lastfm-65-angular/raft_ivf_pq/dimpq128-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/lastfm-65-angular/raft_ivf_pq/dimpq128-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/lastfm-65-angular/raft_ivf_pq/dimpq64-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/lastfm-65-angular/raft_ivf_pq/dimpq64-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/lastfm-65-angular/raft_ivf_pq/dimpq64-cluster1024-float-half",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/lastfm-65-angular/raft_ivf_pq/dimpq64-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq32-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 32,
"ratio": 1,
"niter": 25
},
"file": "index/lastfm-65-angular/raft_ivf_pq/dimpq32-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/lastfm-65-angular/raft_ivf_pq/dimpq32-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq16-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 16,
"ratio": 1,
"niter": 25
},
"file": "index/lastfm-65-angular/raft_ivf_pq/dimpq16-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/lastfm-65-angular/raft_ivf_pq/dimpq16-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-half-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/lastfm-65-angular/raft_ivf_pq/dimpq128-cluster1024-half-float",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
}
],
"search_result_file": "result/lastfm-65-angular/raft_ivf_pq/dimpq128-cluster1024-half-float"
},
{
"name": "raft_ivf_pq.dimpq512-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 512,
"ratio": 1,
"niter": 25
},
"file": "index/lastfm-65-angular/raft_ivf_pq/dimpq512-cluster1024-float-float",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/lastfm-65-angular/raft_ivf_pq/dimpq512-cluster1024-float-float"
},
{
"name": "raft_ivf_flat.nlist1024",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 1024,
"ratio": 1,
"niter": 25
},
"file": "index/lastfm-65-angular/raft_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/lastfm-65-angular/raft_ivf_flat/nlist1024"
},
{
"name": "raft_ivf_flat.nlist16384",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 16384,
"ratio": 2,
"niter": 20
},
"file": "index/lastfm-65-angular/raft_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/lastfm-65-angular/raft_ivf_flat/nlist16384"
},
{
"name" : "raft_cagra.dim32",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 32
},
"file" : "index/lastfm-65-angular/raft_cagra/dim32",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/lastfm-65-angular/raft_cagra/dim32"
},
{
"name" : "raft_cagra.dim64",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 64
},
"file" : "index/lastfm-65-angular/raft_cagra/dim64",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/lastfm-65-angular/raft_cagra/dim64"
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/nytimes-256-angular.json | {
"dataset": {
"name": "nytimes-256-angular",
"base_file": "nytimes-256-angular/base.fbin",
"query_file": "nytimes-256-angular/query.fbin",
"groundtruth_neighbors_file": "nytimes-256-angular/groundtruth.neighbors.ibin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 5000,
"k": 10,
"run_count": 3
},
"index": [
{
"name" : "hnswlib.M12",
"algo" : "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file" : "index/nytimes-256-angular/hnswlib/M12",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/nytimes-256-angular/hnswlib/M12"
},
{
"name" : "hnswlib.M16",
"algo" : "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file" : "index/nytimes-256-angular/hnswlib/M16",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/nytimes-256-angular/hnswlib/M16"
},
{
"name" : "hnswlib.M24",
"algo" : "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file" : "index/nytimes-256-angular/hnswlib/M24",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/nytimes-256-angular/hnswlib/M24"
},
{
"name" : "hnswlib.M36",
"algo" : "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file" : "index/nytimes-256-angular/hnswlib/M36",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/nytimes-256-angular/hnswlib/M36"
},
{
"name": "raft_bfknn",
"algo": "raft_bfknn",
"build_param": {},
"file": "index/nytimes-256-angular/raft_bfknn/bfknn",
"search_params": [
{
"probe": 1
}
],
"search_result_file": "result/nytimes-256-angular/raft_bfknn/bfknn"
},
{
"name": "faiss_gpu_ivf_flat.nlist1024",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 1024
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_flat/nlist1024"
},
{
"name": "faiss_gpu_ivf_flat.nlist2048",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 2048
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_flat/nlist2048",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_flat/nlist2048"
},
{
"name": "faiss_gpu_ivf_flat.nlist4096",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 4096
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_flat/nlist4096",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_flat/nlist4096"
},
{
"name": "faiss_gpu_ivf_flat.nlist8192",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 8192
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_flat/nlist8192",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_flat/nlist8192"
},
{
"name": "faiss_gpu_ivf_flat.nlist16384",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 16384
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_flat/nlist16384"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": true
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_pq/M64-nlist1024",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024.noprecomp",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": false
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
        "search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "fp16"
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_sq/nlist1024-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_sq/nlist1024-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "fp16"
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_sq/nlist2048-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_sq/nlist2048-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "fp16"
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_sq/nlist4096-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_sq/nlist4096-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "fp16"
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_sq/nlist8192-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_sq/nlist8192-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "fp16"
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_sq/nlist16384-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_sq/nlist16384-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "int8"
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_sq/nlist1024-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_sq/nlist1024-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "int8"
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_sq/nlist2048-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_sq/nlist2048-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "int8"
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_sq/nlist4096-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_sq/nlist4096-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "int8"
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_sq/nlist8192-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_sq/nlist8192-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "int8"
},
"file": "index/nytimes-256-angular/faiss_gpu_ivf_sq/nlist16384-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_ivf_sq/nlist16384-int8"
},
{
"name": "faiss_gpu_flat",
"algo": "faiss_gpu_flat",
"build_param": {},
"file": "index/nytimes-256-angular/faiss_gpu_flat/flat",
"search_params": [
{}
],
"search_result_file": "result/nytimes-256-angular/faiss_gpu_flat/flat"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-angular/raft_ivf_pq/dimpq128-cluster1024",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
}
],
"search_result_file": "result/nytimes-256-angular/raft_ivf_pq/dimpq128-cluster1024"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-angular/raft_ivf_pq/dimpq128-cluster1024-float-float",
"search_params": [
{
"k": 10,
"numProbes": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 5,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/nytimes-256-angular/raft_ivf_pq/dimpq128-cluster1024-float-float"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-angular/raft_ivf_pq/dimpq128-cluster1024-float-half",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/nytimes-256-angular/raft_ivf_pq/dimpq128-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-angular/raft_ivf_pq/dimpq128-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/nytimes-256-angular/raft_ivf_pq/dimpq128-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-angular/raft_ivf_pq/dimpq64-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/nytimes-256-angular/raft_ivf_pq/dimpq64-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-angular/raft_ivf_pq/dimpq64-cluster1024-float-half",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/nytimes-256-angular/raft_ivf_pq/dimpq64-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq32-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 32,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-angular/raft_ivf_pq/dimpq32-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/nytimes-256-angular/raft_ivf_pq/dimpq32-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq16-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 16,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-angular/raft_ivf_pq/dimpq16-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/nytimes-256-angular/raft_ivf_pq/dimpq16-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-half-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-angular/raft_ivf_pq/dimpq128-cluster1024-half-float",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
}
],
"search_result_file": "result/nytimes-256-angular/raft_ivf_pq/dimpq128-cluster1024-half-float"
},
{
"name": "raft_ivf_pq.dimpq512-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 512,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-angular/raft_ivf_pq/dimpq512-cluster1024-float-float",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/nytimes-256-angular/raft_ivf_pq/dimpq512-cluster1024-float-float"
},
{
"name": "raft_ivf_flat.nlist1024",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 1024,
"ratio": 1,
"niter": 25
},
"file": "index/nytimes-256-angular/raft_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/nytimes-256-angular/raft_ivf_flat/nlist1024"
},
{
"name": "raft_ivf_flat.nlist16384",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 16384,
"ratio": 2,
"niter": 20
},
"file": "index/nytimes-256-angular/raft_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/nytimes-256-angular/raft_ivf_flat/nlist16384"
},
{
"name" : "raft_cagra.dim32",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 32
},
"file" : "index/nytimes-256-angular/raft_cagra/dim32",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/nytimes-256-angular/raft_cagra/dim32"
},
{
"name" : "raft_cagra.dim64",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 64
},
"file" : "index/nytimes-256-angular/raft_cagra/dim64",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/nytimes-256-angular/raft_cagra/dim64"
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/wiki_all_88M.json | {
"dataset": {
"name": "wiki_all_88M",
"base_file": "wiki_all_88M/base.88M.fbin",
"query_file": "wiki_all_88M/queries.fbin",
"groundtruth_neighbors_file": "wiki_all_88M/groundtruth.88M.neighbors.ibin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 10000,
"k": 10
},
"index": [
{
"name": "hnswlib.M16.ef50",
"algo": "hnswlib",
"build_param": { "M": 16, "efConstruction": 50, "numThreads": 56 },
"file": "wiki_all_88M/hnswlib/M16.ef50",
"search_params": [
{ "ef": 10, "numThreads": 56 },
{ "ef": 20, "numThreads": 56 },
{ "ef": 40, "numThreads": 56 },
{ "ef": 60, "numThreads": 56 },
{ "ef": 80, "numThreads": 56 },
{ "ef": 120, "numThreads": 56 },
{ "ef": 200, "numThreads": 56 },
{ "ef": 400, "numThreads": 56 },
{ "ef": 600, "numThreads": 56 },
{ "ef": 800, "numThreads": 56 }
]
},
{
"name": "faiss_ivf_pq.M32-nlist16K",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"M": 32,
"nlist": 16384,
"ratio": 2
},
"file": "wiki_all_88M/faiss_ivf_pq/M32-nlist16K_ratio2",
"search_params": [
{ "nprobe": 10 },
{ "nprobe": 20 },
{ "nprobe": 30 },
{ "nprobe": 40 },
{ "nprobe": 50 },
{ "nprobe": 100 },
{ "nprobe": 200 },
{ "nprobe": 500 }
]
},
{
"name": "faiss_ivf_pq.M64-nlist16K",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"M": 64,
"nlist": 16384,
"ratio": 2
},
"file": "wiki_all_88M/faiss_ivf_pq/M64-nlist16K_ratio2",
"search_params": [
{ "nprobe": 10 },
{ "nprobe": 20 },
{ "nprobe": 30 },
{ "nprobe": 40 },
{ "nprobe": 50 },
{ "nprobe": 100 },
{ "nprobe": 200 },
{ "nprobe": 500 }
]
},
{
"name": "raft_ivf_pq.d128-nlist16K",
"algo": "raft_ivf_pq",
"build_param": {
"pq_dim": 128,
"pq_bits": 8,
"nlist": 16384,
"niter": 10,
"ratio": 10
},
"file": "wiki_all_88M/raft_ivf_pq/d128-nlist16K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 }
]
},
{
"name": "raft_ivf_pq.d64-nlist16K",
"algo": "raft_ivf_pq",
"build_param": {
"pq_dim": 64,
"pq_bits": 8,
"nlist": 16384,
"niter": 10,
"ratio": 10
},
"file": "wiki_all_88M/raft_ivf_pq/d64-nlist16K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 }
]
},
{
"name": "raft_ivf_pq.d32-nlist16K",
"algo": "raft_ivf_pq",
"build_param": {
"pq_dim": 32,
"pq_bits": 8,
"nlist": 16384,
"niter": 10,
"ratio": 10
},
"file": "wiki_all_88M/raft_ivf_pq/d32-nlist16K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 }
]
},
{
"name": "raft_ivf_pq.d32X-nlist16K",
"algo": "raft_ivf_pq",
"build_param": {
"pq_dim": 32,
"pq_bits": 8,
"nlist": 16384,
"niter": 10,
"ratio": 10
},
"file": "wiki_all_88M/raft_ivf_pq/d32-nlist16K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 }
]
},
{
"name": "raft_cagra.dim32.multi_cta",
"algo": "raft_cagra",
"build_param": { "graph_degree": 32, "intermediate_graph_degree": 48 },
"file": "wiki_all_88M/raft_cagra/dim32.ibin",
"search_params": [
{ "itopk": 32, "search_width": 1, "max_iterations": 0, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 32, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 36, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 40, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 44, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 48, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 2, "max_iterations": 16, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 2, "max_iterations": 24, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 2, "max_iterations": 26, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 2, "max_iterations": 32, "algo": "multi_cta" },
{ "itopk": 64, "search_width": 4, "max_iterations": 16, "algo": "multi_cta" },
{ "itopk": 64, "search_width": 1, "max_iterations": 64, "algo": "multi_cta" },
{ "itopk": 96, "search_width": 2, "max_iterations": 48, "algo": "multi_cta" },
{ "itopk": 128, "search_width": 8, "max_iterations": 16, "algo": "multi_cta" },
{ "itopk": 128, "search_width": 2, "max_iterations": 64, "algo": "multi_cta" },
{ "itopk": 192, "search_width": 8, "max_iterations": 24, "algo": "multi_cta" },
{ "itopk": 192, "search_width": 2, "max_iterations": 96, "algo": "multi_cta" },
{ "itopk": 256, "search_width": 8, "max_iterations": 32, "algo": "multi_cta" },
{ "itopk": 384, "search_width": 8, "max_iterations": 48, "algo": "multi_cta" },
{ "itopk": 512, "search_width": 8, "max_iterations": 64, "algo": "multi_cta" }
]
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/glove-100-inner.json | {
"dataset": {
"name": "glove-100-inner",
"base_file": "glove-100-inner/base.fbin",
"query_file": "glove-100-inner/query.fbin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 5000,
"k": 10,
"run_count": 3
},
"index": [
{
"name" : "hnswlib.M12",
"algo" : "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file" : "index/glove-100-inner/hnswlib/M12",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-100-inner/hnswlib/M12"
},
{
"name" : "hnswlib.M16",
"algo" : "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file" : "index/glove-100-inner/hnswlib/M16",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-100-inner/hnswlib/M16"
},
{
"name" : "hnswlib.M24",
"algo" : "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file" : "index/glove-100-inner/hnswlib/M24",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-100-inner/hnswlib/M24"
},
{
"name" : "hnswlib.M36",
"algo" : "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file" : "index/glove-100-inner/hnswlib/M36",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-100-inner/hnswlib/M36"
},
{
"name": "raft_bfknn",
"algo": "raft_bfknn",
"build_param": {},
"file": "index/glove-100-inner/raft_bfknn/bfknn",
"search_params": [
{
"probe": 1
}
],
"search_result_file": "result/glove-100-inner/raft_bfknn/bfknn"
},
{
"name": "faiss_gpu_ivf_flat.nlist1024",
"algo": "faiss_gpu_ivf_flat",
"build_param": {"nlist":1024},
"file": "glove-100-inner/faiss_gpu_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_flat/nlist1024"
},
{
"name": "faiss_gpu_ivf_flat.nlist2048",
"algo": "faiss_gpu_ivf_flat",
"build_param": {"nlist":2048},
"file": "glove-100-inner/faiss_gpu_ivf_flat/nlist2048",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_flat/nlist2048"
},
{
"name": "faiss_gpu_ivf_flat.nlist4096",
"algo": "faiss_gpu_ivf_flat",
"build_param": {"nlist":4096},
"file": "glove-100-inner/faiss_gpu_ivf_flat/nlist4096",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_flat/nlist4096"
},
{
"name": "faiss_gpu_ivf_flat.nlist8192",
"algo": "faiss_gpu_ivf_flat",
"build_param": {"nlist":8192},
"file": "glove-100-inner/faiss_gpu_ivf_flat/nlist8192",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_flat/nlist8192"
},
{
"name": "faiss_gpu_ivf_flat.nlist16384",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 16384
},
"file": "index/glove-100-inner/faiss_gpu_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-100-inner/faiss_gpu_ivf_flat/nlist16384"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": true
},
"file": "index/glove-100-inner/faiss_gpu_ivf_pq/M64-nlist1024",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024.noprecomp",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": false
},
"file": "index/glove-100-inner/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_gpu_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "fp16"
},
"file": "index/glove-100-inner/faiss_gpu_ivf_sq/nlist1024-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_gpu_ivf_sq/nlist1024-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist":2048, "quantizer_type":"fp16"},
"file": "glove-100-inner/faiss_gpu_ivf_sq/nlist2048-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_sq/nlist2048-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist":4096, "quantizer_type":"fp16"},
"file": "glove-100-inner/faiss_gpu_ivf_sq/nlist4096-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_sq/nlist4096-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist":8192, "quantizer_type":"fp16"},
"file": "glove-100-inner/faiss_gpu_ivf_sq/nlist8192-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_sq/nlist8192-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist":16384, "quantizer_type":"fp16"},
"file": "glove-100-inner/faiss_gpu_ivf_sq/nlist16384-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_sq/nlist16384-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist":1024, "quantizer_type":"int8"},
"file": "glove-100-inner/faiss_gpu_ivf_sq/nlist1024-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_sq/nlist1024-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist":2048, "quantizer_type":"int8"},
"file": "glove-100-inner/faiss_gpu_ivf_sq/nlist2048-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_sq/nlist2048-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist":4096, "quantizer_type":"int8"},
"file": "glove-100-inner/faiss_gpu_ivf_sq/nlist4096-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_sq/nlist4096-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist":8192, "quantizer_type":"int8"},
"file": "glove-100-inner/faiss_gpu_ivf_sq/nlist8192-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_sq/nlist8192-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist":16384, "quantizer_type":"int8"},
"file": "glove-100-inner/faiss_gpu_ivf_sq/nlist16384-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-100-inner/faiss_ivf_sq/nlist16384-int8"
},
{
"name": "faiss_gpu_flat",
"algo": "faiss_gpu_flat",
"build_param": {},
"file": "glove-100-inner/faiss_gpu_flat/flat",
"search_params": [{}],
"search_result_file": "result/glove-100-inner/faiss_gpu_flat/flat"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-inner/raft_ivf_pq/dimpq128-cluster1024",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
}
],
"search_result_file": "result/glove-100-inner/raft_gpu_ivf_pq/dimpq128-cluster1024"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-inner/raft_ivf_pq/dimpq128-cluster1024-float-float",
"search_params": [
      {
        "k": 10,
        "nprobe": 1,
        "internalDistanceDtype": "float",
        "smemLutDtype": "float"
      },
{
"k": 10,
"nprobe": 5,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/glove-100-inner/raft_ivf_pq/dimpq128-cluster1024-float-float"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-inner/raft_ivf_pq/dimpq128-cluster1024-float-half",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/glove-100-inner/raft_ivf_pq/dimpq128-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-inner/raft_ivf_pq/dimpq128-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-100-inner/raft_ivf_pq/dimpq128-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-inner/raft_ivf_pq/dimpq64-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-100-inner/raft_ivf_pq/dimpq64-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-inner/raft_ivf_pq/dimpq64-cluster1024-float-half",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/glove-100-inner/raft_ivf_pq/dimpq64-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq32-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 32,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-inner/raft_ivf_pq/dimpq32-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-100-inner/raft_ivf_pq/dimpq32-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq16-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 16,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-inner/raft_ivf_pq/dimpq16-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-100-inner/raft_ivf_pq/dimpq16-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-half-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-inner/raft_ivf_pq/dimpq128-cluster1024-half-float",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
}
],
"search_result_file": "result/glove-100-inner/raft_ivf_pq/dimpq128-cluster1024-half-float"
},
{
"name": "raft_ivf_pq.dimpq512-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 512,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-inner/raft_ivf_pq/dimpq512-cluster1024-float-float",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/glove-100-inner/raft_ivf_pq/dimpq512-cluster1024-float-float"
},
{
"name": "raft_ivf_flat.nlist1024",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 1024,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-inner/raft_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-inner/raft_ivf_flat/nlist1024"
},
{
"name": "raft_ivf_flat.nlist16384",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 16384,
"ratio": 2,
"niter": 20
},
"file": "index/glove-100-inner/raft_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-100-inner/raft_ivf_flat/nlist16384"
},
{
"name" : "raft_cagra.dim32",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 32
},
"file" : "index/glove-100-inner/raft_cagra/dim32",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/glove-100-inner/raft_cagra/dim32"
},
{
"name" : "raft_cagra.dim64",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 64
},
"file" : "index/glove-100-inner/raft_cagra/dim64",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/glove-100-inner/raft_cagra/dim64"
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/bigann-100M.json | {
"dataset": {
"name": "bigann-100M",
"base_file": "bigann-1B/base.1B.u8bin",
"subset_size": 100000000,
"query_file": "bigann-1B/query.public.10K.u8bin",
"groundtruth_neighbors_file": "bigann-100M/groundtruth.neighbors.ibin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 10000,
"k": 10
},
"index": [
{
"name": "raft_ivf_pq.dimpq64-cluster5K",
"algo": "raft_ivf_pq",
"build_param": {"niter": 25, "nlist": 5000, "pq_dim": 64, "ratio": 10},
"file": "bigann-100M/raft_ivf_pq/dimpq64-cluster5K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 1000, "internalDistanceDtype": "half", "smemLutDtype": "half" }
]
},
{
"name": "raft_ivf_pq.dimpq64-cluster10K",
"algo": "raft_ivf_pq",
"build_param": {"niter": 25, "nlist": 10000, "pq_dim": 64, "ratio": 10},
        "file": "bigann-100M/raft_ivf_pq/dimpq64-cluster10K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 1000, "internalDistanceDtype": "half", "smemLutDtype": "half" }
]
},
{
"name": "hnswlib.M12",
"algo": "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file": "bigann-100M/hnswlib/M12",
"search_params": [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
]
},
{
"name": "hnswlib.M16",
"algo": "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file": "bigann-100M/hnswlib/M16",
"search_params": [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
]
},
{
"name": "hnswlib.M24",
"algo": "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file": "bigann-100M/hnswlib/M24",
"search_params": [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
]
},
{
"name": "hnswlib.M36",
"algo": "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file": "bigann-100M/hnswlib/M36",
"search_params": [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
]
},
{
"name": "raft_ivf_flat.nlist100K",
"algo": "raft_ivf_flat",
"build_param": {"nlist": 100000, "niter": 25, "ratio": 5},
"file": "bigann-100M/raft_ivf_flat/nlist100K",
"search_params": [
{"max_batch":10000, "max_k":10, "nprobe":20},
{"max_batch":10000, "max_k":10, "nprobe":30},
{"max_batch":10000, "max_k":10, "nprobe":40},
{"max_batch":10000, "max_k":10, "nprobe":50},
{"max_batch":10000, "max_k":10, "nprobe":100},
{"max_batch":10000, "max_k":10, "nprobe":200},
{"max_batch":10000, "max_k":10, "nprobe":500},
{"max_batch":10000, "max_k":10, "nprobe":1000}
]
},
{
"name": "raft_cagra.dim32",
"algo": "raft_cagra",
"build_param": {"graph_degree": 32},
"file": "bigann-100M/raft_cagra/dim32",
"search_params": [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
]
},
{
"name": "raft_cagra.dim64",
"algo": "raft_cagra",
"build_param": {"graph_degree": 64},
"file": "bigann-100M/raft_cagra/dim64",
"search_params": [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
]
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/deep-image-96-inner.json | {
"dataset": {
"name": "deep-image-96-inner",
"base_file": "deep-image-96-inner/base.fbin",
"query_file": "deep-image-96-inner/query.fbin",
"groundtruth_neighbors_file": "deep-image-96-inner/groundtruth.neighbors.ibin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 5000,
"k": 10,
"run_count": 3
},
"index": [
{
"name" : "hnswlib.M12",
"algo" : "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file" : "index/deep-image-96-inner/hnswlib/M12",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/deep-image-96-inner/hnswlib/M12"
},
{
"name" : "hnswlib.M16",
"algo" : "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file" : "index/deep-image-96-inner/hnswlib/M16",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/deep-image-96-inner/hnswlib/M16"
},
{
"name" : "hnswlib.M24",
"algo" : "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file" : "index/deep-image-96-inner/hnswlib/M24",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/deep-image-96-inner/hnswlib/M24"
},
{
"name" : "hnswlib.M36",
"algo" : "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file" : "index/deep-image-96-inner/hnswlib/M36",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/deep-image-96-inner/hnswlib/M36"
},
{
"name": "raft_bfknn",
"algo": "raft_bfknn",
"build_param": {},
"file": "index/deep-image-96-inner/raft_bfknn/bfknn",
"search_params": [
{
"probe": 1
}
],
"search_result_file": "result/deep-image-96-inner/raft_bfknn/bfknn"
},
{
"name": "faiss_gpu_ivf_flat.nlist1024",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 1024
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_flat/nlist1024"
},
{
"name": "faiss_gpu_ivf_flat.nlist2048",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 2048
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_flat/nlist2048",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_flat/nlist2048"
},
{
"name": "faiss_gpu_ivf_flat.nlist4096",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 4096
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_flat/nlist4096",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_flat/nlist4096"
},
{
"name": "faiss_gpu_ivf_flat.nlist8192",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 8192
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_flat/nlist8192",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_flat/nlist8192"
},
{
"name": "faiss_gpu_ivf_flat.nlist16384",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 16384
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_flat/nlist16384"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": true
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_pq/M64-nlist1024",
"search_params": [
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024.noprecomp",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": false
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp",
"search_params": [
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
],
      "search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "fp16"
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_sq/nlist1024-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_sq/nlist1024-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "fp16"
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_sq/nlist2048-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_sq/nlist2048-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "fp16"
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_sq/nlist4096-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_sq/nlist4096-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "fp16"
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_sq/nlist8192-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_sq/nlist8192-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "fp16"
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_sq/nlist16384-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_sq/nlist16384-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "int8"
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_sq/nlist1024-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_sq/nlist1024-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "int8"
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_sq/nlist2048-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_sq/nlist2048-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "int8"
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_sq/nlist4096-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_sq/nlist4096-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "int8"
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_sq/nlist8192-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_sq/nlist8192-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "int8"
},
"file": "index/deep-image-96-inner/faiss_gpu_ivf_sq/nlist16384-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_ivf_sq/nlist16384-int8"
},
{
"name": "faiss_gpu_flat",
"algo": "faiss_gpu_flat",
"build_param": {},
"file": "index/deep-image-96-inner/faiss_gpu_flat/flat",
"search_params": [
{}
],
"search_result_file": "result/deep-image-96-inner/faiss_gpu_flat/flat"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024",
"algo": "raft_ivf_pq",
"build_param": {"nlist": 1024, "pq_dim": 128, "ratio": 1, "niter": 25
},
"file": "index/deep-image-96-inner/raft_ivf_pq/dimpq128-cluster1024",
"search_params": [
{"nprobe": 10, "internalDistanceDtype": "half", "smemLutDtype": "half"},
{"nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half"},
{"nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half"},
{"nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half"},
{"nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half"},
{"nprobe": 1024, "internalDistanceDtype": "half", "smemLutDtype": "half"}
],
"search_result_file": "result/deep-image-96-inner/raft_ivf_pq/dimpq128-cluster1024"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/deep-image-96-inner/raft_ivf_pq/dimpq128-cluster1024-float-float",
"search_params": [
{"nprobe": 1, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 5, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 10, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 1024, "internalDistanceDtype": "float", "smemLutDtype": "float"}
],
"search_result_file": "result/deep-image-96-inner/raft_ivf_pq/dimpq128-cluster1024-float-float"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/deep-image-96-inner/raft_ivf_pq/dimpq128-cluster1024-float-half",
"search_params": [
{"nprobe": 10, "internalDistanceDtype": "float", "smemLutDtype": "half"},
{"nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "half"},
{"nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "half"},
{"nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "half"},
{"nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "half"},
{"nprobe": 1024, "internalDistanceDtype": "float", "smemLutDtype": "half"}
],
"search_result_file": "result/deep-image-96-inner/raft_ivf_pq/dimpq128-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/deep-image-96-inner/raft_ivf_pq/dimpq128-cluster1024-float-fp8",
"search_params": [
{"nprobe": 10, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 1024, "internalDistanceDtype": "float", "smemLutDtype": "fp8"}
],
"search_result_file": "result/deep-image-96-inner/raft_ivf_pq/dimpq128-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/deep-image-96-inner/raft_ivf_pq/dimpq64-cluster1024-float-fp8",
"search_params": [
{"nprobe": 10, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 1024, "internalDistanceDtype": "float", "smemLutDtype": "fp8"}
],
"search_result_file": "result/deep-image-96-inner/raft_ivf_pq/dimpq64-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/deep-image-96-inner/raft_ivf_pq/dimpq64-cluster1024-float-half",
"search_params": [
{"nprobe": 10, "internalDistanceDtype": "float", "smemLutDtype": "half"},
{"nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "half"},
{"nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "half"},
{"nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "half"},
{"nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "half"},
{"nprobe": 1024, "internalDistanceDtype": "float", "smemLutDtype": "half"}
],
"search_result_file": "result/deep-image-96-inner/raft_ivf_pq/dimpq64-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq32-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 32,
"ratio": 1,
"niter": 25
},
"file": "index/deep-image-96-inner/raft_ivf_pq/dimpq32-cluster1024-float-fp8",
"search_params": [
{"nprobe": 10, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 1024, "internalDistanceDtype": "float", "smemLutDtype": "fp8"}
],
"search_result_file": "result/deep-image-96-inner/raft_ivf_pq/dimpq32-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq16-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 16,
"ratio": 1,
"niter": 25
},
"file": "index/deep-image-96-inner/raft_ivf_pq/dimpq16-cluster1024-float-fp8",
"search_params": [
{"nprobe": 10, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "fp8"},
{"nprobe": 1024, "internalDistanceDtype": "float", "smemLutDtype": "fp8"}
],
"search_result_file": "result/deep-image-96-inner/raft_ivf_pq/dimpq16-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-half-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/deep-image-96-inner/raft_ivf_pq/dimpq128-cluster1024-half-float",
"search_params": [
{"nprobe": 10, "internalDistanceDtype": "half", "smemLutDtype": "float"},
{"nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "float"},
{"nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "float"},
{"nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "float"},
{"nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "float"},
{"nprobe": 1024, "internalDistanceDtype": "half", "smemLutDtype": "float"}
],
"search_result_file": "result/deep-image-96-inner/raft_ivf_pq/dimpq128-cluster1024-half-float"
},
{
"name": "raft_ivf_pq.dimpq512-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 512,
"ratio": 1,
"niter": 25
},
"file": "index/deep-image-96-inner/raft_ivf_pq/dimpq512-cluster1024-float-float",
"search_params": [
{"nprobe": 10, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 1024, "internalDistanceDtype": "float", "smemLutDtype": "float"}
],
"search_result_file": "result/deep-image-96-inner/raft_ivf_pq/dimpq512-cluster1024-float-float"
},
{
"name": "raft_ivf_flat.nlist1024",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 1024,
"ratio": 1,
"niter": 25
},
"file": "index/deep-image-96-inner/raft_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/deep-image-96-inner/raft_ivf_flat/nlist1024"
},
{
"name": "raft_ivf_flat.nlist16384",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 16384,
"ratio": 2,
"niter": 20
},
"file": "index/deep-image-96-inner/raft_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/deep-image-96-inner/raft_ivf_flat/nlist16384"
},
{
"name" : "raft_cagra.dim32",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 32
},
"file" : "index/deep-image-96-inner/raft_cagra/dim32",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/deep-image-96-inner/raft_cagra/dim32"
},
{
"name" : "raft_cagra.dim64",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 64
},
"file" : "index/deep-image-96-inner/raft_cagra/dim64",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/deep-image-96-inner/raft_cagra/dim64"
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/gist-960-euclidean.json | {
"dataset": {
"name": "gist-960-euclidean",
"base_file": "gist-960-euclidean/base.fbin",
"query_file": "gist-960-euclidean/query.fbin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 5000,
"k": 10,
"run_count": 3
},
"index": [
{
"name" : "hnswlib.M12",
"algo" : "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file" : "index/gist-960-euclidean/hnswlib/M12",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/gist-960-euclidean/hnswlib/M12"
},
{
"name" : "hnswlib.M16",
"algo" : "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file" : "index/gist-960-euclidean/hnswlib/M16",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/gist-960-euclidean/hnswlib/M16"
},
{
"name" : "hnswlib.M24",
"algo" : "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file" : "index/gist-960-euclidean/hnswlib/M24",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/gist-960-euclidean/hnswlib/M24"
},
{
"name" : "hnswlib.M36",
"algo" : "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file" : "index/gist-960-euclidean/hnswlib/M36",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/gist-960-euclidean/hnswlib/M36"
},
{
"name": "raft_bfknn",
"algo": "raft_bfknn",
"build_param": {},
"file": "index/gist-960-euclidean/raft_bfknn/bfknn",
"search_params": [
{
"probe": 1
}
],
"search_result_file": "result/gist-960-euclidean/raft_bfknn/bfknn"
},
{
"name": "faiss_gpu_ivf_flat.nlist1024",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 1024
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_flat/nlist1024"
},
{
"name": "faiss_gpu_ivf_flat.nlist2048",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 2048
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_flat/nlist2048",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_flat/nlist2048"
},
{
"name": "faiss_gpu_ivf_flat.nlist4096",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 4096
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_flat/nlist4096",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_flat/nlist4096"
},
{
"name": "faiss_gpu_ivf_flat.nlist8192",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 8192
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_flat/nlist8192",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_flat/nlist8192"
},
{
"name": "faiss_gpu_ivf_flat.nlist16384",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 16384
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_flat/nlist16384"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": true
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_pq/M64-nlist1024",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024.noprecomp",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": false
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "fp16"
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_sq/nlist1024-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_sq/nlist1024-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "fp16"
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_sq/nlist2048-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_sq/nlist2048-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "fp16"
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_sq/nlist4096-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_sq/nlist4096-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "fp16"
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_sq/nlist8192-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_sq/nlist8192-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "fp16"
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_sq/nlist16384-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_sq/nlist16384-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "int8"
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_sq/nlist1024-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_sq/nlist1024-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "int8"
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_sq/nlist2048-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_sq/nlist2048-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "int8"
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_sq/nlist4096-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_sq/nlist4096-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "int8"
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_sq/nlist8192-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_sq/nlist8192-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "int8"
},
"file": "index/gist-960-euclidean/faiss_gpu_ivf_sq/nlist16384-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_ivf_sq/nlist16384-int8"
},
{
"name": "faiss_gpu_flat",
"algo": "faiss_gpu_flat",
"build_param": {},
"file": "index/gist-960-euclidean/faiss_gpu_flat/flat",
"search_params": [
{}
],
"search_result_file": "result/gist-960-euclidean/faiss_gpu_flat/flat"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/gist-960-euclidean/raft_ivf_pq/dimpq128-cluster1024",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
}
],
"search_result_file": "result/gist-960-euclidean/raft_ivf_pq/dimpq128-cluster1024"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/gist-960-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-float",
"search_params": [
{
"k": 10,
"numProbes": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 5,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/gist-960-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-float"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/gist-960-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-half",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/gist-960-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/gist-960-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/gist-960-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/gist-960-euclidean/raft_ivf_pq/dimpq64-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/gist-960-euclidean/raft_ivf_pq/dimpq64-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/gist-960-euclidean/raft_ivf_pq/dimpq64-cluster1024-float-half",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/gist-960-euclidean/raft_ivf_pq/dimpq64-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq32-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 32,
"ratio": 1,
"niter": 25
},
"file": "index/gist-960-euclidean/raft_ivf_pq/dimpq32-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/gist-960-euclidean/raft_ivf_pq/dimpq32-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq16-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 16,
"ratio": 1,
"niter": 25
},
"file": "index/gist-960-euclidean/raft_ivf_pq/dimpq16-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/gist-960-euclidean/raft_ivf_pq/dimpq16-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-half-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/gist-960-euclidean/raft_ivf_pq/dimpq128-cluster1024-half-float",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
}
],
"search_result_file": "result/gist-960-euclidean/raft_ivf_pq/dimpq128-cluster1024-half-float"
},
{
"name": "raft_ivf_pq.dimpq512-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 512,
"ratio": 1,
"niter": 25
},
"file": "index/gist-960-euclidean/raft_ivf_pq/dimpq512-cluster1024-float-float",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/gist-960-euclidean/raft_ivf_pq/dimpq512-cluster1024-float-float"
},
{
"name": "raft_ivf_flat.nlist1024",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 1024,
"ratio": 1,
"niter": 25
},
"file": "index/gist-960-euclidean/raft_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/gist-960-euclidean/raft_ivf_flat/nlist1024"
},
{
"name": "raft_ivf_flat.nlist16384",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 16384,
"ratio": 2,
"niter": 20
},
"file": "index/gist-960-euclidean/raft_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/gist-960-euclidean/raft_ivf_flat/nlist16384"
},
{
"name" : "raft_cagra.dim32",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 32
},
"file" : "index/gist-960-euclidean/raft_cagra/dim32",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/gist-960-euclidean/raft_cagra/dim32"
},
{
"name" : "raft_cagra.dim64",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 64
},
"file" : "index/gist-960-euclidean/raft_cagra/dim64",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/gist-960-euclidean/raft_cagra/dim64"
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/fashion-mnist-784-euclidean.json | {
"dataset": {
"name": "fashion-mnist-784-euclidean",
"base_file": "fashion-mnist-784-euclidean/base.fbin",
"query_file": "fashion-mnist-784-euclidean/query.fbin",
"groundtruth_neighbors_file": "fashion-mnist-784-euclidean/groundtruth.neighbors.ibin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 5000,
"k": 10,
"run_count": 3
},
"index": [
{
"name" : "hnswlib.M12",
"algo" : "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file" : "index/fashion-mnist-784-euclidean/hnswlib/M12",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/fashion-mnist-784-euclidean/hnswlib/M12"
},
{
"name" : "hnswlib.M16",
"algo" : "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file" : "index/fashion-mnist-784-euclidean/hnswlib/M16",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/fashion-mnist-784-euclidean/hnswlib/M16"
},
{
"name" : "hnswlib.M24",
"algo" : "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file" : "index/fashion-mnist-784-euclidean/hnswlib/M24",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/fashion-mnist-784-euclidean/hnswlib/M24"
},
{
"name" : "hnswlib.M36",
"algo" : "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file" : "index/fashion-mnist-784-euclidean/hnswlib/M36",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/fashion-mnist-784-euclidean/hnswlib/M36"
},
{
"name": "raft_bfknn",
"algo": "raft_bfknn",
"build_param": {},
"file": "index/fashion-mnist-784-euclidean/raft_bfknn/bfknn",
"search_params": [
{
"probe": 1
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_bfknn/bfknn"
},
{
"name": "faiss_gpu_ivf_flat.nlist1024",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 1024
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_flat/nlist1024"
},
{
"name": "faiss_gpu_ivf_flat.nlist2048",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 2048
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_flat/nlist2048",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_flat/nlist2048"
},
{
"name": "faiss_gpu_ivf_flat.nlist4096",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 4096
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_flat/nlist4096",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_flat/nlist4096"
},
{
"name": "faiss_gpu_ivf_flat.nlist8192",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 8192
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_flat/nlist8192",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_flat/nlist8192"
},
{
"name": "faiss_gpu_ivf_flat.nlist16384",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 16384
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_flat/nlist16384"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": true
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_pq/M64-nlist1024",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024.noprecomp",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": false
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "fp16"
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist1024-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist1024-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "fp16"
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist2048-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist2048-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "fp16"
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist4096-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist4096-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "fp16"
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist8192-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist8192-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "fp16"
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist16384-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist16384-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "int8"
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist1024-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist1024-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "int8"
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist2048-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist2048-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "int8"
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist4096-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist4096-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "int8"
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist8192-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist8192-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "int8"
},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist16384-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_ivf_sq/nlist16384-int8"
},
{
"name": "faiss_gpu_flat",
"algo": "faiss_gpu_flat",
"build_param": {},
"file": "index/fashion-mnist-784-euclidean/faiss_gpu_flat/flat",
"search_params": [
{}
],
"search_result_file": "result/fashion-mnist-784-euclidean/faiss_gpu_flat/flat"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-float",
"search_params": [
{
"k": 10,
"numProbes": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 5,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-float"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-half",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq64-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq64-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq64-cluster1024-float-half",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq64-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq32-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 32,
"ratio": 1,
"niter": 25
},
"file": "index/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq32-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq32-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq16-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 16,
"ratio": 1,
"niter": 25
},
"file": "index/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq16-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq16-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-half-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-half-float",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq128-cluster1024-half-float"
},
{
"name": "raft_ivf_pq.dimpq512-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 512,
"ratio": 1,
"niter": 25
},
"file": "index/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq512-cluster1024-float-float",
"search_params": [
{
"k": 10,
"numProbes": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"numProbes": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_ivf_pq/dimpq512-cluster1024-float-float"
},
{
"name": "raft_ivf_flat.nlist1024",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 1024,
"ratio": 1,
"niter": 25
},
"file": "index/fashion-mnist-784-euclidean/raft_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_ivf_flat/nlist1024"
},
{
"name": "raft_ivf_flat.nlist16384",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 16384,
"ratio": 2,
"niter": 20
},
"file": "index/fashion-mnist-784-euclidean/raft_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/fashion-mnist-784-euclidean/raft_ivf_flat/nlist16384"
},
{
"name" : "raft_cagra.dim32",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 32
},
"file" : "index/fashion-mnist-784-euclidean/raft_cagra/dim32",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/fashion-mnist-784-euclidean/raft_cagra/dim32"
},
{
"name" : "raft_cagra.dim64",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 64
},
"file" : "index/fashion-mnist-784-euclidean/raft_cagra/dim64",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/fashion-mnist-784-euclidean/raft_cagra/dim64"
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/sift-128-euclidean.json | {
"dataset": {
"name": "sift-128-euclidean",
"base_file": "sift-128-euclidean/base.fbin",
"query_file": "sift-128-euclidean/query.fbin",
"groundtruth_neighbors_file": "sift-128-euclidean/groundtruth.neighbors.ibin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 5000,
"k": 10
},
"index": [
{
"name": "hnswlib.M12",
"algo": "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file": "sift-128-euclidean/hnswlib/M12",
"search_params": [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
]
},
{
"name": "hnswlib.M16",
"algo": "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file": "sift-128-euclidean/hnswlib/M16",
"search_params": [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
]
},
{
"name": "hnswlib.M24",
"algo": "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file": "sift-128-euclidean/hnswlib/M24",
"search_params": [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
]
},
{
"name": "hnswlib.M36",
"algo": "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file": "sift-128-euclidean/hnswlib/M36",
"search_params": [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
]
},
{
"name": "raft_bfknn",
"algo": "raft_bfknn",
"build_param": {},
"file": "sift-128-euclidean/raft_bfknn/bfknn",
"search_params": [{"probe": 1}]
},
{
"name": "faiss_gpu_ivf_flat.nlist1024",
"algo": "faiss_gpu_ivf_flat",
"build_param": {"nlist": 1024},
"file": "sift-128-euclidean/faiss_gpu_ivf_flat/nlist1024",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_flat.nlist2048",
"algo": "faiss_gpu_ivf_flat",
"build_param": {"nlist": 2048},
"file": "sift-128-euclidean/faiss_gpu_ivf_flat/nlist2048",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_flat.nlist4096",
"algo": "faiss_gpu_ivf_flat",
"build_param": {"nlist": 4096},
"file": "sift-128-euclidean/faiss_gpu_ivf_flat/nlist4096",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_flat.nlist8192",
"algo": "faiss_gpu_ivf_flat",
"build_param": {"nlist": 8192},
"file": "sift-128-euclidean/faiss_gpu_ivf_flat/nlist8192",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_flat.nlist16384",
"algo": "faiss_gpu_ivf_flat",
"build_param": {"nlist": 16384},
"file": "sift-128-euclidean/faiss_gpu_ivf_flat/nlist16384",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000},
{"nprobe": 2000}
]
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024",
"algo": "faiss_gpu_ivf_pq",
"build_param": {"nlist": 1024, "M": 64, "useFloat16": true, "usePrecomputed": true},
"file": "sift-128-euclidean/faiss_gpu_ivf_pq/M64-nlist1024",
"search_params": [
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024.noprecomp",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": false
},
"file": "sift-128-euclidean/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp",
"search_params": [
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist": 1024, "quantizer_type": "fp16"},
"file": "sift-128-euclidean/faiss_gpu_ivf_sq/nlist1024-fp16",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist": 2048, "quantizer_type": "fp16"},
"file": "sift-128-euclidean/faiss_gpu_ivf_sq/nlist2048-fp16",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist": 4096, "quantizer_type": "fp16"},
"file": "sift-128-euclidean/faiss_gpu_ivf_sq/nlist4096-fp16",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist": 8192, "quantizer_type": "fp16"},
"file": "sift-128-euclidean/faiss_gpu_ivf_sq/nlist8192-fp16",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist": 16384, "quantizer_type": "fp16"},
"file": "sift-128-euclidean/faiss_gpu_ivf_sq/nlist16384-fp16",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000},
{"nprobe": 2000}
]
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist": 1024, "quantizer_type": "int8"},
"file": "sift-128-euclidean/faiss_gpu_ivf_sq/nlist1024-int8",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist": 2048,"quantizer_type": "int8"},
"file": "sift-128-euclidean/faiss_gpu_ivf_sq/nlist2048-int8",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist": 4096, "quantizer_type": "int8"},
"file": "sift-128-euclidean/faiss_gpu_ivf_sq/nlist4096-int8",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist": 8192, "quantizer_type": "int8"},
"file": "sift-128-euclidean/faiss_gpu_ivf_sq/nlist8192-int8",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {"nlist": 16384, "quantizer_type": "int8"},
"file": "sift-128-euclidean/faiss_gpu_ivf_sq/nlist16384-int8",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000},
{"nprobe": 2000}
]
},
{
"name": "faiss_gpu_flat",
"algo": "faiss_gpu_flat",
"build_param": {},
"file": "sift-128-euclidean/faiss_gpu_flat/flat",
"search_params": [{}]
},
{
"name": "raft_ivf_pq.dimpq64-bitpq8-cluster1K",
"algo": "raft_ivf_pq",
"build_param": {"niter": 25, "nlist": 1000, "pq_dim": 64, "pq_bits": 8, "ratio": 1},
"file": "sift-128-euclidean/raft_ivf_pq/dimpq64-bitpq8-cluster1K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 1000, "internalDistanceDtype": "half", "smemLutDtype": "half" }
]
},
{
"name": "raft_ivf_pq.dimpq128-bitpq6-cluster1K",
"algo": "raft_ivf_pq",
"build_param": {"niter": 25, "nlist": 1000, "pq_dim": 128, "pq_bits": 6, "ratio": 1},
"file": "sift-128-euclidean/raft_ivf_pq/dimpq128-bitpq6-cluster1K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "float" },
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "fp8" },
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half" },
{ "nprobe": 1000, "internalDistanceDtype": "half", "smemLutDtype": "half" }
]
},
{
"name": "raft_ivf_flat.nlist1024",
"algo": "raft_ivf_flat",
"build_param": {"nlist": 1024, "ratio": 1, "niter": 25},
"file": "sift-128-euclidean/raft_ivf_flat/nlist1024",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000}
]
},
{
"name": "raft_ivf_flat.nlist16384",
"algo": "raft_ivf_flat",
"build_param": {"nlist": 16384, "ratio": 2, "niter": 20},
"file": "sift-128-euclidean/raft_ivf_flat/nlist16384",
"search_params": [
{"nprobe": 1},
{"nprobe": 5},
{"nprobe": 10},
{"nprobe": 50},
{"nprobe": 100},
{"nprobe": 200},
{"nprobe": 500},
{"nprobe": 1000},
{"nprobe": 2000}
]
},
{
"name": "raft_cagra.dim32",
"algo": "raft_cagra",
"build_param": {"graph_degree": 32},
"file": "sift-128-euclidean/raft_cagra/dim32",
"search_params": [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
]
},
{
"name": "raft_cagra.dim64",
"algo": "raft_cagra",
"build_param": {"graph_degree": 64},
"file": "sift-128-euclidean/raft_cagra/dim64",
"search_params": [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
]
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/deep-100M.json | {
"dataset": {
"name": "deep-100M",
"base_file": "deep-100M/base.1B.fbin",
"subset_size": 100000000,
"query_file": "deep-100M/query.public.10K.fbin",
"groundtruth_neighbors_file": "deep-100M/groundtruth.neighbors.ibin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 10000,
"k": 10
},
"index": [
{
"name": "hnswlib.M12",
"algo": "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file": "deep-100M/hnswlib/M12",
"search_params": [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
]
},
{
"name": "hnswlib.M16",
"algo": "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file": "deep-100M/hnswlib/M16",
"search_params": [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
]
},
{
"name": "hnswlib.M24",
"algo": "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file": "deep-100M/hnswlib/M24",
"search_params": [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
]
},
{
"name": "hnswlib.M36",
"algo": "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file": "deep-100M/hnswlib/M36",
"search_params": [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
]
},
{
"name": "faiss_gpu_ivf_flat.nlist50K",
"algo": "faiss_gpu_ivf_flat",
"build_param": {"nlist":50000},
"file": "deep-100M/faiss_gpu_ivf_flat/nlist50K",
"search_params": [
{"nprobe":20},
{"nprobe":30},
{"nprobe":40},
{"nprobe":50},
{"nprobe":100},
{"nprobe":200},
{"nprobe":500},
{"nprobe":1000}
]
},
{
"name": "faiss_gpu_ivf_flat.nlist100K",
"algo": "faiss_gpu_ivf_flat",
"build_param": {"nlist":100000},
"file": "deep-100M/faiss_gpu_ivf_flat/nlist100K",
"search_params": [
{"nprobe":20},
{"nprobe":30},
{"nprobe":40},
{"nprobe":50},
{"nprobe":100},
{"nprobe":200},
{"nprobe":500},
{"nprobe":1000}
]
},
{
"name": "faiss_gpu_ivf_flat.nlist200K",
"algo": "faiss_gpu_ivf_flat",
"build_param": {"nlist":200000},
"file": "deep-100M/faiss_gpu_ivf_flat/nlist200K",
"search_params": [
{"nprobe":20},
{"nprobe":30},
{"nprobe":40},
{"nprobe":50},
{"nprobe":100},
{"nprobe":200},
{"nprobe":500},
{"nprobe":1000}
]
},
{
"name": "faiss_gpu_ivf_pq.M48-nlist16K",
"algo": "faiss_gpu_ivf_pq",
"build_param": {"nlist":16384, "M":48},
"file": "deep-100M/faiss_gpu_ivf_pq/M48-nlist16K",
"search_params": [
{"nprobe":10},
{"nprobe":20},
{"nprobe":30},
{"nprobe":40},
{"nprobe":50},
{"nprobe":100},
{"nprobe":200},
{"nprobe":500}
]
},
{
"name": "faiss_gpu_ivf_pq.M48-nlist50K",
"algo": "faiss_gpu_ivf_pq",
"build_param": {"nlist":50000, "M":48},
"file": "deep-100M/faiss_gpu_ivf_pq/M48-nlist50K",
"search_params": [
{"nprobe":20},
{"nprobe":30},
{"nprobe":40},
{"nprobe":50},
{"nprobe":100},
{"nprobe":200},
{"nprobe":500},
{"nprobe":1000}
]
},
{
"name": "faiss_gpu_ivf_pq.M48-nlist100K",
"algo": "faiss_gpu_ivf_pq",
"build_param": {"nlist":100000, "M":48},
"file": "deep-100M/faiss_gpu_ivf_pq/M48-nlist100K",
"search_params": [
{"nprobe":20},
{"nprobe":30},
{"nprobe":40},
{"nprobe":50},
{"nprobe":100},
{"nprobe":200},
{"nprobe":500},
{"nprobe":1000}
]
},
{
"name": "faiss_gpu_ivf_pq.M48-nlist200K",
"algo": "faiss_gpu_ivf_pq",
"build_param": {"nlist":200000, "M":48},
"file": "deep-100M/faiss_gpu_ivf_pq/M48-nlist200K",
"search_params": [
{"nprobe":20},
{"nprobe":30},
{"nprobe":40},
{"nprobe":50},
{"nprobe":100},
{"nprobe":200},
{"nprobe":500},
{"nprobe":1000}
]
},
{
"name": "raft_ivf_flat.nlist50K",
"algo": "raft_ivf_flat",
"build_param": {"nlist": 50000, "niter": 25, "ratio": 5},
"file": "deep-100M/raft_ivf_flat/nlist50K",
"search_params": [
{"max_batch":10000, "max_k":10, "nprobe":20},
{"max_batch":10000, "max_k":10, "nprobe":30},
{"max_batch":10000, "max_k":10, "nprobe":40},
{"max_batch":10000, "max_k":10, "nprobe":50},
{"max_batch":10000, "max_k":10, "nprobe":100},
{"max_batch":10000, "max_k":10, "nprobe":200},
{"max_batch":10000, "max_k":10, "nprobe":500},
{"max_batch":10000, "max_k":10, "nprobe":1000}
]
},
{
"name": "raft_ivf_flat.nlist100K",
"algo": "raft_ivf_flat",
"build_param": {"nlist": 100000, "niter": 25, "ratio": 5},
"file": "deep-100M/raft_ivf_flat/nlist100K",
"search_params": [
{"max_batch":10000, "max_k":10, "nprobe":20},
{"max_batch":10000, "max_k":10, "nprobe":30},
{"max_batch":10000, "max_k":10, "nprobe":40},
{"max_batch":10000, "max_k":10, "nprobe":50},
{"max_batch":10000, "max_k":10, "nprobe":100},
{"max_batch":10000, "max_k":10, "nprobe":200},
{"max_batch":10000, "max_k":10, "nprobe":500},
{"max_batch":10000, "max_k":10, "nprobe":1000}
]
},
{
"name": "raft_ivf_flat.nlist200K",
"algo": "raft_ivf_flat",
"build_param": {"nlist": 200000, "niter": 25, "ratio": 5},
"file": "deep-100M/raft_ivf_flat/nlist200K",
"search_params": [
{"max_batch":10000, "max_k":10, "nprobe":20},
{"max_batch":10000, "max_k":10, "nprobe":30},
{"max_batch":10000, "max_k":10, "nprobe":40},
{"max_batch":10000, "max_k":10, "nprobe":50},
{"max_batch":10000, "max_k":10, "nprobe":100},
{"max_batch":10000, "max_k":10, "nprobe":200},
{"max_batch":10000, "max_k":10, "nprobe":500},
{"max_batch":10000, "max_k":10, "nprobe":1000}
]
},
{
"name": "raft_ivf_pq.d96b5n50K",
"algo": "raft_ivf_pq",
"build_param": {"nlist": 50000, "pq_dim": 96, "pq_bits": 5, "ratio": 10, "niter": 25},
"file": "deep-100M/raft_ivf_pq/d96b5n50K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 2 },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 2 },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 2 },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 2 },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 2 },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 2 },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 2 },
{ "nprobe": 2000, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 2 },
{ "nprobe": 5000, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 2 },
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 2000, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 5000, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 2000, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 5000, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 1000, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 2000, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 5000, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 2 },
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 1000, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 2000, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 2 },
{ "nprobe": 5000, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 2 }
]
},
{
"name": "raft_ivf_pq.d64b5n50K",
"algo": "raft_ivf_pq",
"build_param": {"nlist": 50000, "pq_dim": 64, "pq_bits": 5, "ratio": 10, "niter": 25},
"file": "deep-100M/raft_ivf_pq/d64b5n50K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 4 },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 4 },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 4 },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 4 },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 4 },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 4 },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 4 },
{ "nprobe": 2000, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 4 },
{ "nprobe": 5000, "internalDistanceDtype": "float", "smemLutDtype": "float", "refine_ratio": 4 },
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 2000, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 5000, "internalDistanceDtype": "float", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 20, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 30, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 40, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 1000, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 2000, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 5000, "internalDistanceDtype": "float", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 1000, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 2000, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 5000, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 1000, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 2000, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 4 },
{ "nprobe": 5000, "internalDistanceDtype": "half", "smemLutDtype": "fp8", "refine_ratio": 4 }
]
},
{
"name": "raft_ivf_pq.dimpq512-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 512,
"ratio": 1,
"niter": 25
},
"file": "index/deep-image-96-angular/raft_ivf_pq/dimpq512-cluster1024-float-float",
"search_params": [
{"nprobe": 10, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 50, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 100, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 200, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 500, "internalDistanceDtype": "float", "smemLutDtype": "float"},
{"nprobe": 1024, "internalDistanceDtype": "float", "smemLutDtype": "float"}
],
"search_result_file": "result/deep-image-96-angular/raft_ivf_pq/dimpq512-cluster1024-float-float"
},
{
"name": "raft_cagra.dim32",
"algo": "raft_cagra",
"build_param": {"graph_degree": 32, "intermediate_graph_degree": 48},
"file": "deep-100M/raft_cagra/dim32",
"search_params": [
{"itopk": 32, "search_width": 1, "max_iterations": 0, "algo": "single_cta"},
{"itopk": 32, "search_width": 1, "max_iterations": 32, "algo": "single_cta"},
{"itopk": 64, "search_width": 4, "max_iterations": 16, "algo": "single_cta"},
{"itopk": 64, "search_width": 1, "max_iterations": 64, "algo": "single_cta"},
{"itopk": 96, "search_width": 2, "max_iterations": 48, "algo": "single_cta"},
{"itopk": 128, "search_width": 8, "max_iterations": 16, "algo": "single_cta"},
{"itopk": 128, "search_width": 2, "max_iterations": 64, "algo": "single_cta"},
{"itopk": 192, "search_width": 8, "max_iterations": 24, "algo": "single_cta"},
{"itopk": 192, "search_width": 2, "max_iterations": 96, "algo": "single_cta"},
{"itopk": 256, "search_width": 8, "max_iterations": 32, "algo": "single_cta"},
{"itopk": 384, "search_width": 8, "max_iterations": 48, "algo": "single_cta"},
{"itopk": 512, "search_width": 8, "max_iterations": 64, "algo": "single_cta"}
]
},
{
"name": "raft_cagra.dim32.multi_cta",
"algo": "raft_cagra",
"build_param": {"graph_degree": 32, "intermediate_graph_degree": 48},
"file": "deep-100M/raft_cagra/dim32",
"search_params": [
{"itopk": 32, "search_width": 1, "max_iterations": 0, "algo": "multi_cta"},
{"itopk": 32, "search_width": 1, "max_iterations": 32, "algo": "multi_cta"},
{"itopk": 64, "search_width": 4, "max_iterations": 16, "algo": "multi_cta"},
{"itopk": 64, "search_width": 1, "max_iterations": 64, "algo": "multi_cta"},
{"itopk": 96, "search_width": 2, "max_iterations": 48, "algo": "multi_cta"},
{"itopk": 128, "search_width": 8, "max_iterations": 16, "algo": "multi_cta"},
{"itopk": 128, "search_width": 2, "max_iterations": 64, "algo": "multi_cta"},
{"itopk": 192, "search_width": 8, "max_iterations": 24, "algo": "multi_cta"},
{"itopk": 192, "search_width": 2, "max_iterations": 96, "algo": "multi_cta"},
{"itopk": 256, "search_width": 8, "max_iterations": 32, "algo": "multi_cta"},
{"itopk": 384, "search_width": 8, "max_iterations": 48, "algo": "multi_cta"},
{"itopk": 512, "search_width": 8, "max_iterations": 64, "algo": "multi_cta"}
]
},
{
"name": "raft_cagra.dim32.multi_kernel",
"algo": "raft_cagra",
"build_param": {"graph_degree": 32, "intermediate_graph_degree": 48},
"file": "deep-100M/raft_cagra/dim32",
"search_params": [
{"itopk": 32, "search_width": 1, "max_iterations": 0, "algo": "multi_kernel"},
{"itopk": 32, "search_width": 1, "max_iterations": 32, "algo": "multi_kernel"},
{"itopk": 64, "search_width": 4, "max_iterations": 16, "algo": "multi_kernel"},
{"itopk": 64, "search_width": 1, "max_iterations": 64, "algo": "multi_kernel"},
{"itopk": 96, "search_width": 2, "max_iterations": 48, "algo": "multi_kernel"},
{"itopk": 128, "search_width": 8, "max_iterations": 16, "algo": "multi_kernel"},
{"itopk": 128, "search_width": 2, "max_iterations": 64, "algo": "multi_kernel"},
{"itopk": 192, "search_width": 8, "max_iterations": 24, "algo": "multi_kernel"},
{"itopk": 192, "search_width": 2, "max_iterations": 96, "algo": "multi_kernel"},
{"itopk": 256, "search_width": 8, "max_iterations": 32, "algo": "multi_kernel"},
{"itopk": 384, "search_width": 8, "max_iterations": 48, "algo": "multi_kernel"},
{"itopk": 512, "search_width": 8, "max_iterations": 64, "algo": "multi_kernel"}
]
},
{
"name": "raft_cagra.dim64",
"algo": "raft_cagra",
"build_param": {"graph_degree": 64},
"file": "deep-100M/raft_cagra/dim64",
"search_params": [
{"itopk": 32, "search_width": 1, "max_iterations": 0},
{"itopk": 32, "search_width": 1, "max_iterations": 32},
{"itopk": 64, "search_width": 4, "max_iterations": 16},
{"itopk": 64, "search_width": 1, "max_iterations": 64},
{"itopk": 96, "search_width": 2, "max_iterations": 48},
{"itopk": 128, "search_width": 8, "max_iterations": 16},
{"itopk": 128, "search_width": 2, "max_iterations": 64},
{"itopk": 192, "search_width": 8, "max_iterations": 24},
{"itopk": 192, "search_width": 2, "max_iterations": 96},
{"itopk": 256, "search_width": 8, "max_iterations": 32},
{"itopk": 384, "search_width": 8, "max_iterations": 48},
{"itopk": 512, "search_width": 8, "max_iterations": 64}
]
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/deep-1B.json | {
"dataset": {
"name": "deep-1B",
"base_file": "deep-1B/base.1B.fbin",
"query_file": "deep-1B/query.public.10K.fbin",
"groundtruth_neighbors_file": "deep-1B/groundtruth.neighbors.ibin",
"distance": "inner_product"
},
"search_basic_param": {
"batch_size": 10000,
"k": 10
},
"index": [
{
"name": "faiss_gpu_ivf_pq.M48-nlist50K",
"algo": "faiss_gpu_ivf_pq",
"build_param": {"nlist":50000, "M":48},
"file": "deep-1B/faiss_gpu_ivf_pq/M48-nlist50K",
"search_params": [
{"nprobe":1},
{"nprobe":5},
{"nprobe":10},
{"nprobe":50},
{"nprobe":100},
{"nprobe":200},
{"nprobe":500},
{"nprobe":1000},
{"nprobe":2000}
]
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/wiki_all_1M.json | {
"dataset": {
"name": "wiki_all_1M",
"base_file": "wiki_all_1M/base.1M.fbin",
"subset_size": 1000000,
"query_file": "wiki_all_1M/queries.fbin",
"groundtruth_neighbors_file": "wiki_all_1M/groundtruth.1M.neighbors.ibin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 10000,
"k": 10
},
"index": [
{
"name": "hnswlib.M16.ef50",
"algo": "hnswlib",
"build_param": { "M": 16, "efConstruction": 50, "numThreads": 56 },
"file": "wiki_all_1M/hnswlib/M16.ef50",
"search_params": [
{ "ef": 10, "numThreads": 56 },
{ "ef": 20, "numThreads": 56 },
{ "ef": 40, "numThreads": 56 },
{ "ef": 60, "numThreads": 56 },
{ "ef": 80, "numThreads": 56 },
{ "ef": 120, "numThreads": 56 },
{ "ef": 200, "numThreads": 56 },
{ "ef": 400, "numThreads": 56 },
{ "ef": 600, "numThreads": 56 },
{ "ef": 800, "numThreads": 56 }
]
},
{
"name": "faiss_ivf_pq.M32-nlist16K",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"M": 32,
"nlist": 16384,
"ratio": 2
},
"file": "wiki_all_1M/faiss_ivf_pq/M32-nlist16K_ratio2",
"search_params": [
{ "nprobe": 10 },
{ "nprobe": 20 },
{ "nprobe": 30 },
{ "nprobe": 40 },
{ "nprobe": 50 },
{ "nprobe": 100 },
{ "nprobe": 200 },
{ "nprobe": 500 }
]
},
{
"name": "faiss_ivf_pq.M64-nlist16K",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"M": 64,
"nlist": 16384,
"ratio": 2
},
"file": "wiki_all_1M/faiss_ivf_pq/M64-nlist16K_ratio2",
"search_params": [
{ "nprobe": 10 },
{ "nprobe": 20 },
{ "nprobe": 30 },
{ "nprobe": 40 },
{ "nprobe": 50 },
{ "nprobe": 100 },
{ "nprobe": 200 },
{ "nprobe": 500 }
]
},
{
"name": "raft_ivf_pq.d128-nlist16K",
"algo": "raft_ivf_pq",
"build_param": {
"pq_dim": 128,
"pq_bits": 8,
"nlist": 16384,
"niter": 10,
"ratio": 10
},
"file": "wiki_all_1M/raft_ivf_pq/d128-nlist16K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 }
]
},
{
"name": "raft_ivf_pq.d64-nlist16K",
"algo": "raft_ivf_pq",
"build_param": {
"pq_dim": 64,
"pq_bits": 8,
"nlist": 16384,
"niter": 10,
"ratio": 10
},
"file": "wiki_all_1M/raft_ivf_pq/d64-nlist16K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 }
]
},
{
"name": "raft_ivf_pq.d32-nlist16K",
"algo": "raft_ivf_pq",
"build_param": {
"pq_dim": 32,
"pq_bits": 8,
"nlist": 16384,
"niter": 10,
"ratio": 10
},
"file": "wiki_all_1M/raft_ivf_pq/d32-nlist16K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 }
]
},
{
"name": "raft_ivf_pq.d32X-nlist16K",
"algo": "raft_ivf_pq",
"build_param": {
"pq_dim": 32,
"pq_bits": 8,
"nlist": 16384,
"niter": 10,
"ratio": 10
},
"file": "wiki_all_1M/raft_ivf_pq/d32-nlist16K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 }
]
},
{
"name": "raft_cagra.dim32.multi_cta",
"algo": "raft_cagra",
"build_param": { "graph_degree": 32,
"intermediate_graph_degree": 48,
"graph_build_algo": "NN_DESCENT",
"ivf_pq_build_pq_dim": 32,
"ivf_pq_build_pq_bits": 8,
"ivf_pq_build_nlist": 16384,
"ivf_pq_build_niter": 10,
"ivf_pq_build_ratio": 10,
"ivf_pq_search_nprobe": 30,
"ivf_pq_search_internalDistanceDtype": "half",
"ivf_pq_search_smemLutDtype": "half",
"ivf_pq_search_refine_ratio": 8,
"nn_descent_max_iterations": 10,
"nn_descent_intermediate_graph_degree": 72,
"nn_descent_termination_threshold": 0.001
},
"file": "wiki_all_1M/raft_cagra/dim32.ibin",
"search_params": [
{ "itopk": 32, "search_width": 1, "max_iterations": 0, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 32, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 36, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 40, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 44, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 48, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 2, "max_iterations": 16, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 2, "max_iterations": 24, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 2, "max_iterations": 26, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 2, "max_iterations": 32, "algo": "multi_cta" },
{ "itopk": 64, "search_width": 4, "max_iterations": 16, "algo": "multi_cta" },
{ "itopk": 64, "search_width": 1, "max_iterations": 64, "algo": "multi_cta" },
{ "itopk": 96, "search_width": 2, "max_iterations": 48, "algo": "multi_cta" },
{ "itopk": 128, "search_width": 8, "max_iterations": 16, "algo": "multi_cta" },
{ "itopk": 128, "search_width": 2, "max_iterations": 64, "algo": "multi_cta" },
{ "itopk": 192, "search_width": 8, "max_iterations": 24, "algo": "multi_cta" },
{ "itopk": 192, "search_width": 2, "max_iterations": 96, "algo": "multi_cta" },
{ "itopk": 256, "search_width": 8, "max_iterations": 32, "algo": "multi_cta" },
{ "itopk": 384, "search_width": 8, "max_iterations": 48, "algo": "multi_cta" },
{ "itopk": 512, "search_width": 8, "max_iterations": 64, "algo": "multi_cta" }
]
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/wiki_all_10M.json | {
"dataset": {
"name": "wiki_all_10M",
"base_file": "wiki_all_10M/base.88M.fbin",
"query_file": "wiki_all_10M/queries.fbin",
"groundtruth_neighbors_file": "wiki_all_10M/groundtruth.88M.neighbors.ibin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 10000,
"k": 10
},
"index": [
{
"name": "hnswlib.M16.ef50",
"algo": "hnswlib",
"build_param": { "M": 16, "efConstruction": 50, "numThreads": 56 },
"file": "wiki_all_10M/hnswlib/M16.ef50",
"search_params": [
{ "ef": 10, "numThreads": 56 },
{ "ef": 20, "numThreads": 56 },
{ "ef": 40, "numThreads": 56 },
{ "ef": 60, "numThreads": 56 },
{ "ef": 80, "numThreads": 56 },
{ "ef": 120, "numThreads": 56 },
{ "ef": 200, "numThreads": 56 },
{ "ef": 400, "numThreads": 56 },
{ "ef": 600, "numThreads": 56 },
{ "ef": 800, "numThreads": 56 }
]
},
{
"name": "faiss_ivf_pq.M32-nlist16K",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"M": 32,
"nlist": 16384,
"ratio": 2
},
"file": "wiki_all_10M/faiss_ivf_pq/M32-nlist16K_ratio2",
"search_params": [
{ "nprobe": 10 },
{ "nprobe": 20 },
{ "nprobe": 30 },
{ "nprobe": 40 },
{ "nprobe": 50 },
{ "nprobe": 100 },
{ "nprobe": 200 },
{ "nprobe": 500 }
]
},
{
"name": "faiss_ivf_pq.M64-nlist16K",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"M": 64,
"nlist": 16384,
"ratio": 2
},
"file": "wiki_all_10M/faiss_ivf_pq/M64-nlist16K_ratio2",
"search_params": [
{ "nprobe": 10 },
{ "nprobe": 20 },
{ "nprobe": 30 },
{ "nprobe": 40 },
{ "nprobe": 50 },
{ "nprobe": 100 },
{ "nprobe": 200 },
{ "nprobe": 500 }
]
},
{
"name": "raft_ivf_pq.d128-nlist16K",
"algo": "raft_ivf_pq",
"build_param": {
"pq_dim": 128,
"pq_bits": 8,
"nlist": 16384,
"niter": 10,
"ratio": 10
},
"file": "wiki_all_10M/raft_ivf_pq/d128-nlist16K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 1 }
]
},
{
"name": "raft_ivf_pq.d64-nlist16K",
"algo": "raft_ivf_pq",
"build_param": {
"pq_dim": 64,
"pq_bits": 8,
"nlist": 16384,
"niter": 10,
"ratio": 10
},
"file": "wiki_all_10M/raft_ivf_pq/d64-nlist16K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 }
]
},
{
"name": "raft_ivf_pq.d32-nlist16K",
"algo": "raft_ivf_pq",
"build_param": {
"pq_dim": 32,
"pq_bits": 8,
"nlist": 16384,
"niter": 10,
"ratio": 10
},
"file": "wiki_all_10M/raft_ivf_pq/d32-nlist16K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 32 }
]
},
{
"name": "raft_ivf_pq.d32X-nlist16K",
"algo": "raft_ivf_pq",
"build_param": {
"pq_dim": 32,
"pq_bits": 8,
"nlist": 16384,
"niter": 10,
"ratio": 10
},
"file": "wiki_all_10M/raft_ivf_pq/d32-nlist16K",
"search_params": [
{ "nprobe": 20, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 16 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 8 },
{ "nprobe": 30, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 40, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 50, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 100, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 200, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 },
{ "nprobe": 500, "internalDistanceDtype": "half", "smemLutDtype": "half", "refine_ratio": 4 }
]
},
{
"name": "raft_cagra.dim32.multi_cta",
"algo": "raft_cagra",
"build_param": { "graph_degree": 32, "intermediate_graph_degree": 48 },
"file": "wiki_all_10M/raft_cagra/dim32.ibin",
"search_params": [
{ "itopk": 32, "search_width": 1, "max_iterations": 0, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 32, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 36, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 40, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 44, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 1, "max_iterations": 48, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 2, "max_iterations": 16, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 2, "max_iterations": 24, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 2, "max_iterations": 26, "algo": "multi_cta" },
{ "itopk": 32, "search_width": 2, "max_iterations": 32, "algo": "multi_cta" },
{ "itopk": 64, "search_width": 4, "max_iterations": 16, "algo": "multi_cta" },
{ "itopk": 64, "search_width": 1, "max_iterations": 64, "algo": "multi_cta" },
{ "itopk": 96, "search_width": 2, "max_iterations": 48, "algo": "multi_cta" },
{ "itopk": 128, "search_width": 8, "max_iterations": 16, "algo": "multi_cta" },
{ "itopk": 128, "search_width": 2, "max_iterations": 64, "algo": "multi_cta" },
{ "itopk": 192, "search_width": 8, "max_iterations": 24, "algo": "multi_cta" },
{ "itopk": 192, "search_width": 2, "max_iterations": 96, "algo": "multi_cta" },
{ "itopk": 256, "search_width": 8, "max_iterations": 32, "algo": "multi_cta" },
{ "itopk": 384, "search_width": 8, "max_iterations": 48, "algo": "multi_cta" },
{ "itopk": 512, "search_width": 8, "max_iterations": 64, "algo": "multi_cta" }
]
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/glove-100-angular.json | {
"dataset": {
"name": "glove-100-angular",
"base_file": "glove-100-angular/base.fbin",
"query_file": "glove-100-angular/query.fbin",
"distance": "euclidean"
},
"search_basic_param": {
"batch_size": 5000,
"k": 10,
"run_count": 3
},
"index": [
{
"name" : "hnswlib.M12",
"algo" : "hnswlib",
"build_param": {"M":12, "efConstruction":500, "numThreads":32},
"file" : "index/glove-100-angular/hnswlib/M12",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-100-angular/hnswlib/M12"
},
{
"name" : "hnswlib.M16",
"algo" : "hnswlib",
"build_param": {"M":16, "efConstruction":500, "numThreads":32},
"file" : "index/glove-100-angular/hnswlib/M16",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-100-angular/hnswlib/M16"
},
{
"name" : "hnswlib.M24",
"algo" : "hnswlib",
"build_param": {"M":24, "efConstruction":500, "numThreads":32},
"file" : "index/glove-100-angular/hnswlib/M24",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-100-angular/hnswlib/M24"
},
{
"name" : "hnswlib.M36",
"algo" : "hnswlib",
"build_param": {"M":36, "efConstruction":500, "numThreads":32},
"file" : "index/glove-100-angular/hnswlib/M36",
"search_params" : [
{"ef":10},
{"ef":20},
{"ef":40},
{"ef":60},
{"ef":80},
{"ef":120},
{"ef":200},
{"ef":400},
{"ef":600},
{"ef":800}
],
"search_result_file" : "result/glove-100-angular/hnswlib/M36"
},
{
"name": "raft_bfknn",
"algo": "raft_bfknn",
"build_param": {},
"file": "index/glove-100-angular/raft_bfknn/bfknn",
"search_params": [
{
"probe": 1
}
],
"search_result_file": "result/glove-100-angular/raft_bfknn/bfknn"
},
{
"name": "faiss_gpu_ivf_flat.nlist1024",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 1024
},
"file": "index/glove-100-angular/faiss_gpu_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_flat/nlist1024"
},
{
"name": "faiss_gpu_ivf_flat.nlist2048",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 2048
},
"file": "index/glove-100-angular/faiss_gpu_ivf_flat/nlist2048",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_flat/nlist2048"
},
{
"name": "faiss_gpu_ivf_flat.nlist4096",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 4096
},
"file": "index/glove-100-angular/faiss_gpu_ivf_flat/nlist4096",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_flat/nlist4096"
},
{
"name": "faiss_gpu_ivf_flat.nlist8192",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 8192
},
"file": "index/glove-100-angular/faiss_gpu_ivf_flat/nlist8192",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_flat/nlist8192"
},
{
"name": "faiss_gpu_ivf_flat.nlist16384",
"algo": "faiss_gpu_ivf_flat",
"build_param": {
"nlist": 16384
},
"file": "index/glove-100-angular/faiss_gpu_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_flat/nlist16384"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": true
},
"file": "index/glove-100-angular/faiss_gpu_ivf_pq/M64-nlist1024",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_pq.M64-nlist1024.noprecomp",
"algo": "faiss_gpu_ivf_pq",
"build_param": {
"nlist": 1024,
"M": 64,
"useFloat16": true,
"usePrecomputed": false
},
"file": "index/glove-100-angular/faiss_gpu_ivf_pq/M64-nlist1024.noprecomp",
"search_params": [
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_pq/M64-nlist1024"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "fp16"
},
"file": "index/glove-100-angular/faiss_gpu_ivf_sq/nlist1024-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_sq/nlist1024-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "fp16"
},
"file": "index/glove-100-angular/faiss_gpu_ivf_sq/nlist2048-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_sq/nlist2048-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "fp16"
},
"file": "index/glove-100-angular/faiss_gpu_ivf_sq/nlist4096-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_sq/nlist4096-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "fp16"
},
"file": "index/glove-100-angular/faiss_gpu_ivf_sq/nlist8192-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_sq/nlist8192-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-fp16",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "fp16"
},
"file": "index/glove-100-angular/faiss_gpu_ivf_sq/nlist16384-fp16",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_sq/nlist16384-fp16"
},
{
"name": "faiss_gpu_ivf_sq.nlist1024-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 1024,
"quantizer_type": "int8"
},
"file": "index/glove-100-angular/faiss_gpu_ivf_sq/nlist1024-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_sq/nlist1024-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist2048-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 2048,
"quantizer_type": "int8"
},
"file": "index/glove-100-angular/faiss_gpu_ivf_sq/nlist2048-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_sq/nlist2048-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist4096-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 4096,
"quantizer_type": "int8"
},
"file": "index/glove-100-angular/faiss_gpu_ivf_sq/nlist4096-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_sq/nlist4096-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist8192-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 8192,
"quantizer_type": "int8"
},
"file": "index/glove-100-angular/faiss_gpu_ivf_sq/nlist8192-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_sq/nlist8192-int8"
},
{
"name": "faiss_gpu_ivf_sq.nlist16384-int8",
"algo": "faiss_gpu_ivf_sq",
"build_param": {
"nlist": 16384,
"quantizer_type": "int8"
},
"file": "index/glove-100-angular/faiss_gpu_ivf_sq/nlist16384-int8",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_ivf_sq/nlist16384-int8"
},
{
"name": "faiss_gpu_flat",
"algo": "faiss_gpu_flat",
"build_param": {},
"file": "index/glove-100-angular/faiss_gpu_flat/flat",
"search_params": [
{}
],
"search_result_file": "result/glove-100-angular/faiss_gpu_flat/flat"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-angular/raft_ivf_pq/dimpq128-cluster1024",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "half"
}
],
"search_result_file": "result/glove-100-angular/raft_ivf_pq/dimpq128-cluster1024"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-angular/raft_ivf_pq/dimpq128-cluster1024-float-float",
"search_params": [
        {
          "k": 10,
          "nprobe": 1,
          "internalDistanceDtype": "float",
          "smemLutDtype": "float"
        },
{
"k": 10,
"nprobe": 5,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/glove-100-angular/raft_ivf_pq/dimpq128-cluster1024-float-float"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-angular/raft_ivf_pq/dimpq128-cluster1024-float-half",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/glove-100-angular/raft_ivf_pq/dimpq128-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-angular/raft_ivf_pq/dimpq128-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-100-angular/raft_ivf_pq/dimpq128-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-angular/raft_ivf_pq/dimpq64-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-100-angular/raft_ivf_pq/dimpq64-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq64-cluster1024-float-half",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 64,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-angular/raft_ivf_pq/dimpq64-cluster1024-float-half",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "half"
}
],
"search_result_file": "result/glove-100-angular/raft_ivf_pq/dimpq64-cluster1024-float-half"
},
{
"name": "raft_ivf_pq.dimpq32-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 32,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-angular/raft_ivf_pq/dimpq32-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-100-angular/raft_ivf_pq/dimpq32-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq16-cluster1024-float-fp8",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 16,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-angular/raft_ivf_pq/dimpq16-cluster1024-float-fp8",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "fp8"
}
],
"search_result_file": "result/glove-100-angular/raft_ivf_pq/dimpq16-cluster1024-float-fp8"
},
{
"name": "raft_ivf_pq.dimpq128-cluster1024-half-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 128,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-angular/raft_ivf_pq/dimpq128-cluster1024-half-float",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "half",
"smemLutDtype": "float"
}
],
"search_result_file": "result/glove-100-angular/raft_ivf_pq/dimpq128-cluster1024-half-float"
},
{
"name": "raft_ivf_pq.dimpq512-cluster1024-float-float",
"algo": "raft_ivf_pq",
"build_param": {
"nlist": 1024,
"pq_dim": 512,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-angular/raft_ivf_pq/dimpq512-cluster1024-float-float",
"search_params": [
{
"k": 10,
"nprobe": 10,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 50,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 100,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 200,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 500,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
},
{
"k": 10,
"nprobe": 1024,
"internalDistanceDtype": "float",
"smemLutDtype": "float"
}
],
"search_result_file": "result/glove-100-angular/raft_ivf_pq/dimpq512-cluster1024-float-float"
},
{
"name": "raft_ivf_flat.nlist1024",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 1024,
"ratio": 1,
"niter": 25
},
"file": "index/glove-100-angular/raft_ivf_flat/nlist1024",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
}
],
"search_result_file": "result/glove-100-angular/raft_ivf_flat/nlist1024"
},
{
"name": "raft_ivf_flat.nlist16384",
"algo": "raft_ivf_flat",
"build_param": {
"nlist": 16384,
"ratio": 2,
"niter": 20
},
"file": "index/glove-100-angular/raft_ivf_flat/nlist16384",
"search_params": [
{
"nprobe": 1
},
{
"nprobe": 5
},
{
"nprobe": 10
},
{
"nprobe": 50
},
{
"nprobe": 100
},
{
"nprobe": 200
},
{
"nprobe": 500
},
{
"nprobe": 1000
},
{
"nprobe": 2000
}
],
"search_result_file": "result/glove-100-angular/raft_ivf_flat/nlist16384"
},
{
"name" : "raft_cagra.dim32",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 32
},
"file" : "index/glove-100-angular/raft_cagra/dim32",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/glove-100-angular/raft_cagra/dim32"
},
{
"name" : "raft_cagra.dim64",
"algo" : "raft_cagra",
"build_param": {
"graph_degree" : 64
},
"file" : "index/glove-100-angular/raft_cagra/dim64",
"search_params" : [
{"itopk": 32},
{"itopk": 64},
{"itopk": 128}
],
"search_result_file" : "result/glove-100-angular/raft_cagra/dim64"
}
]
}
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/algos/faiss_gpu_ivf_flat.yaml | name: faiss_gpu_ivf_flat
groups:
base:
build:
nlist: [2048]
ratio: [1, 4, 10]
useFloat16: [False]
search:
nprobe: [2048]
refine_ratio: [1]
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/algos/raft_ivf_flat.yaml | name: raft_ivf_flat
groups:
base:
build:
nlist: [1024, 2048, 4096, 8192, 16384, 32000, 64000]
ratio: [1, 2, 4]
niter: [20, 25]
search:
nprobe: [1, 5, 10, 50, 100, 200, 500, 1000, 2000] | 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/algos/hnswlib.yaml | name: hnswlib
constraints:
search: raft-ann-bench.constraints.hnswlib_search_constraints
groups:
base:
build:
M: [12, 16, 24, 36]
efConstruction: [64, 128, 256, 512]
search:
ef: [10, 20, 40, 60, 80, 120, 200, 400, 600, 800]
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/algos/raft_ivf_pq.yaml | name: raft_ivf_pq
constraints:
build: raft-ann-bench.constraints.raft_ivf_pq_build_constraints
search: raft-ann-bench.constraints.raft_ivf_pq_search_constraints
groups:
base:
build:
nlist: [1024, 2048, 4096, 8192]
pq_dim: [64, 32]
pq_bits: [8, 6, 5, 4]
ratio: [10, 25]
niter: [25]
search:
nprobe: [1, 5, 10, 50, 100, 200]
internalDistanceDtype: ["float"]
smemLutDtype: ["float", "fp8", "half"]
refine_ratio: [1, 2, 4] | 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/algos/faiss_gpu_ivf_pq.yaml | name: faiss_gpu_ivf_pq
groups:
base:
build:
nlist: [1024, 2048, 4096, 8192]
M: [8, 16]
ratio: [10, 25]
usePrecomputed: [False]
useFloat16: [False]
search:
nprobe: [1, 5, 10, 50, 100, 200]
refine_ratio: [1] | 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/algos/raft_cagra.yaml | name: raft_cagra
constraints:
search: raft-ann-bench.constraints.raft_cagra_search_constraints
groups:
base:
build:
graph_degree: [32, 64, 128, 256]
intermediate_graph_degree: [32, 64, 96, 128]
graph_build_algo: ["NN_DESCENT"]
search:
itopk: [32, 64, 128, 256, 512]
search_width: [1, 2, 4, 8, 16, 32, 64]
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/run/conf/algos/raft_cagra_hnswlib.yaml | name: raft_cagra_hnswlib
constraints:
search: raft-ann-bench.constraints.hnswlib_search_constraints
groups:
base:
build:
graph_degree: [32, 64, 128, 256]
intermediate_graph_degree: [32, 64, 96, 128]
graph_build_algo: ["NN_DESCENT"]
search:
ef: [10, 20, 40, 60, 80, 120, 200, 400, 600, 800]
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/constraints/__init__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Size in bytes of each supported lookup-table / internal-distance dtype.
DTYPE_SIZES = {"float": 4, "half": 2, "fp8": 1}


def cuvs_ivf_pq_build_constraints(params, dims):
    """Accept an IVF-PQ build config only if pq_dim does not exceed the
    dataset dimensionality; configs without "pq_dim" are always accepted."""
    if "pq_dim" not in params:
        return True
    return params["pq_dim"] <= dims
def cuvs_ivf_pq_search_constraints(params, build_params, k, batch_size):
    """Validate an IVF-PQ search config against its build config.

    Rejects configs whose shared-memory LUT dtype is wider than the internal
    distance dtype, or whose nprobe exceeds the number of lists built.
    """
    ok = True
    if "internalDistanceDtype" in params and "smemLutDtype" in params:
        lut_size = DTYPE_SIZES[params["smemLutDtype"]]
        dist_size = DTYPE_SIZES[params["internalDistanceDtype"]]
        ok = lut_size <= dist_size
    if "nlist" in build_params and "nprobe" in params:
        ok = ok and params["nprobe"] <= build_params["nlist"]
    return ok
def cuvs_cagra_search_constraints(params, build_params, k, batch_size):
    """Accept a CAGRA search config only if itopk covers the requested k.

    Bug fix: the original fell through and implicitly returned None when
    "itopk" was absent, which truthiness-based callers treated as a
    rejection; a config with no itopk constraint should be accepted.
    """
    if "itopk" in params:
        return params["itopk"] >= k
    return True
def hnswlib_search_constraints(params, build_params, k, batch_size):
    """Accept an hnswlib search config only if ef covers the requested k.

    Bug fix: the original implicitly returned None when "ef" was absent,
    which truthiness-based callers treated as a rejection; a config with
    no ef constraint should be accepted.
    """
    if "ef" in params:
        return params["ef"] >= k
    return True
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/split_groundtruth/split_groundtruth.pl | #!/usr/bin/perl
# =============================================================================
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
use warnings;
use strict;
use autodie qw(open close);
@ARGV == 2
or die "usage: $0 input output_prefix\n";
open my $fh, '<:raw', $ARGV[0];
my $raw;
read($fh, $raw, 8);
my ($nrows, $dim) = unpack('LL', $raw);
my $expected_size = 8 + $nrows * $dim * (4 + 4);
my $size = (stat($fh))[7];
$size == $expected_size
or die("error: expected size is $expected_size, but actual size is $size\n");
open my $fh_out1, '>:raw', "$ARGV[1].neighbors.ibin";
open my $fh_out2, '>:raw', "$ARGV[1].distances.fbin";
print {$fh_out1} $raw;
print {$fh_out2} $raw;
read($fh, $raw, $nrows * $dim * 4);
print {$fh_out1} $raw;
read($fh, $raw, $nrows * $dim * 4);
print {$fh_out2} $raw;
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/split_groundtruth/__main__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
import sys
def split_groundtruth(groundtruth_filepath):
    """Split a combined groundtruth file into neighbors/distances files.

    Invokes the bundled ``split_groundtruth.pl`` script, which writes
    ``groundtruth.neighbors.ibin`` and ``groundtruth.distances.fbin``
    next to the input file.

    Fixes two defects of the original implementation:
    - the working directory was not restored when the subprocess failed
      (``check=True`` raises before the trailing ``os.chdir``); the chdir
      is now wrapped in try/finally.
    - ``normpath().split(os.sep)`` + ``os.path.join`` dropped the leading
      separator of absolute paths; ``os.path.split`` preserves it.

    Parameters
    ----------
    groundtruth_filepath : str
        Path (relative or absolute) to the combined groundtruth file.
    """
    ann_bench_scripts_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "split_groundtruth.pl"
    )
    pwd = os.getcwd()
    dirname, groundtruth_filename = os.path.split(
        os.path.normpath(groundtruth_filepath)
    )
    try:
        # Run the splitter from the file's directory so its outputs land
        # next to the input file.
        if dirname:
            os.chdir(dirname)
        subprocess.run(
            [ann_bench_scripts_path, groundtruth_filename, "groundtruth"],
            check=True,
        )
    finally:
        os.chdir(pwd)
def main():
    """Parse command-line arguments and split the groundtruth file."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    arg_parser.add_argument(
        "--groundtruth",
        help="Path to billion-scale dataset groundtruth file",
        required=True,
    )
    # With no arguments at all, show the help text instead of an error.
    if len(sys.argv) == 1:
        arg_parser.print_help()
        sys.exit(1)
    parsed = arg_parser.parse_args()
    split_groundtruth(parsed.groundtruth)
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/generate_groundtruth/utils.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
def dtype_from_filename(filename):
    """Infer the numpy dtype from a big-ann-benchmarks file extension.

    Parameters
    ----------
    filename : str
        File name whose extension encodes the element type
        (.fbin, .hbin, .ibin, .u8bin or .i8bin).

    Returns
    -------
    numpy dtype corresponding to the extension.

    Raises
    ------
    RuntimeError
        If the extension is not one of the supported formats.
    """
    # Table-driven dispatch instead of a mixed if/elif chain.
    ext_to_dtype = {
        ".fbin": np.float32,
        ".hbin": np.float16,
        ".ibin": np.int32,
        ".u8bin": np.ubyte,
        ".i8bin": np.byte,
    }
    ext = os.path.splitext(filename)[1]
    try:
        return ext_to_dtype[ext]
    except KeyError:
        # Original message lacked a separator before the extension.
        raise RuntimeError("Not supported file extension: " + ext) from None
def suffix_from_dtype(dtype):
    """Return the big-ann-benchmarks file extension for a numpy dtype.

    Parameters
    ----------
    dtype : numpy dtype (or scalar type) of the data.

    Returns
    -------
    str
        One of ".fbin", ".hbin", ".ibin", ".u8bin", ".i8bin".

    Raises
    ------
    RuntimeError
        If the dtype is not one of the supported formats.

    Bug fix: the original error path concatenated a str with the dtype
    object (``"..." + dtype``), which raised TypeError instead of the
    intended RuntimeError.
    """
    if dtype == np.float32:
        return ".fbin"
    if dtype == np.float16:
        return ".hbin"
    elif dtype == np.int32:
        return ".ibin"
    elif dtype == np.ubyte:
        return ".u8bin"
    elif dtype == np.byte:
        return ".i8bin"
    else:
        raise RuntimeError(f"Not supported dtype: {dtype}")
def memmap_bin_file(
    bin_file, dtype, shape=None, mode="r", size_dtype=np.uint32
):
    """Memory-map a big-ann-benchmarks binary file.

    The file layout is two ``size_dtype`` integers (rows, cols) followed
    by the raw element data.

    Parameters
    ----------
    bin_file : str or None
        Path to the file; None returns None.
    dtype : numpy dtype or None
        Element dtype; when None it is derived from the file extension.
    shape : tuple or None
        (rows, cols). In read mode, None (or None entries) means "use the
        value from the file header". Required in write mode.
    mode : str
        A numpy.memmap mode; only the first character ("r" or "w") is
        inspected here.
    size_dtype : numpy dtype
        Integer type of the two header fields (default uint32).

    Returns
    -------
    numpy.memmap over the data region (header excluded), or None when
    ``bin_file`` is None. NOTE(review): a mode that starts with neither
    "r" nor "w" falls through and returns None implicitly.
    """
    # The data region starts right after the two header integers.
    extent_itemsize = np.dtype(size_dtype).itemsize
    offset = int(extent_itemsize) * 2
    if bin_file is None:
        return None
    if dtype is None:
        dtype = dtype_from_filename(bin_file)
    if mode[0] == "r":
        # First map just the header to learn (rows, cols).
        a = np.memmap(bin_file, mode=mode, dtype=size_dtype, shape=(2,))
        if shape is None:
            shape = (a[0], a[1])
        else:
            # Fill any None entries of the requested shape from the header.
            shape = tuple(
                [
                    aval if sval is None else sval
                    for aval, sval in zip(a, shape)
                ]
            )
        return np.memmap(
            bin_file, mode=mode, dtype=dtype, offset=offset, shape=shape
        )
    elif mode[0] == "w":
        if shape is None:
            raise ValueError("Need to specify shape to map file in write mode")
        print("creating file", bin_file)
        dirname = os.path.dirname(bin_file)
        if len(dirname) > 0:
            os.makedirs(dirname, exist_ok=True)
        # Create the file and write the header, then flush and re-map the
        # data region with the element dtype in read/write mode.
        a = np.memmap(bin_file, mode=mode, dtype=size_dtype, shape=(2,))
        a[0] = shape[0]
        a[1] = shape[1]
        a.flush()
        del a
        fp = np.memmap(
            bin_file, mode="r+", dtype=dtype, offset=offset, shape=shape
        )
        return fp
def write_bin(fname, data):
    """Write *data* to *fname* in big-ann-benchmarks binary format:
    a uint32 (rows, cols) header followed by the raw element bytes."""
    print("writing", fname, data.shape, data.dtype, "...")
    with open(fname, "wb") as out:
        header = np.asarray(data.shape, dtype=np.uint32)
        header.tofile(out)
        data.tofile(out)
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/generate_groundtruth/__main__.py | #!/usr/bin/env python
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
import cupy as cp
import numpy as np
import rmm
from pylibraft.common import DeviceResources
from rmm.allocators.cupy import rmm_cupy_allocator
from cuvs.neighbors.brute_force import knn
from .utils import memmap_bin_file, suffix_from_dtype, write_bin
def generate_random_queries(n_queries, n_features, dtype=np.float32):
    """Create an (n_queries, n_features) matrix of random query vectors
    on the GPU: uniform integers in [0, 255) for integer dtypes, uniform
    floats in [0, 1) otherwise."""
    print("Generating random queries")
    shape = (n_queries, n_features)
    if np.issubdtype(dtype, np.integer):
        return cp.random.randint(0, 255, size=shape, dtype=dtype)
    return cp.random.uniform(size=shape).astype(dtype)
def choose_random_queries(dataset, n_queries):
    """Sample *n_queries* distinct rows of *dataset* (without replacement)
    to serve as query vectors."""
    print("Choosing random vector from dataset as query vectors")
    selected = np.random.choice(
        dataset.shape[0], size=(n_queries,), replace=False
    )
    return dataset[selected, :]
def calc_truth(dataset, queries, k, metric="sqeuclidean"):
    """Compute exact k-NN of *queries* against *dataset* by brute force.

    The dataset is processed in batches of 500k rows to bound GPU memory;
    per-batch results are merged by concatenating and re-sorting the
    running top-k candidates.

    Parameters
    ----------
    dataset : array-like (host or device), shape (n_samples, dim)
    queries : array-like, converted to float32 on device
    k : int, number of neighbors per query
    metric : str, distance metric accepted by brute_force.knn

    Returns
    -------
    (distances, indices) : cupy arrays of shape (n_queries, k)
    """
    handle = DeviceResources()
    n_samples = dataset.shape[0]
    n = 500000  # batch size for processing neighbors
    i = 0
    indices = None
    distances = None
    queries = cp.asarray(queries, dtype=cp.float32)
    while i < n_samples:
        print("Step {0}/{1}:".format(i // n, n_samples // n))
        # Last batch may be smaller than n.
        n_batch = n if i + n <= n_samples else n_samples - i
        X = cp.asarray(dataset[i : i + n_batch, :], cp.float32)
        D, Ind = knn(
            X,
            queries,
            k,
            metric=metric,
            handle=handle,
            global_id_offset=i,  # shift neighbor index by offset i
        )
        handle.sync()
        D, Ind = cp.asarray(D), cp.asarray(Ind)
        if distances is None:
            distances = D
            indices = Ind
        else:
            # Merge this batch's candidates with the running top-k and
            # keep the k smallest distances per query.
            distances = cp.concatenate([distances, D], axis=1)
            indices = cp.concatenate([indices, Ind], axis=1)
            idx = cp.argsort(distances, axis=1)[:, :k]
            distances = cp.take_along_axis(distances, idx, axis=1)
            indices = cp.take_along_axis(indices, idx, axis=1)
        i += n_batch
    return distances, indices
def main():
    """CLI entry point: load a dataset, obtain queries (from file or
    randomly generated), compute exact nearest neighbors on GPU, and
    write groundtruth neighbor/distance files in binary format."""
    # Use a pooled RMM allocator (1 GiB initial) for both RMM and cupy
    # to avoid repeated device allocations during batched knn.
    pool = rmm.mr.PoolMemoryResource(
        rmm.mr.CudaMemoryResource(), initial_pool_size=2**30
    )
    rmm.mr.set_current_device_resource(pool)
    cp.cuda.set_allocator(rmm_cupy_allocator)
    parser = argparse.ArgumentParser(
        prog="generate_groundtruth",
        description="Generate true neighbors using exact NN search. "
        "The input and output files are in big-ann-benchmark's binary format.",
        epilog="""Example usage
    # With existing query file
    python -m raft-ann-bench.generate_groundtruth --dataset /dataset/base.\
fbin --output=groundtruth_dir --queries=/dataset/query.public.10K.fbin
    # With randomly generated queries
    python -m raft-ann-bench.generate_groundtruth --dataset /dataset/base.\
fbin --output=groundtruth_dir --queries=random --n_queries=10000
    # Using only a subset of the dataset. Define queries by randomly
    # selecting vectors from the (subset of the) dataset.
    python -m raft-ann-bench.generate_groundtruth --dataset /dataset/base.\
fbin --nrows=2000000 --cols=128 --output=groundtruth_dir \
--queries=random-choice --n_queries=10000
    """,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("dataset", type=str, help="input dataset file name")
    parser.add_argument(
        "--queries",
        type=str,
        default="random",
        help="Queries file name, or one of 'random-choice' or 'random' "
        "(default). 'random-choice': select n_queries vectors from the input "
        "dataset. 'random': generate n_queries as uniform random numbers.",
    )
    parser.add_argument(
        "--output",
        type=str,
        default="",
        help="output directory name (default current dir)",
    )
    parser.add_argument(
        "--n_queries",
        type=int,
        default=10000,
        help="Number of quries to generate (if no query file is given). "
        "Default: 10000.",
    )
    parser.add_argument(
        "-N",
        "--rows",
        default=None,
        type=int,
        help="use only first N rows from dataset, by default the whole "
        "dataset is used",
    )
    parser.add_argument(
        "-D",
        "--cols",
        default=None,
        type=int,
        help="number of features (dataset columns). "
        "Default: read from dataset file.",
    )
    parser.add_argument(
        "--dtype",
        type=str,
        help="Dataset dtype. When not specified, then derived from extension."
        " Supported types: 'float32', 'float16', 'uint8', 'int8'",
    )
    parser.add_argument(
        "-k",
        type=int,
        default=100,
        help="Number of neighbors (per query) to calculate",
    )
    parser.add_argument(
        "--metric",
        type=str,
        default="sqeuclidean",
        help="Metric to use while calculating distances. Valid metrics are "
        "those that are accepted by pylibraft.neighbors.brute_force.knn. Most"
        " commonly used with RAFT ANN are 'sqeuclidean' and 'inner_product'",
    )
    # With no arguments at all, show help instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    if args.rows is not None:
        print("Reading subset of the data, nrows=", args.rows)
    else:
        print("Reading whole dataset")
    # Load input data (memory-mapped; rows/cols default to file header).
    dataset = memmap_bin_file(
        args.dataset, args.dtype, shape=(args.rows, args.cols)
    )
    n_features = dataset.shape[1]
    dtype = dataset.dtype
    print(
        "Dataset size {:6.1f} GB, shape {}, dtype {}".format(
            dataset.size * dataset.dtype.itemsize / 1e9,
            dataset.shape,
            np.dtype(dtype),
        )
    )
    if len(args.output) > 0:
        os.makedirs(args.output, exist_ok=True)
    # Obtain query vectors: generated, sampled from the dataset, or read
    # from a file. Generated/sampled queries are also written to disk.
    if args.queries == "random" or args.queries == "random-choice":
        if args.n_queries is None:
            raise RuntimeError(
                "n_queries must be given to generate random queries"
            )
        if args.queries == "random":
            queries = generate_random_queries(
                args.n_queries, n_features, dtype
            )
        elif args.queries == "random-choice":
            queries = choose_random_queries(dataset, args.n_queries)
        queries_filename = os.path.join(
            args.output, "queries" + suffix_from_dtype(dtype)
        )
        print("Writing queries file", queries_filename)
        write_bin(queries_filename, queries)
    else:
        print("Reading queries from file", args.queries)
        queries = memmap_bin_file(args.queries, dtype)
    print("Calculating true nearest neighbors")
    distances, indices = calc_truth(dataset, queries, args.k, args.metric)
    # Write results in the standard groundtruth layout.
    write_bin(
        os.path.join(args.output, "groundtruth.neighbors.ibin"),
        indices.astype(np.uint32),
    )
    write_bin(
        os.path.join(args.output, "groundtruth.distances.fbin"),
        distances.astype(np.float32),
    )
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/get_dataset/hdf5_to_fbin.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import h5py
import numpy as np
def normalize(x):
    """Scale each row of *x* to unit Euclidean (L2) length."""
    row_norms = np.linalg.norm(x, axis=1)
    return (x.T / row_norms).T
def write_bin(fname, data):
    """Write *data* in big-ann binary format: a uint32 (rows, cols)
    header followed by the raw element bytes."""
    with open(fname, "wb") as out:
        np.asarray(data.shape, dtype=np.uint32).tofile(out)
        data.tofile(out)
if __name__ == "__main__":
    # CLI: convert an ann-benchmarks .hdf5 dataset into the four binary
    # files used by the benchmark harness (base, query, groundtruth
    # neighbors/distances). "-n" L2-normalizes base and query vectors.
    if len(sys.argv) != 2 and len(sys.argv) != 3:
        print(
            "usage: %s [-n] <input>.hdf5\n" % (sys.argv[0]),
            " -n: normalize base/query set\n",
            "outputs: <input>.base.fbin\n",
            " <input>.query.fbin\n",
            " <input>.groundtruth.neighbors.ibin\n",
            " <input>.groundtruth.distances.fbin",
            file=sys.stderr,
        )
        sys.exit(-1)
    need_normalize = False
    if len(sys.argv) == 3:
        # The only supported flag is "-n" (normalize).
        assert sys.argv[1] == "-n"
        need_normalize = True
    fname_prefix = sys.argv[-1]
    assert fname_prefix.endswith(".hdf5")
    # Strip the ".hdf5" suffix to build the output file names.
    fname_prefix = fname_prefix[:-5]
    hdf5 = h5py.File(sys.argv[-1], "r")
    # Only angular and euclidean ann-benchmarks datasets are supported,
    # and the dtypes must match the binary format written below.
    assert (
        hdf5.attrs["distance"] == "angular"
        or hdf5.attrs["distance"] == "euclidean"
    )
    assert hdf5["train"].dtype == np.float32
    assert hdf5["test"].dtype == np.float32
    assert hdf5["neighbors"].dtype == np.int32
    assert hdf5["distances"].dtype == np.float32
    base = hdf5["train"][:]
    query = hdf5["test"][:]
    if need_normalize:
        base = normalize(base)
        query = normalize(query)
    elif hdf5.attrs["distance"] == "angular":
        # Angular datasets are usually consumed normalized; warn if the
        # user did not request it.
        print(
            "warning: input has angular distance, ",
            "specify -n to normalize base/query set!\n",
        )
    output_fname = fname_prefix + ".base.fbin"
    print("writing", output_fname, "...")
    write_bin(output_fname, base)
    output_fname = fname_prefix + ".query.fbin"
    print("writing", output_fname, "...")
    write_bin(output_fname, query)
    output_fname = fname_prefix + ".groundtruth.neighbors.ibin"
    print("writing", output_fname, "...")
    write_bin(output_fname, hdf5["neighbors"][:])
    output_fname = fname_prefix + ".groundtruth.distances.fbin"
    print("writing", output_fname, "...")
    write_bin(output_fname, hdf5["distances"][:])
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/get_dataset/fbin_to_f16bin.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import sys
import numpy as np
def read_fbin(fname):
    """Load a .fbin file: a (rows, dims) uint32 header followed by
    row-major float32 data.

    Files whose payload exceeds ~2 GB are memory-mapped read-only
    instead of being loaded into RAM.
    """
    header = np.fromfile(fname, dtype=np.uint32, count=2)
    payload_bytes = float(header[0]) * header[1] * 4
    if payload_bytes > 2000000000:
        flat = np.memmap(fname, dtype=np.float32, offset=8, mode="r")
    else:
        flat = np.fromfile(fname, dtype=np.float32, offset=8)
    return flat.reshape(header)
def write_bin(fname, data):
    """Serialize *data* to *fname*: a uint32 shape header, then raw values."""
    header = np.asarray(data.shape, dtype=np.uint32)
    with open(fname, "wb") as out:
        header.tofile(out)
        data.tofile(out)
# CLI: convert a float32 .fbin file to float16 (.f16bin), halving its size.
if len(sys.argv) != 3:
    print(
        "usage: %s input.fbin output.f16bin" % (sys.argv[0]),
        file=sys.stderr,
    )
    sys.exit(-1)
data = read_fbin(sys.argv[1]).astype(np.float16)
write_bin(sys.argv[2], data)
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/get_dataset/__main__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
import sys
from urllib.request import urlretrieve
def get_dataset_path(name, ann_bench_data_path):
    """Return the local .hdf5 path for dataset *name*.

    Ensures the data directory exists first.  Uses ``os.makedirs`` with
    ``exist_ok=True`` instead of the previous bare ``os.mkdir``: it
    handles nested paths and is race-free if the directory appears
    concurrently.
    """
    os.makedirs(ann_bench_data_path, exist_ok=True)
    return os.path.join(ann_bench_data_path, f"{name}.hdf5")
def download_dataset(url, path):
    """Fetch *url* into *path* unless the file is already present."""
    if os.path.exists(path):
        return
    print(f"downloading {url} -> {path}...")
    urlretrieve(url, path)
def convert_hdf5_to_fbin(path, normalize):
    """Run the bundled hdf5_to_fbin.py converter on *path*.

    The ``-n`` flag (normalize vectors) is passed only when requested
    and the dataset uses angular distance.
    """
    scripts_dir = os.path.dirname(os.path.realpath(__file__))
    converter = os.path.join(scripts_dir, "hdf5_to_fbin.py")
    print(f"calling script {converter}")
    cmd = ["python", converter]
    if normalize and "angular" in path:
        cmd.append("-n")
    cmd.append("%s" % path)
    subprocess.run(cmd, check=True)
def move(name, ann_bench_data_path):
    """Move the converted binary files for *name* into their own directory.

    Datasets containing "angular" are renamed to "inner" (the converter
    normalized the vectors, so angular distance equals inner product).
    Uses ``os.makedirs(..., exist_ok=True)`` instead of the previous bare
    ``os.mkdir``: it handles nested destination paths and is race-free.
    """
    new_name = name.replace("angular", "inner") if "angular" in name else name
    new_path = os.path.join(ann_bench_data_path, new_name)
    os.makedirs(new_path, exist_ok=True)
    for bin_name in [
        "base.fbin",
        "query.fbin",
        "groundtruth.neighbors.ibin",
        "groundtruth.distances.fbin",
    ]:
        os.rename(
            f"{ann_bench_data_path}/{name}.{bin_name}",
            f"{new_path}/{bin_name}",
        )
def download(name, normalize, ann_bench_data_path):
    """Download dataset *name*, convert it to fbin files and move them
    into place.  Logs the failing URL and re-raises on any error."""
    path = get_dataset_path(name, ann_bench_data_path)
    url = f"http://ann-benchmarks.com/{name}.hdf5"
    try:
        download_dataset(url, path)
        convert_hdf5_to_fbin(path, normalize)
        move(name, ann_bench_data_path)
    except Exception:
        print(f"Cannot download {url}")
        raise
def main():
    """CLI entry point: parse arguments and download one dataset."""
    # Default dataset location: $RAPIDS_DATASET_ROOT_DIR, else ./datasets/.
    default_dataset_path = os.environ.get(
        "RAPIDS_DATASET_ROOT_DIR", os.path.join(os.getcwd(), "datasets/")
    )
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--dataset", help="dataset to download", default="glove-100-angular"
    )
    parser.add_argument(
        "--dataset-path",
        help="path to download dataset",
        default=default_dataset_path,
    )
    parser.add_argument(
        "--normalize",
        help="normalize cosine distance to inner product",
        action="store_true",
    )
    # With no arguments at all, show usage instead of silently defaulting.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    download(args.dataset, args.normalize, args.dataset_path)


if __name__ == "__main__":
    main()
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/data_export/__main__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import sys
import traceback
import warnings
import pandas as pd
# Benchmark JSON columns that are bookkeeping rather than build parameters;
# they are written explicitly (or dropped) when emitting the build CSV.
skip_build_cols = {
    "algo_name",
    "index_name",
    "time",
    "name",
    "family_index",
    "per_family_instance_index",
    "run_name",
    "run_type",
    "repetitions",
    "repetition_index",
    "iterations",
    "real_time",
    "time_unit",
    "index_size",
}

# Search CSVs additionally skip the metric columns, which are emitted under
# canonical names ("recall", "throughput", "latency").
skip_search_cols = {
    "recall",
    "qps",
    "latency",
    "items_per_second",
    "Recall",
    "Latency",
} | skip_build_cols

# Frontier metadata per metric: axis label, the worst possible value (its
# sign encodes whether larger is better), and optional axis limits.
metrics = {
    "k-nn": {
        "description": "Recall",
        "worst": float("-inf"),
        "lim": [0.0, 1.03],
    },
    "throughput": {
        "description": "Queries per second (1/s)",
        "worst": float("-inf"),
    },
    "latency": {
        "description": "Search Latency (s)",
        "worst": float("inf"),
    },
}
def read_file(dataset, dataset_path, method):
    """Yield (filepath, algo_name, dataframe) for every parseable
    benchmark JSON under <dataset_path>/<dataset>/result/<method>.

    The algorithm name is the filename prefix before the first "-";
    unparseable files are reported and skipped.
    """
    result_dir = os.path.join(dataset_path, dataset, "result", method)
    for entry in os.listdir(result_dir):
        if not entry.endswith(".json"):
            continue
        full_path = os.path.join(result_dir, entry)
        with open(full_path, "r") as handle:
            try:
                parsed = json.load(handle)
                frame = pd.DataFrame(parsed["benchmarks"])
                yield full_path, entry.split("-")[0], frame
            except Exception as e:
                print(
                    "An error occurred processing file %s (%s). "
                    "Skipping..." % (entry, e)
                )
def convert_json_to_csv_build(dataset, dataset_path):
    """Convert every build-phase benchmark JSON of *dataset* into a CSV
    written next to it (one CSV per algorithm file)."""
    for file, algo_name, df in read_file(dataset, dataset_path, "build"):
        try:
            algo_name = algo_name.replace("_base", "")
            # Keep only the benchmark name up to the first "/" separator.
            df["name"] = df["name"].str.split("/").str[0]
            out = pd.DataFrame(
                {
                    "algo_name": [algo_name] * len(df),
                    "index_name": df["name"],
                    "time": df["real_time"],
                }
            )
            # Append any algorithm-specific build parameter columns.
            for col in df:
                if col not in skip_build_cols:
                    out[col] = df[col]
            parts = os.path.normpath(file).split(os.sep)
            csv_name = parts[-1].split("-")[0] + ".csv"
            out.to_csv(
                os.path.join(f"{os.sep}".join(parts[:-1]), csv_name),
                index=False,
            )
        except Exception as e:
            print(
                "An error occurred processing file %s (%s). Skipping..."
                % (file, e)
            )
            traceback.print_exc()
def create_pointset(data, xn, yn):
    """Compute the Pareto frontier of *data* for the metric pair (xn, yn).

    Each row of *data* is [algo_name, index_name, x, throughput, latency]
    (see convert_json_to_csv_search).  Rows are sorted best-first on the
    y metric, then only rows that strictly improve the x metric are kept.
    Note: sorts *data* in place.
    """
    xm, ym = (metrics[xn], metrics[yn])
    # A negative "worst" marks a larger-is-better metric -> sort descending.
    rev_y = -1 if ym["worst"] < 0 else 1
    rev_x = -1 if xm["worst"] < 0 else 1
    # Column 3 holds throughput, column 4 latency.
    y_idx = 3 if yn == "throughput" else 4
    # Primary sort on the y metric, tie-break on the x metric (column 2).
    data.sort(key=lambda t: (rev_y * t[y_idx], rev_x * t[2]))
    lines = []
    last_x = xm["worst"]
    # Keep a point only when it improves on the best x value seen so far.
    comparator = (
        (lambda xv, lx: xv > lx) if last_x < 0 else (lambda xv, lx: xv < lx)
    )
    for d in data:
        if comparator(d[2], last_x):
            last_x = d[2]
            lines.append(d)
    return lines
def get_frontier(df, metric):
    """Return the Pareto-frontier rows of *df* (recall vs. *metric*)."""
    frontier_rows = create_pointset(df.values.tolist(), "k-nn", metric)
    return pd.DataFrame(frontier_rows, columns=df.columns)
def convert_json_to_csv_search(dataset, dataset_path):
    """Convert every search-phase benchmark JSON of *dataset* into CSVs.

    For each JSON three CSVs are written next to it: the raw rows
    ("*_raw.csv") plus the throughput and latency Pareto frontiers.
    When the algorithm's build CSV exists, its build time and parameter
    columns are joined onto each matching search row by index name.
    """
    for file, algo_name, df in read_file(dataset, dataset_path, "search"):
        try:
            build_file = os.path.join(
                dataset_path, dataset, "result", "build", f"{algo_name}.csv"
            )
            algo_name = algo_name.replace("_base", "")
            df["name"] = df["name"].str.split("/").str[0]
            try:
                write = pd.DataFrame(
                    {
                        "algo_name": [algo_name] * len(df),
                        "index_name": df["name"],
                        "recall": df["Recall"],
                        "throughput": df["items_per_second"],
                        "latency": df["Latency"],
                    }
                )
            except Exception as e:
                print(
                    "Search file %s (%s) missing a key. Skipping..."
                    % (file, e)
                )
                # Bug fix: previously execution fell through here with
                # `write` undefined, raising a NameError below instead of
                # actually skipping the malformed file.
                continue
            # Carry over any algorithm-specific search parameter columns.
            for name in df:
                if name not in skip_search_cols:
                    write[name] = df[name]
            if os.path.exists(build_file):
                build_df = pd.read_csv(build_file)
                write_ncols = len(write.columns)
                write["build time"] = None
                write["build threads"] = None
                write["build cpu_time"] = None
                write["build GPU"] = None
                try:
                    # Columns 6+ of the build CSV are extra build params.
                    for col_idx in range(6, len(build_df.columns)):
                        col_name = build_df.columns[col_idx]
                        write[col_name] = None
                    # Join build info onto each search row by index_name.
                    for s_index, search_row in write.iterrows():
                        for b_index, build_row in build_df.iterrows():
                            if (
                                search_row["index_name"]
                                == build_row["index_name"]
                            ):
                                write.iloc[
                                    s_index, write_ncols
                                ] = build_df.iloc[b_index, 2]
                                write.iloc[
                                    s_index, write_ncols + 1 :
                                ] = build_df.iloc[b_index, 3:]
                                break
                except Exception as e:
                    print(
                        "Build file %s (%s) missing a key. Skipping..."
                        % (build_file, e)
                    )
            else:
                warnings.warn(
                    f"Build CSV not found for {algo_name}, "
                    f"build params won't be "
                    "appended in the Search CSV"
                )
            write.to_csv(file.replace(".json", "_raw.csv"), index=False)
            throughput = get_frontier(write, "throughput")
            throughput.to_csv(
                file.replace(".json", "_throughput.csv"), index=False
            )
            latency = get_frontier(write, "latency")
            latency.to_csv(file.replace(".json", "_latency.csv"), index=False)
        except Exception as e:
            print(
                "An error occurred processing file %s (%s). Skipping..."
                % (file, e)
            )
            traceback.print_exc()
def main():
    """CLI entry point: convert build and search JSON results to CSV."""
    # Default dataset location: $RAPIDS_DATASET_ROOT_DIR, else ./datasets/.
    default_dataset_path = os.environ.get(
        "RAPIDS_DATASET_ROOT_DIR", os.path.join(os.getcwd(), "datasets/")
    )
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--dataset", help="dataset to download", default="glove-100-inner"
    )
    parser.add_argument(
        "--dataset-path",
        help="path to dataset folder",
        default=default_dataset_path,
    )
    # With no arguments at all, show usage instead of silently defaulting.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    convert_json_to_csv_build(args.dataset, args.dataset_path)
    convert_json_to_csv_search(args.dataset, args.dataset_path)


if __name__ == "__main__":
    main()
| 0 |
rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench | rapidsai_public_repos/cuvs/python/cuvs-bench/src/cuvs-bench/plot/__main__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is inspired by
# 1: https://github.com/erikbern/ann-benchmarks/blob/main/plot.py
# 2: https://github.com/erikbern/ann-benchmarks/blob/main/ann_benchmarks/plotting/utils.py # noqa: E501
# 3: https://github.com/erikbern/ann-benchmarks/blob/main/ann_benchmarks/plotting/metrics.py # noqa: E501
# Licence: https://github.com/erikbern/ann-benchmarks/blob/main/LICENSE
import argparse
import itertools
import os
import sys
from collections import OrderedDict
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Select a non-interactive backend before any figure is created, so the
# script also works on headless machines.
mpl.use("Agg")

# Plot metadata per metric: axis label, the worst possible value (its sign
# encodes whether larger is better), and optional fixed axis limits.
metrics = {
    "k-nn": {
        "description": "Recall",
        "worst": float("-inf"),
        "lim": [0.0, 1.03],
    },
    "throughput": {
        "description": "Queries per second (1/s)",
        "worst": float("-inf"),
    },
    "latency": {
        "description": "Search Latency (s)",
        "worst": float("inf"),
    },
}
def positive_int(input_str: str) -> int:
    """argparse type: parse *input_str* as a strictly positive integer.

    Raises argparse.ArgumentTypeError for non-integers and values < 1.
    """
    try:
        value = int(input_str)
        if value < 1:
            raise ValueError
    except ValueError:
        raise argparse.ArgumentTypeError(
            f"{input_str} is not a positive integer"
        )
    return value
def generate_n_colors(n):
    """Greedily pick *n* visually distinct RGBA color tuples.

    Starts from a fixed reddish seed and repeatedly adds the point of a
    7x7x7 RGB lattice that maximizes the minimum squared distance to all
    colors chosen so far.
    """
    grid = np.linspace(0.3, 0.9, 7)
    chosen = [(0.9, 0.4, 0.4, 1.0)]

    def sq_dist(a, b):
        # Squared Euclidean distance between two color tuples.
        return sum((u - v) ** 2 for u, v in zip(a, b))

    while len(chosen) < n:
        candidate = max(
            itertools.product(grid, grid, grid),
            key=lambda rgb: min(sq_dist(rgb, c) for c in chosen),
        )
        chosen.append(candidate + (1.0,))
    return chosen
def create_linestyles(unique_algorithms):
    """Map each algorithm to (color, faded_color, linestyle, markerstyle)."""
    palette = generate_n_colors(len(unique_algorithms))
    colors = dict(zip(unique_algorithms, palette))
    dash_cycle = ["--", "-.", "-", ":"]
    marker_cycle = ["+", "<", "o", "*", "x"]
    linestyles = {
        algo: dash_cycle[i % 4]
        for i, algo in enumerate(unique_algorithms)
    }
    markerstyles = {
        algo: marker_cycle[i % 5]
        for i, algo in enumerate(unique_algorithms)
    }
    # Same hue at 30% alpha, used for de-emphasized plot elements.
    faded = {
        algo: (r, g, b, 0.3) for algo, (r, g, b, a) in colors.items()
    }
    return {
        algo: (colors[algo], faded[algo], linestyles[algo], markerstyles[algo])
        for algo in unique_algorithms
    }
def create_plot_search(
    all_data,
    x_scale,
    y_scale,
    fn_out,
    linestyles,
    dataset,
    k,
    batch_size,
    mode,
    time_unit,
):
    """Plot recall (x) vs. throughput or latency (y) per algorithm and
    save the figure to *fn_out*.

    *all_data* maps algorithm name -> list of
    [algo_name, index_name, recall, y-value] rows (built by load_lines);
    *mode* selects whether the y-value column holds throughput or latency.
    """
    xn = "k-nn"
    xm, ym = (metrics[xn], metrics[mode])
    # Now generate each plot
    handles = []
    labels = []
    plt.figure(figsize=(12, 9))

    # Sorting by mean y-value helps aligning plots with labels
    def mean_y(algo):
        points = np.array(all_data[algo], dtype=object)
        return -np.log(np.array(points[:, 3], dtype=np.float32)).mean()

    # Find range for logit x-scale
    min_x, max_x = 1, 0
    for algo in sorted(all_data.keys(), key=mean_y):
        points = np.array(all_data[algo], dtype=object)
        xs = points[:, 2]
        ys = points[:, 3]
        # Track the extreme recall values inside (0, 1) for logit limits.
        min_x = min([min_x] + [x for x in xs if x > 0])
        max_x = max([max_x] + [x for x in xs if x < 1])
        color, faded, linestyle, marker = linestyles[algo]
        (handle,) = plt.plot(
            xs,
            ys,
            "-",
            label=algo,
            color=color,
            ms=7,
            mew=3,
            lw=3,
            marker=marker,
        )
        handles.append(handle)
        labels.append(algo)
    ax = plt.gca()
    y_description = ym["description"]
    # Latency values may have been rescaled to ms/us in load_lines;
    # reflect the chosen unit in the axis label.
    if mode == "latency":
        y_description = y_description.replace("(s)", f"({time_unit})")
    ax.set_ylabel(y_description)
    ax.set_xlabel("Recall")
    # Custom scales of the type --x-scale a3
    if x_scale[0] == "a":
        alpha = float(x_scale[1:])

        def fun(x):
            return 1 - (1 - x) ** (1 / alpha)

        def inv_fun(x):
            return 1 - (1 - x) ** alpha

        ax.set_xscale("function", functions=(fun, inv_fun))
        if alpha <= 3:
            ticks = [inv_fun(x) for x in np.arange(0, 1.2, 0.2)]
            plt.xticks(ticks)
        if alpha > 3:
            from matplotlib import ticker

            ax.xaxis.set_major_formatter(ticker.LogitFormatter())
            # plt.xticks(ticker.LogitLocator().tick_values(min_x, max_x))
            plt.xticks([0, 1 / 2, 1 - 1e-1, 1 - 1e-2, 1 - 1e-3, 1 - 1e-4, 1])
    # Other x-scales
    else:
        ax.set_xscale(x_scale)
    ax.set_yscale(y_scale)
    ax.set_title(f"{dataset} k={k} batch_size={batch_size}")
    plt.gca().get_position()
    # plt.gca().set_position([box.x0, box.y0, box.width * 0.8, box.height])
    ax.legend(
        handles,
        labels,
        loc="center left",
        bbox_to_anchor=(1, 0.5),
        prop={"size": 9},
    )
    plt.grid(visible=True, which="major", color="0.65", linestyle="-")
    plt.setp(ax.get_xminorticklabels(), visible=True)
    # Logit scale has to be a subset of (0,1)
    if "lim" in xm and x_scale != "logit":
        x0, x1 = xm["lim"]
        plt.xlim(max(x0, 0), min(x1, 1))
    elif x_scale == "logit":
        plt.xlim(min_x, max_x)
    if "lim" in ym:
        plt.ylim(ym["lim"])
    # Workaround for bug https://github.com/matplotlib/matplotlib/issues/6789
    ax.spines["bottom"]._adjust_location()
    print(f"writing search output to {fn_out}")
    plt.savefig(fn_out, bbox_inches="tight")
    plt.close()
def create_plot_build(
    build_results, search_results, linestyles, fn_out, dataset
):
    """Bar-plot, per algorithm, the build time of the index achieving the
    highest QPS in each of three recall bands (85-90%, 90-95%, >=95%)."""
    # Per-algorithm best QPS, its build time, and its index name per band.
    qps_85 = [-1] * len(linestyles)
    bt_85 = [0] * len(linestyles)
    i_85 = [-1] * len(linestyles)
    qps_90 = [-1] * len(linestyles)
    bt_90 = [0] * len(linestyles)
    i_90 = [-1] * len(linestyles)
    qps_95 = [-1] * len(linestyles)
    bt_95 = [0] * len(linestyles)
    i_95 = [-1] * len(linestyles)
    data = OrderedDict()
    colors = OrderedDict()

    # Sorting by mean y-value helps aligning plots with labels
    def mean_y(algo):
        points = np.array(search_results[algo], dtype=object)
        return -np.log(np.array(points[:, 3], dtype=np.float32)).mean()

    for pos, algo in enumerate(sorted(search_results.keys(), key=mean_y)):
        points = np.array(search_results[algo], dtype=object)
        xs = points[:, 2]
        ys = points[:, 3]
        ls = points[:, 0]
        idxs = points[:, 1]
        # x is recall, y is qps, ls is algo_name, idxs is index_name
        for i in range(len(xs)):
            if xs[i] >= 0.85 and xs[i] < 0.9 and ys[i] > qps_85[pos]:
                qps_85[pos] = ys[i]
                # build_results is keyed by (algo, index); row column 2
                # holds the build time (see load_lines).
                bt_85[pos] = build_results[(ls[i], idxs[i])][0][2]
                i_85[pos] = idxs[i]
            elif xs[i] >= 0.9 and xs[i] < 0.95 and ys[i] > qps_90[pos]:
                qps_90[pos] = ys[i]
                bt_90[pos] = build_results[(ls[i], idxs[i])][0][2]
                i_90[pos] = idxs[i]
            elif xs[i] >= 0.95 and ys[i] > qps_95[pos]:
                qps_95[pos] = ys[i]
                bt_95[pos] = build_results[(ls[i], idxs[i])][0][2]
                i_95[pos] = idxs[i]
        data[algo] = [bt_85[pos], bt_90[pos], bt_95[pos]]
        colors[algo] = linestyles[algo][0]
    index = ["@85% Recall", "@90% Recall", "@95% Recall"]
    df = pd.DataFrame(data, index=index)
    plt.figure(figsize=(12, 9))
    ax = df.plot.bar(rot=0, color=colors)
    fig = ax.get_figure()
    print(f"writing build output to {fn_out}")
    plt.title("Build Time for Highest QPS")
    plt.suptitle(f"{dataset}")
    plt.ylabel("Build Time (s)")
    fig.savefig(fn_out)
def load_lines(results_path, result_files, method, index_key, mode, time_unit):
    """Parse CSV result files into a dict of result rows.

    Keys are algorithm names (index_key == "algo") or
    (algo_name, index_name) tuples (index_key == "index"); each value is
    a list of [algo_name, index_name, metric...] rows.  Unreadable files
    are reported and skipped.
    """
    results = dict()
    for result_filename in result_files:
        try:
            with open(os.path.join(results_path, result_filename), "r") as f:
                lines = f.readlines()
                # Drop a trailing blank line, if any.
                lines = lines[:-1] if lines[-1] == "\n" else lines
                # Build CSVs: column 2 is the build time.  Search CSVs:
                # column 2 is recall plus throughput (3) or latency (4).
                if method == "build":
                    key_idx = [2]
                elif method == "search":
                    y_idx = 3 if mode == "throughput" else 4
                    key_idx = [2, y_idx]
                for line in lines[1:]:  # skip the CSV header row
                    split_lines = line.split(",")
                    algo_name = split_lines[0]
                    index_name = split_lines[1]
                    if index_key == "algo":
                        dict_key = algo_name
                    elif index_key == "index":
                        dict_key = (algo_name, index_name)
                    if dict_key not in results:
                        results[dict_key] = []
                    to_add = [algo_name, index_name]
                    for key_i in key_idx:
                        to_add.append(float(split_lines[key_i]))
                    # Convert latency from seconds to ms/us when requested.
                    if (
                        mode == "latency"
                        and time_unit != "s"
                        and method == "search"
                    ):
                        to_add[-1] = (
                            to_add[-1] * (10**3)
                            if time_unit == "ms"
                            else to_add[-1] * (10**6)
                        )
                    results[dict_key].append(to_add)
        except Exception:
            print(
                f"An error occurred processing file {result_filename}. "
                "Skipping..."
            )
    return results
def load_all_results(
    dataset_path,
    algorithms,
    groups,
    algo_groups,
    k,
    batch_size,
    method,
    index_key,
    raw,
    mode,
    time_unit,
):
    """Collect, filter and parse the CSV result files for one dataset.

    Files are filtered by algorithm names, parameter groups, and explicit
    "<algorithm>.<group>" selections; search results are further limited
    to the requested k/batch_size combination.  Returns the dict produced
    by load_lines.
    """
    results_path = os.path.join(dataset_path, "result", method)
    result_files = os.listdir(results_path)
    if method == "build":
        result_files = [
            result_file
            for result_file in result_files
            if ".csv" in result_file
        ]
    elif method == "search":
        # Choose between raw CSVs and the precomputed frontier for *mode*.
        if raw:
            suffix = "_raw"
        else:
            suffix = f"_{mode}"
        result_files = [
            result_file
            for result_file in result_files
            if f"{suffix}.csv" in result_file
        ]
    if len(result_files) == 0:
        raise FileNotFoundError(f"No CSV result files found in {results_path}")
    if method == "search":
        result_files = [
            result_filename
            for result_filename in result_files
            if f"{k}-{batch_size}" in result_filename
        ]
        algo_group_files = [
            result_filename.split("-")[0] for result_filename in result_files
        ]
    else:
        algo_group_files = [
            result_filename for result_filename in result_files
        ]
    # Split each filename into (algorithm, group); the group is the last
    # "_"-separated token.
    for i in range(len(algo_group_files)):
        algo_group = algo_group_files[i].replace(".csv", "").split("_")
        algo_group_files[i] = ("_".join(algo_group[:-1]), algo_group[-1])
    algo_group_files = list(zip(*algo_group_files))
    if len(algorithms) > 0:
        final_results = [
            result_files[i]
            for i in range(len(result_files))
            if (algo_group_files[0][i] in algorithms)
            and (algo_group_files[1][i] in groups)
        ]
    else:
        final_results = [
            result_files[i]
            for i in range(len(result_files))
            if (algo_group_files[1][i] in groups)
        ]
    if len(algo_groups) > 0:
        # Explicit "<algo>.<group>" selections are added on top and the
        # union is deduplicated.
        split_algo_groups = [
            algo_group.split(".") for algo_group in algo_groups
        ]
        split_algo_groups = list(zip(*split_algo_groups))
        final_algo_groups = [
            result_files[i]
            for i in range(len(result_files))
            if (algo_group_files[0][i] in split_algo_groups[0])
            and (algo_group_files[1][i] in split_algo_groups[1])
        ]
        final_results = final_results + final_algo_groups
        final_results = set(final_results)
    results = load_lines(
        results_path, final_results, method, index_key, mode, time_unit
    )
    return results
def main():
    """CLI entry point: plot the search (recall vs. QPS/latency) and/or
    build-time charts for one dataset's benchmark results."""
    call_path = os.getcwd()
    # Default dataset location: $RAPIDS_DATASET_ROOT_DIR, else ./datasets/.
    if "RAPIDS_DATASET_ROOT_DIR" in os.environ:
        default_dataset_path = os.getenv("RAPIDS_DATASET_ROOT_DIR")
    else:
        default_dataset_path = os.path.join(call_path, "datasets/")
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--dataset", help="dataset to plot", default="glove-100-inner"
    )
    parser.add_argument(
        "--dataset-path",
        help="path to dataset folder",
        default=default_dataset_path,
    )
    parser.add_argument(
        "--output-filepath",
        help="directory for PNG to be saved",
        default=os.getcwd(),
    )
    parser.add_argument(
        "--algorithms",
        help="plot only comma separated list of named \
              algorithms. If parameters `groups` and `algo-groups \
              are both undefined, then group `base` is plot by default",
        default=None,
    )
    parser.add_argument(
        "--groups",
        help="plot only comma separated groups of parameters",
        default="base",
    )
    # NOTE(review): the option string "--algo-groups" is passed twice
    # below; argparse tolerates the duplicate, but one alias was likely
    # meant to differ — confirm.
    parser.add_argument(
        "--algo-groups",
        "--algo-groups",
        help='add comma separated <algorithm>.<group> to plot. \
              Example usage: "--algo-groups=raft_cagra.large,hnswlib.large"',
    )
    parser.add_argument(
        "-k",
        "--count",
        default=10,
        type=positive_int,
        help="the number of nearest neighbors to search for",
    )
    parser.add_argument(
        "-bs",
        "--batch-size",
        default=10000,
        type=positive_int,
        help="number of query vectors to use in each query trial",
    )
    parser.add_argument("--build", action="store_true")
    parser.add_argument("--search", action="store_true")
    parser.add_argument(
        "--x-scale",
        help="Scale to use when drawing the X-axis. \
              Typically linear, logit or a2",
        default="linear",
    )
    parser.add_argument(
        "--y-scale",
        help="Scale to use when drawing the Y-axis",
        choices=["linear", "log", "symlog", "logit"],
        default="linear",
    )
    parser.add_argument(
        "--mode",
        help="search mode whose Pareto frontier is used on the y-axis",
        choices=["throughput", "latency"],
        default="throughput",
    )
    parser.add_argument(
        "--time-unit",
        help="time unit to plot when mode is latency",
        choices=["s", "ms", "us"],
        default="ms",
    )
    parser.add_argument(
        "--raw",
        help="Show raw results (not just Pareto frontier) of mode arg",
        action="store_true",
    )
    # With no CLI arguments at all, show usage instead of plotting defaults.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    if args.algorithms:
        algorithms = args.algorithms.split(",")
    else:
        algorithms = []
    groups = args.groups.split(",")
    if args.algo_groups:
        algo_groups = args.algo_groups.split(",")
    else:
        algo_groups = []
    k = args.count
    batch_size = args.batch_size
    # Neither flag given -> produce both plots.
    if not args.build and not args.search:
        build = True
        search = True
    else:
        build = args.build
        search = args.search
    search_output_filepath = os.path.join(
        args.output_filepath,
        f"search-{args.dataset}-k{k}-batch_size{batch_size}.png",
    )
    build_output_filepath = os.path.join(
        args.output_filepath,
        f"build-{args.dataset}.png",
    )
    # Search results are always loaded: the build plot also needs them to
    # pick the best-QPS index per recall band.
    search_results = load_all_results(
        os.path.join(args.dataset_path, args.dataset),
        algorithms,
        groups,
        algo_groups,
        k,
        batch_size,
        "search",
        "algo",
        args.raw,
        args.mode,
        args.time_unit,
    )
    linestyles = create_linestyles(sorted(search_results.keys()))
    if search:
        create_plot_search(
            search_results,
            args.x_scale,
            args.y_scale,
            search_output_filepath,
            linestyles,
            args.dataset,
            k,
            batch_size,
            args.mode,
            args.time_unit,
        )
    if build:
        build_results = load_all_results(
            os.path.join(args.dataset_path, args.dataset),
            algorithms,
            groups,
            algo_groups,
            k,
            batch_size,
            "build",
            "index",
            args.raw,
            args.mode,
            args.time_unit,
        )
        create_plot_build(
            build_results,
            search_results,
            linestyles,
            build_output_filepath,
            args.dataset,
        )


if __name__ == "__main__":
    main()
| 0 |
rapidsai_public_repos/cuvs/conda | rapidsai_public_repos/cuvs/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- benchmark>=1.8.2
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-nvtx=11.8
- cuda-profiler-api=11.8.86
- cuda-version=11.8
- cudatoolkit
- cxx-compiler
- cython>=3.0.0
- gcc_linux-64=11.*
- glog>=0.6.0
- h5py>=3.8.0
- hnswlib=0.7.0
- libcublas-dev=11.11.3.6
- libcublas=11.11.3.6
- libcurand-dev=10.3.0.86
- libcurand=10.3.0.86
- libcusolver-dev=11.4.1.48
- libcusolver=11.4.1.48
- libcusparse-dev=11.7.5.86
- libcusparse=11.7.5.86
- matplotlib
- nccl>=2.9.9
- ninja
- nlohmann_json>=3.11.2
- nvcc_linux-64=11.8
- openblas
- pandas
- pyyaml
- rmm==24.2.*
- scikit-build>=0.13.1
- sysroot_linux-64==2.17
name: bench_ann_cuda-118_arch-x86_64
| 0 |
rapidsai_public_repos/cuvs/conda | rapidsai_public_repos/cuvs/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- benchmark>=1.8.2
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-nvtx=11.8
- cuda-profiler-api=11.8.86
- cuda-version=11.8
- cudatoolkit
- cxx-compiler
- cython>=3.0.0
- gcc_linux-aarch64=11.*
- glog>=0.6.0
- h5py>=3.8.0
- hnswlib=0.7.0
- libcublas-dev=11.11.3.6
- libcublas=11.11.3.6
- libcurand-dev=10.3.0.86
- libcurand=10.3.0.86
- libcusolver-dev=11.4.1.48
- libcusolver=11.4.1.48
- libcusparse-dev=11.7.5.86
- libcusparse=11.7.5.86
- matplotlib
- nccl>=2.9.9
- ninja
- nlohmann_json>=3.11.2
- nvcc_linux-aarch64=11.8
- openblas
- pandas
- pyyaml
- rmm==24.2.*
- scikit-build>=0.13.1
- sysroot_linux-aarch64==2.17
name: bench_ann_cuda-118_arch-aarch64
| 0 |
rapidsai_public_repos/cuvs/conda | rapidsai_public_repos/cuvs/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- benchmark>=1.8.2
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-cudart-dev
- cuda-nvcc
- cuda-nvtx-dev
- cuda-profiler-api
- cuda-version=12.0
- cxx-compiler
- cython>=3.0.0
- gcc_linux-aarch64=11.*
- glog>=0.6.0
- h5py>=3.8.0
- hnswlib=0.7.0
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
- matplotlib
- nccl>=2.9.9
- ninja
- nlohmann_json>=3.11.2
- openblas
- pandas
- pyyaml
- rmm==24.2.*
- scikit-build>=0.13.1
- sysroot_linux-aarch64==2.17
name: bench_ann_cuda-120_arch-aarch64
| 0 |
rapidsai_public_repos/cuvs/conda | rapidsai_public_repos/cuvs/conda/environments/all_cuda-120_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- breathe
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-cudart-dev
- cuda-nvcc
- cuda-nvtx-dev
- cuda-profiler-api
- cuda-python>=12.0,<13.0a0
- cuda-version=12.0
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- doxygen>=1.8.20
- gcc_linux-64=11.*
- gmock>=1.13.0
- graphviz
- gtest>=1.13.0
- ipython
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
- nccl>=2.9.9
- ninja
- numpy>=1.21
- numpydoc
- pre-commit
- pydata-sphinx-theme
- pytest
- pytest-cov
- recommonmark
- rmm==24.2.*
- scikit-build>=0.13.1
- scikit-learn
- scipy
- sphinx-copybutton
- sphinx-markdown-tables
- sysroot_linux-64==2.17
name: all_cuda-120_arch-x86_64
| 0 |
rapidsai_public_repos/cuvs/conda | rapidsai_public_repos/cuvs/conda/environments/all_cuda-120_arch-aarch64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- breathe
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-cudart-dev
- cuda-nvcc
- cuda-nvtx-dev
- cuda-profiler-api
- cuda-python>=12.0,<13.0a0
- cuda-version=12.0
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- doxygen>=1.8.20
- gcc_linux-aarch64=11.*
- gmock>=1.13.0
- graphviz
- gtest>=1.13.0
- ipython
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
- nccl>=2.9.9
- ninja
- numpy>=1.21
- numpydoc
- pre-commit
- pydata-sphinx-theme
- pytest
- pytest-cov
- recommonmark
- rmm==24.2.*
- scikit-build>=0.13.1
- scikit-learn
- scipy
- sphinx-copybutton
- sphinx-markdown-tables
- sysroot_linux-aarch64==2.17
name: all_cuda-120_arch-aarch64
| 0 |
rapidsai_public_repos/cuvs/conda | rapidsai_public_repos/cuvs/conda/environments/all_cuda-118_arch-aarch64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- breathe
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-nvtx=11.8
- cuda-profiler-api=11.8.86
- cuda-python>=11.7.1,<12.0a0
- cuda-version=11.8
- cudatoolkit
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- doxygen>=1.8.20
- gcc_linux-aarch64=11.*
- gmock>=1.13.0
- graphviz
- gtest>=1.13.0
- ipython
- libcublas-dev=11.11.3.6
- libcublas=11.11.3.6
- libcurand-dev=10.3.0.86
- libcurand=10.3.0.86
- libcusolver-dev=11.4.1.48
- libcusolver=11.4.1.48
- libcusparse-dev=11.7.5.86
- libcusparse=11.7.5.86
- nccl>=2.9.9
- ninja
- numpy>=1.21
- numpydoc
- nvcc_linux-aarch64=11.8
- pre-commit
- pydata-sphinx-theme
- pytest
- pytest-cov
- recommonmark
- rmm==24.2.*
- scikit-build>=0.13.1
- scikit-learn
- scipy
- sphinx-copybutton
- sphinx-markdown-tables
- sysroot_linux-aarch64==2.17
name: all_cuda-118_arch-aarch64
| 0 |
rapidsai_public_repos/cuvs/conda | rapidsai_public_repos/cuvs/conda/environments/all_cuda-118_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- breathe
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-nvtx=11.8
- cuda-profiler-api=11.8.86
- cuda-python>=11.7.1,<12.0a0
- cuda-version=11.8
- cudatoolkit
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- doxygen>=1.8.20
- gcc_linux-64=11.*
- gmock>=1.13.0
- graphviz
- gtest>=1.13.0
- ipython
- libcublas-dev=11.11.3.6
- libcublas=11.11.3.6
- libcurand-dev=10.3.0.86
- libcurand=10.3.0.86
- libcusolver-dev=11.4.1.48
- libcusolver=11.4.1.48
- libcusparse-dev=11.7.5.86
- libcusparse=11.7.5.86
- nccl>=2.9.9
- ninja
- numpy>=1.21
- numpydoc
- nvcc_linux-64=11.8
- pre-commit
- pydata-sphinx-theme
- pytest
- pytest-cov
- recommonmark
- rmm==24.2.*
- scikit-build>=0.13.1
- scikit-learn
- scipy
- sphinx-copybutton
- sphinx-markdown-tables
- sysroot_linux-64==2.17
name: all_cuda-118_arch-x86_64
| 0 |
rapidsai_public_repos/cuvs/conda | rapidsai_public_repos/cuvs/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- benchmark>=1.8.2
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-cudart-dev
- cuda-nvcc
- cuda-nvtx-dev
- cuda-profiler-api
- cuda-version=12.0
- cxx-compiler
- cython>=3.0.0
- gcc_linux-64=11.*
- glog>=0.6.0
- h5py>=3.8.0
- hnswlib=0.7.0
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
- matplotlib
- nccl>=2.9.9
- ninja
- nlohmann_json>=3.11.2
- openblas
- pandas
- pyyaml
- rmm==24.2.*
- scikit-build>=0.13.1
- sysroot_linux-64==2.17
name: bench_ann_cuda-120_arch-x86_64
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/libcuvs/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
nccl_version:
- ">=2.9.9"
gtest_version:
- ">=1.13.0"
glog_version:
- ">=0.6.0"
faiss_version:
- ">=1.7.1"
h5py_version:
- ">=3.8.0"
nlohmann_json_version:
- ">=3.11.2"
# The CTK libraries below are missing from the conda-forge::cudatoolkit package
# for CUDA 11. The "*_host_*" version specifiers correspond to `11.8` packages
# and the "*_run_*" version specifiers correspond to `11.x` packages.
cuda11_libcublas_host_version:
- "=11.11.3.6"
cuda11_libcublas_run_version:
- ">=11.5.2.43,<12.0.0"
cuda11_libcurand_host_version:
- "=10.3.0.86"
cuda11_libcurand_run_version:
- ">=10.2.5.43,<10.3.1"
cuda11_libcusolver_host_version:
- "=11.4.1.48"
cuda11_libcusolver_run_version:
- ">=11.2.0.43,<11.4.2"
cuda11_libcusparse_host_version:
- "=11.7.5.86"
cuda11_libcusparse_run_version:
- ">=11.6.0.43,<12.0.0"
# `cuda-profiler-api` only has `11.8.0` and `12.0.0` packages for all
# architectures. The "*_host_*" version specifiers correspond to `11.8` packages and the
# "*_run_*" version specifiers correspond to `11.x` packages.
cuda11_cuda_profiler_api_host_version:
- "=11.8.86"
cuda11_cuda_profiler_api_run_version:
- ">=11.4.240,<12"
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/libcuvs/build_libcuvs_static.sh | #!/usr/bin/env bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
./build.sh libcuvs --allgpuarch --compile-static-lib --build-metrics=compile_lib_static --incl-cache-stats --no-nvtx -n
cmake --install cpp/build --component compiled-static
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/libcuvs/build_libcuvs_template.sh | #!/usr/bin/env bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Just building template so we verify it uses libraft.so and fail if it doesn't build
./build.sh template
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/libcuvs/build_libraft_tests.sh | #!/usr/bin/env bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
./build.sh tests bench --allgpuarch --no-nvtx --build-metrics=tests_bench --incl-cache-stats
cmake --install cpp/build --component testing
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/libcuvs/meta.yaml | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Usage:
# conda build . -c conda-forge -c nvidia -c rapidsai
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set cuda_spec = ">=" + cuda_major ~ ",<" + (cuda_major | int + 1) ~ ".0a0" %} # i.e. >=11,<12.0a0
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: libcuvs-split
source:
path: ../../..
outputs:
- name: libcuvs-static
version: {{ version }}
script: build_libcuvs_static.sh
build:
script_env: *script_env
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- {{ pin_subpackage('libraft-headers', exact=True) }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cuda-profiler-api {{ cuda11_cuda_profiler_api_host_version }}
- libcublas {{ cuda11_libcublas_host_version }}
- libcublas-dev {{ cuda11_libcublas_host_version }}
- libcurand {{ cuda11_libcurand_host_version }}
- libcurand-dev {{ cuda11_libcurand_host_version }}
- libcusolver {{ cuda11_libcusolver_host_version }}
- libcusolver-dev {{ cuda11_libcusolver_host_version }}
- libcusparse {{ cuda11_libcusparse_host_version }}
- libcusparse-dev {{ cuda11_libcusparse_host_version }}
{% else %}
- cuda-profiler-api
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
{% endif %}
run:
- {{ pin_subpackage('libraft-headers', exact=True) }}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: libcuvs static library
- name: libcuvs-tests
version: {{ version }}
script: build_libcuvs_tests.sh
build:
script_env: *script_env
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- {{ pin_subpackage('libraft-headers', exact=True) }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cuda-profiler-api {{ cuda11_cuda_profiler_api_run_version }}
- libcublas {{ cuda11_libcublas_host_version }}
- libcublas-dev {{ cuda11_libcublas_host_version }}
- libcurand {{ cuda11_libcurand_host_version }}
- libcurand-dev {{ cuda11_libcurand_host_version }}
- libcusolver {{ cuda11_libcusolver_host_version }}
- libcusolver-dev {{ cuda11_libcusolver_host_version }}
- libcusparse {{ cuda11_libcusparse_host_version }}
- libcusparse-dev {{ cuda11_libcusparse_host_version }}
{% else %}
- cuda-cudart-dev
- cuda-profiler-api
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
{% endif %}
- gmock {{ gtest_version }}
- gtest {{ gtest_version }}
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- {{ pin_subpackage('libraft-headers', exact=True) }}
- gmock {{ gtest_version }}
- gtest {{ gtest_version }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: libcuvs tests
- name: libcuvs-template
version: {{ version }}
script: build_libcuvs_template.sh
build:
script_env: *script_env
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- {{ pin_subpackage('libraft-headers', exact=True) }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cuda-profiler-api {{ cuda11_cuda_profiler_api_run_version }}
- libcublas {{ cuda11_libcublas_host_version }}
- libcublas-dev {{ cuda11_libcublas_host_version }}
{% else %}
- cuda-profiler-api
- libcublas-dev
{% endif %}
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- {{ pin_subpackage('libraft-headers', exact=True) }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: libcuvs template
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/libcuvs/build_libcuvs.sh | #!/usr/bin/env bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
./build.sh libcuvs --allgpuarch --compile-lib --build-metrics=compile_lib --incl-cache-stats --no-nvtx
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/cuvs/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/cuvs/build.sh | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#!/usr/bin/env bash
# This assumes the script is executed from the root of the repo directory
./build.sh cuvs --no-nvtx
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/cuvs/meta.yaml | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Usage:
# conda build . -c conda-forge -c numba -c rapidsai -c pytorch
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: cuvs
version: {{ version }}
source:
path: ../../..
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
{% if cuda_major == "11" %}
- cuda-python >=11.7.1,<12.0a0
- cudatoolkit
{% else %}
- cuda-python >=12.0,<13.0a0
{% endif %}
- cuda-version ={{ cuda_version }}
- cython >=3.0.0
- pylibraft {{ version }}
- libcuvs {{ version }}
- numpy >=1.21
- python x.x
- rmm ={{ minor_version }}
- scikit-build >=0.13.1
- setuptools
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- pylibraft {{ version }}
- libcuvs {{ version }}
- python x.x
- rmm ={{ minor_version }}
tests:
requirements:
- cuda-version ={{ cuda_version }}
imports:
- cuvs
about:
home: https://rapids.ai/
license: Apache-2.0
# license_file: LICENSE
summary: cuvs python library
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/cuda-ann-bench/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
nccl_version:
- ">=2.9.9"
gtest_version:
- ">=1.13.0"
glog_version:
- ">=0.6.0"
h5py_version:
- ">=3.8.0"
nlohmann_json_version:
- ">=3.11.2"
# The CTK libraries below are missing from the conda-forge::cudatoolkit package
# for CUDA 11. The "*_host_*" version specifiers correspond to `11.8` packages
# and the "*_run_*" version specifiers correspond to `11.x` packages.
cuda11_libcublas_host_version:
- "=11.11.3.6"
cuda11_libcublas_run_version:
- ">=11.5.2.43,<12.0.0"
cuda11_libcurand_host_version:
- "=10.3.0.86"
cuda11_libcurand_run_version:
- ">=10.2.5.43,<10.3.1"
cuda11_libcusolver_host_version:
- "=11.4.1.48"
cuda11_libcusolver_run_version:
- ">=11.2.0.43,<11.4.2"
cuda11_libcusparse_host_version:
- "=11.7.5.86"
cuda11_libcusparse_run_version:
- ">=11.6.0.43,<12.0.0"
# `cuda-profiler-api` only has `11.8.0` and `12.0.0` packages for all
# architectures. The "*_host_*" version specifiers correspond to `11.8` packages and the
# "*_run_*" version specifiers correspond to `11.x` packages.
cuda11_cuda_profiler_api_host_version:
- "=11.8.86"
cuda11_cuda_profiler_api_run_version:
- ">=11.4.240,<12"
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/cuda-ann-bench/build.sh | #!/usr/bin/env bash
# Copyright (c) 2023, NVIDIA CORPORATION.
./build.sh bench-ann --allgpuarch --no-nvtx --build-metrics=bench_ann --incl-cache-stats
cmake --install cpp/build --component ann_bench
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/cuda-ann-bench/meta.yaml | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Usage:
# conda build . -c conda-forge -c nvidia -c rapidsai
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set cuda_spec = ">=" + cuda_major ~ ",<" + (cuda_major | int + 1) ~ ".0a0" %} # i.e. >=11,<12.0a0
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: cuda-ann-bench
version: {{ version }}
script: build.sh
source:
path: ../../..
build:
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- RAPIDS_ARTIFACTS_DIR
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=libcuvs-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=libcuvs-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- python
- libraft {{ version }}
- libcuvs {{ version }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cuda-profiler-api {{ cuda11_cuda_profiler_api_run_version }}
- libcublas {{ cuda11_libcublas_host_version }}
- libcublas-dev {{ cuda11_libcublas_host_version }}
{% else %}
- cuda-profiler-api
- libcublas-dev
{% endif %}
- glog {{ glog_version }}
- nlohmann_json {{ nlohmann_json_version }}
- h5py {{ h5py_version }}
- benchmark
- matplotlib
- python
- pandas
- pyyaml
# rmm is needed to determine if package is gpu-enabled
- rmm ={{ minor_version }}
run:
- python
- libraft {{ version }}
- libcuvs {{ version }}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- glog {{ glog_version }}
- h5py {{ h5py_version }}
- benchmark
- glog {{ glog_version }}
- matplotlib
- python
- pandas
- pyyaml
# rmm is needed to determine if package is gpu-enabled
- rmm ={{ minor_version }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: CUDA ANN GPU and CPU benchmarks
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/cuda-ann-bench-cpu/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
glog_version:
- ">=0.6.0"
h5py_version:
- ">=3.8.0"
nlohmann_json_version:
- ">=3.11.2"
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/cuda-ann-bench-cpu/build.sh | #!/usr/bin/env bash
# Copyright (c) 2023, NVIDIA CORPORATION.
./build.sh bench-ann --cpu-only --no-nvtx --build-metrics=bench_ann_cpu --incl-cache-stats
cmake --install cpp/build --component ann_bench
| 0 |
rapidsai_public_repos/cuvs/conda/recipes | rapidsai_public_repos/cuvs/conda/recipes/cuda-ann-bench-cpu/meta.yaml | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Usage:
# conda build . -c conda-forge -c nvidia -c rapidsai
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: raft-ann-bench-cpu
version: {{ version }}
script: build.sh
source:
path: ../../..
build:
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- RAPIDS_ARTIFACTS_DIR
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=libcuvs-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=libcuvs-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
number: {{ GIT_DESCRIBE_NUMBER }}
string: py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- glog {{ glog_version }}
- matplotlib
- nlohmann_json {{ nlohmann_json_version }}
- python
- pyyaml
- pandas
run:
- glog {{ glog_version }}
- h5py {{ h5py_version }}
- matplotlib
- python
- pyyaml
- pandas
- benchmark
about:
home: https://rapids.ai/
license: Apache-2.0
summary: RAFT ANN CPU benchmarks
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/cpp/.clangd | # https://clangd.llvm.org/config
# Apply a config conditionally to all C files
If:
PathMatch: .*\.(c|h)$
---
# Apply a config conditionally to all C++ files
If:
PathMatch: .*\.(c|h)pp
---
# Apply a config conditionally to all CUDA files
If:
PathMatch: .*\.cuh?
CompileFlags:
Add:
- "-x"
- "cuda"
# No error on unknown CUDA versions
- "-Wno-unknown-cuda-version"
# Allow variadic CUDA functions
- "-Xclang=-fcuda-allow-variadic-functions"
Diagnostics:
Suppress:
- "variadic_device_fn"
- "attributes_not_allowed"
---
# Tweak the clangd parse settings for all files
CompileFlags:
Add:
# report all errors
- "-ferror-limit=0"
- "-fmacro-backtrace-limit=0"
- "-ftemplate-backtrace-limit=0"
# Skip the CUDA version check
- "--no-cuda-version-check"
Remove:
# remove gcc's -fcoroutines
- -fcoroutines
# remove nvc++ flags unknown to clang
- "-gpu=*"
- "-stdpar*"
# remove nvcc flags unknown to clang
- "-arch*"
- "-gencode*"
- "--generate-code*"
- "-ccbin*"
- "-t=*"
- "--threads*"
- "-Xptxas*"
- "-Xcudafe*"
- "-Xfatbin*"
- "-Xcompiler*"
- "--diag-suppress*"
- "--diag_suppress*"
- "--compiler-options*"
- "--expt-extended-lambda"
- "--expt-relaxed-constexpr"
- "-forward-unknown-to-host-compiler"
- "-Werror=cross-execution-space-call"
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/cpp/CMakeLists.txt | # =============================================================================
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
set(RAPIDS_VERSION "24.02")
set(CUVS_VERSION "24.02.00")
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
include(../fetch_rapids.cmake)
include(rapids-cmake)
include(rapids-cpm)
include(rapids-export)
include(rapids-find)
option(BUILD_CPU_ONLY "Build CPU only components. Applies to CUVS benchmarks currently" OFF)
# workaround for rapids_cuda_init_architectures not working for arch detection with
# enable_language(CUDA)
set(lang_list "CXX")
if(NOT BUILD_CPU_ONLY)
include(rapids-cuda)
rapids_cuda_init_architectures(cuVS)
list(APPEND lang_list "CUDA")
endif()
project(
cuVS
VERSION ${CUVS_VERSION}
LANGUAGES ${lang_list}
)
# Write the version header
rapids_cmake_write_version_file(include/cuvs/version_config.hpp)
# ##################################################################################################
# * build type ---------------------------------------------------------------
# Set a default build type if none was specified
rapids_cmake_build_type(Release)
# this is needed for clang-tidy runs
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
# ##################################################################################################
# * User Options ------------------------------------------------------------
option(BUILD_SHARED_LIBS "Build cuvs shared libraries" ON)
option(BUILD_TESTS "Build cuvs unit-tests" ON)
option(BUILD_MICRO_BENCH "Build cuvs C++ micro benchmarks" OFF)
option(BUILD_ANN_BENCH "Build cuvs ann benchmarks" OFF)
option(CUDA_ENABLE_KERNELINFO "Enable kernel resource usage info" OFF)
option(CUDA_ENABLE_LINEINFO
"Enable the -lineinfo option for nvcc (useful for cuda-memcheck / profiler)" OFF
)
option(CUDA_STATIC_RUNTIME "Statically link the CUDA toolkit runtime and libraries" OFF)
option(CUDA_LOG_COMPILE_TIME "Write a log of compilation times to nvcc_compile_log.csv" OFF)
option(DETECT_CONDA_ENV "Enable detection of conda environment for dependencies" ON)
option(DISABLE_DEPRECATION_WARNINGS "Disable deprecaction warnings " ON)
option(DISABLE_OPENMP "Disable OpenMP" OFF)
option(CUVS_NVTX "Enable nvtx markers" OFF)
if((BUILD_TESTS
OR BUILD_MICRO_BENCH
OR BUILD_ANN_BENCH
)
AND NOT BUILD_CPU_ONLY
)
endif()
if(BUILD_CPU_ONLY)
set(BUILD_SHARED_LIBS OFF)
set(BUILD_TESTS OFF)
endif()
# Needed because GoogleBenchmark changes the state of FindThreads.cmake, causing subsequent runs to
# have different values for the `Threads::Threads` target. Setting this flag ensures
# `Threads::Threads` is the same value across all builds so that cache hits occur
set(THREADS_PREFER_PTHREAD_FLAG ON)
include(CMakeDependentOption)
# cmake_dependent_option( CUVS_USE_FAISS_STATIC "Build and statically link the FAISS library for
# nearest neighbors search on GPU" ON CUVS_COMPILE_LIBRARY OFF )
message(VERBOSE "cuVS: Build cuVS unit-tests: ${BUILD_TESTS}")
message(VERBOSE "cuVS: Building cuvs C++ benchmarks: ${BUILD_MICRO_BENCH}")
message(VERBOSE "cuVS: Building ANN benchmarks: ${BUILD_ANN_BENCH}")
message(VERBOSE "cuVS: Build CPU only components: ${BUILD_CPU_ONLY}")
message(VERBOSE "cuVS: Enable detection of conda environment for dependencies: ${DETECT_CONDA_ENV}")
message(VERBOSE "cuVS: Disable depreaction warnings " ${DISABLE_DEPRECATION_WARNINGS})
message(VERBOSE "cuVS: Disable OpenMP: ${DISABLE_OPENMP}")
message(VERBOSE "cuVS: Enable kernel resource usage info: ${CUDA_ENABLE_KERNELINFO}")
message(VERBOSE "cuVS: Enable lineinfo in nvcc: ${CUDA_ENABLE_LINEINFO}")
message(VERBOSE "cuVS: Enable nvtx markers: ${CUVS_NVTX}")
message(VERBOSE
"cuVS: Statically link the CUDA toolkit runtime and libraries: ${CUDA_STATIC_RUNTIME}"
)
# Set RMM logging level
set(RMM_LOGGING_LEVEL
"INFO"
CACHE STRING "Choose the logging level."
)
set_property(
CACHE RMM_LOGGING_LEVEL PROPERTY STRINGS "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "CRITICAL" "OFF"
)
message(VERBOSE "cuVS: RMM_LOGGING_LEVEL = '${RMM_LOGGING_LEVEL}'.")
# ##################################################################################################
# * Conda environment detection ----------------------------------------------
if(DETECT_CONDA_ENV)
rapids_cmake_support_conda_env(conda_env MODIFY_PREFIX_PATH)
if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT AND DEFINED ENV{CONDA_PREFIX})
message(
STATUS "cuVS: No CMAKE_INSTALL_PREFIX argument detected, setting to: $ENV{CONDA_PREFIX}"
)
set(CMAKE_INSTALL_PREFIX "$ENV{CONDA_PREFIX}")
endif()
endif()
# ##################################################################################################
# * compiler options ----------------------------------------------------------
set(_ctk_static_suffix "")
if(CUDA_STATIC_RUNTIME)
set(_ctk_static_suffix "_static")
endif()
if(NOT BUILD_CPU_ONLY)
# CUDA runtime
rapids_cuda_init_runtime(USE_STATIC ${CUDA_STATIC_RUNTIME})
# * find CUDAToolkit package
# * determine GPU architectures
# * enable the CMake CUDA language
# * set other CUDA compilation flags
rapids_find_package(
CUDAToolkit REQUIRED
BUILD_EXPORT_SET cuvs-exports
INSTALL_EXPORT_SET cuvs-exports
)
else()
add_compile_definitions(BUILD_CPU_ONLY)
endif()
if(NOT DISABLE_OPENMP)
rapids_find_package(
OpenMP REQUIRED
BUILD_EXPORT_SET cuvs-exports
INSTALL_EXPORT_SET cuvs-exports
)
if(OPENMP_FOUND)
message(VERBOSE "cuVS: OpenMP found in ${OpenMP_CXX_INCLUDE_DIRS}")
endif()
endif()
include(cmake/modules/ConfigureCUDA.cmake)
# ##################################################################################################
# * Requirements -------------------------------------------------------------
# add third party dependencies using CPM
rapids_cpm_init()
if(NOT BUILD_CPU_ONLY)
include(cmake/thirdparty/get_raft.cmake)
endif()
if(BUILD_TESTS)
include(cmake/thirdparty/get_gtest.cmake)
endif()
if(BUILD_MICRO_BENCH OR BUILD_ANN_BENCH)
include(${rapids-cmake-dir}/cpm/gbench.cmake)
rapids_cpm_gbench()
endif()
include(cmake/thirdparty/get_cutlass.cmake)
# ##################################################################################################
# * cuvs ---------------------------------------------------------------------
add_library(
cuvs SHARED
src/distance/detail/pairwise_matrix/dispatch_canberra_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_canberra_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_correlation_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_correlation_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_cosine_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_cosine_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_hamming_unexpanded_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_hamming_unexpanded_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_hellinger_expanded_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_hellinger_expanded_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_jensen_shannon_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_jensen_shannon_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_kl_divergence_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_kl_divergence_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_l1_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_l1_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_l2_expanded_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_l2_expanded_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_l2_unexpanded_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_l2_unexpanded_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_l_inf_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_l_inf_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_lp_unexpanded_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_lp_unexpanded_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_rbf.cu
src/distance/detail/pairwise_matrix/dispatch_russel_rao_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_russel_rao_float_float_float_int.cu
src/distance/distance.cu
src/distance/fused_l2_nn.cu
src/matrix/detail/select_k_double_int64_t.cu
src/matrix/detail/select_k_double_uint32_t.cu
src/matrix/detail/select_k_float_int64_t.cu
src/matrix/detail/select_k_float_uint32_t.cu
src/matrix/detail/select_k_float_int32.cu
src/matrix/detail/select_k_half_int64_t.cu
src/matrix/detail/select_k_half_uint32_t.cu
src/neighbors/ball_cover.cu
src/neighbors/brute_force_fused_l2_knn_float_int64_t.cu
src/neighbors/brute_force_knn_int64_t_float_int64_t.cu
src/neighbors/brute_force_knn_int64_t_float_uint32_t.cu
src/neighbors/brute_force_knn_int_float_int.cu
src/neighbors/brute_force_knn_uint32_t_float_uint32_t.cu
src/neighbors/brute_force_knn_index_float.cu
src/neighbors/detail/cagra/search_multi_cta_float_uint32_dim128_t8.cu
src/neighbors/detail/cagra/search_multi_cta_float_uint32_dim256_t16.cu
src/neighbors/detail/cagra/search_multi_cta_float_uint32_dim512_t32.cu
src/neighbors/detail/cagra/search_multi_cta_float_uint32_dim1024_t32.cu
src/neighbors/detail/cagra/search_multi_cta_int8_uint32_dim128_t8.cu
src/neighbors/detail/cagra/search_multi_cta_int8_uint32_dim256_t16.cu
src/neighbors/detail/cagra/search_multi_cta_int8_uint32_dim512_t32.cu
src/neighbors/detail/cagra/search_multi_cta_int8_uint32_dim1024_t32.cu
src/neighbors/detail/cagra/search_multi_cta_uint8_uint32_dim128_t8.cu
src/neighbors/detail/cagra/search_multi_cta_uint8_uint32_dim256_t16.cu
src/neighbors/detail/cagra/search_multi_cta_uint8_uint32_dim512_t32.cu
src/neighbors/detail/cagra/search_multi_cta_uint8_uint32_dim1024_t32.cu
src/neighbors/detail/cagra/search_single_cta_float_uint32_dim128_t8.cu
src/neighbors/detail/cagra/search_single_cta_float_uint32_dim256_t16.cu
src/neighbors/detail/cagra/search_single_cta_float_uint32_dim512_t32.cu
src/neighbors/detail/cagra/search_single_cta_float_uint32_dim1024_t32.cu
src/neighbors/detail/cagra/search_single_cta_int8_uint32_dim128_t8.cu
src/neighbors/detail/cagra/search_single_cta_int8_uint32_dim256_t16.cu
src/neighbors/detail/cagra/search_single_cta_int8_uint32_dim512_t32.cu
src/neighbors/detail/cagra/search_single_cta_int8_uint32_dim1024_t32.cu
src/neighbors/detail/cagra/search_single_cta_uint8_uint32_dim128_t8.cu
src/neighbors/detail/cagra/search_single_cta_uint8_uint32_dim256_t16.cu
src/neighbors/detail/cagra/search_single_cta_uint8_uint32_dim512_t32.cu
src/neighbors/detail/cagra/search_single_cta_uint8_uint32_dim1024_t32.cu
src/neighbors/detail/ivf_flat_interleaved_scan_float_float_int64_t.cu
src/neighbors/detail/ivf_flat_interleaved_scan_int8_t_int32_t_int64_t.cu
src/neighbors/detail/ivf_flat_interleaved_scan_uint8_t_uint32_t_int64_t.cu
src/neighbors/detail/ivf_flat_search.cu
src/neighbors/detail/ivf_pq_compute_similarity_float_float.cu
src/neighbors/detail/ivf_pq_compute_similarity_float_fp8_false.cu
src/neighbors/detail/ivf_pq_compute_similarity_float_fp8_true.cu
src/neighbors/detail/ivf_pq_compute_similarity_float_half.cu
src/neighbors/detail/ivf_pq_compute_similarity_half_fp8_false.cu
src/neighbors/detail/ivf_pq_compute_similarity_half_fp8_true.cu
src/neighbors/detail/ivf_pq_compute_similarity_half_half.cu
src/neighbors/detail/refine_host_float_float.cpp
src/neighbors/detail/refine_host_int8_t_float.cpp
src/neighbors/detail/refine_host_uint8_t_float.cpp
src/neighbors/detail/selection_faiss_int32_t_float.cu
src/neighbors/detail/selection_faiss_int_double.cu
src/neighbors/detail/selection_faiss_long_float.cu
src/neighbors/detail/selection_faiss_size_t_double.cu
src/neighbors/detail/selection_faiss_size_t_float.cu
src/neighbors/detail/selection_faiss_uint32_t_float.cu
src/neighbors/detail/selection_faiss_int64_t_double.cu
src/neighbors/detail/selection_faiss_int64_t_half.cu
src/neighbors/detail/selection_faiss_uint32_t_double.cu
src/neighbors/detail/selection_faiss_uint32_t_half.cu
src/neighbors/ivf_flat_build_float_int64_t.cu
src/neighbors/ivf_flat_build_int8_t_int64_t.cu
src/neighbors/ivf_flat_build_uint8_t_int64_t.cu
src/neighbors/ivf_flat_extend_float_int64_t.cu
src/neighbors/ivf_flat_extend_int8_t_int64_t.cu
src/neighbors/ivf_flat_extend_uint8_t_int64_t.cu
src/neighbors/ivf_flat_search_float_int64_t.cu
src/neighbors/ivf_flat_search_int8_t_int64_t.cu
src/neighbors/ivf_flat_search_uint8_t_int64_t.cu
src/neighbors/ivfpq_build_float_int64_t.cu
src/neighbors/ivfpq_build_int8_t_int64_t.cu
src/neighbors/ivfpq_build_uint8_t_int64_t.cu
src/neighbors/ivfpq_extend_float_int64_t.cu
src/neighbors/ivfpq_extend_int8_t_int64_t.cu
src/neighbors/ivfpq_extend_uint8_t_int64_t.cu
src/neighbors/ivfpq_search_float_int64_t.cu
src/neighbors/ivfpq_search_int8_t_int64_t.cu
src/neighbors/ivfpq_search_uint8_t_int64_t.cu
src/neighbors/refine_float_float.cu
src/neighbors/refine_int8_t_float.cu
src/neighbors/refine_uint8_t_float.cu
src/cuvs_runtime/cluster/cluster_cost.cuh
src/cuvs_runtime/cluster/cluster_cost_double.cu
src/cuvs_runtime/cluster/cluster_cost_float.cu
src/cuvs_runtime/cluster/kmeans_fit_double.cu
src/cuvs_runtime/cluster/kmeans_fit_float.cu
src/cuvs_runtime/cluster/kmeans_init_plus_plus_double.cu
src/cuvs_runtime/cluster/kmeans_init_plus_plus_float.cu
src/cuvs_runtime/cluster/update_centroids.cuh
src/cuvs_runtime/cluster/update_centroids_double.cu
src/cuvs_runtime/cluster/update_centroids_float.cu
src/cuvs_runtime/distance/fused_l2_min_arg.cu
src/cuvs_runtime/distance/pairwise_distance.cu
src/cuvs_runtime/matrix/select_k_float_int64_t.cu
src/cuvs_runtime/neighbors/brute_force_knn_int64_t_float.cu
src/cuvs_runtime/neighbors/cagra_build.cu
src/cuvs_runtime/neighbors/cagra_search.cu
src/cuvs_runtime/neighbors/cagra_serialize.cu
src/cuvs_runtime/neighbors/ivf_flat_build.cu
src/cuvs_runtime/neighbors/ivf_flat_search.cu
src/cuvs_runtime/neighbors/ivf_flat_serialize.cu
src/cuvs_runtime/neighbors/ivfpq_build.cu
src/cuvs_runtime/neighbors/ivfpq_deserialize.cu
src/cuvs_runtime/neighbors/ivfpq_search_float_int64_t.cu
src/cuvs_runtime/neighbors/ivfpq_search_int8_t_int64_t.cu
src/cuvs_runtime/neighbors/ivfpq_search_uint8_t_int64_t.cu
src/cuvs_runtime/neighbors/ivfpq_serialize.cu
src/cuvs_runtime/neighbors/refine_d_int64_t_float.cu
src/cuvs_runtime/neighbors/refine_d_int64_t_int8_t.cu
src/cuvs_runtime/neighbors/refine_d_int64_t_uint8_t.cu
src/cuvs_runtime/neighbors/refine_h_int64_t_float.cu
src/cuvs_runtime/neighbors/refine_h_int64_t_int8_t.cu
src/cuvs_runtime/neighbors/refine_h_int64_t_uint8_t.cu
src/cuvs_runtime/random/rmat_rectangular_generator_int64_double.cu
src/cuvs_runtime/random/rmat_rectangular_generator_int64_float.cu
src/cuvs_runtime/random/rmat_rectangular_generator_int_double.cu
src/cuvs_runtime/random/rmat_rectangular_generator_int_float.cu
src/spatial/knn/detail/ball_cover/registers_pass_one_2d_dist.cu
src/spatial/knn/detail/ball_cover/registers_pass_one_2d_euclidean.cu
src/spatial/knn/detail/ball_cover/registers_pass_one_2d_haversine.cu
src/spatial/knn/detail/ball_cover/registers_pass_one_3d_dist.cu
src/spatial/knn/detail/ball_cover/registers_pass_one_3d_euclidean.cu
src/spatial/knn/detail/ball_cover/registers_pass_one_3d_haversine.cu
src/spatial/knn/detail/ball_cover/registers_pass_two_2d_dist.cu
src/spatial/knn/detail/ball_cover/registers_pass_two_2d_euclidean.cu
src/spatial/knn/detail/ball_cover/registers_pass_two_2d_haversine.cu
src/spatial/knn/detail/ball_cover/registers_pass_two_3d_dist.cu
src/spatial/knn/detail/ball_cover/registers_pass_two_3d_euclidean.cu
src/spatial/knn/detail/ball_cover/registers_pass_two_3d_haversine.cu
src/spatial/knn/detail/fused_l2_knn_int32_t_float.cu
src/spatial/knn/detail/fused_l2_knn_int64_t_float.cu
src/spatial/knn/detail/fused_l2_knn_uint32_t_float.cu
)
target_compile_options(
  cuvs INTERFACE $<$<COMPILE_LANG_AND_ID:CUDA,NVIDIA>:--expt-extended-lambda
                 --expt-relaxed-constexpr>
)

add_library(cuvs::cuvs ALIAS cuvs)

target_include_directories(
  cuvs PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>"
              "$<INSTALL_INTERFACE:include>"
)

if(NOT BUILD_CPU_ONLY)
  # Keep cuVS as lightweight as possible. Only CUDA libs and rmm should be used in global target.
  target_link_libraries(cuvs PUBLIC raft::raft nvidia::cutlass::cutlass)
endif()

# Endian detection: expose the result as a compile definition so sources can
# branch on byte order without a generated configuration header.
include(TestBigEndian)
test_big_endian(BIG_ENDIAN)
if(BIG_ENDIAN)
  target_compile_definitions(cuvs PRIVATE CUVS_SYSTEM_LITTLE_ENDIAN=0)
else()
  target_compile_definitions(cuvs PRIVATE CUVS_SYSTEM_LITTLE_ENDIAN=1)
endif()

# Linker script that groups the CUDA fatbin sections together; passed to the
# linker below to keep CUDA symbols out of the middle of large debug binaries.
file(
  WRITE "${CMAKE_CURRENT_BINARY_DIR}/fatbin.ld"
  [=[
SECTIONS
{
  .nvFatBinSegment : { *(.nvFatBinSegment) }
  .nv_fatbin : { *(.nv_fatbin) }
}
]=]
)

# ##################################################################################################
# * NVTX support in cuvs -----------------------------------------------------
if(CUVS_NVTX)
  # This enables NVTX within the project with no option to disable it downstream.
  target_link_libraries(cuvs PUBLIC CUDA::nvToolsExt)
  target_compile_definitions(cuvs PUBLIC NVTX_ENABLED)
else()
  # Allow enabling NVTX downstream if not set here. This creates a new option at build/install
  # time, which is set by default to OFF, but can be enabled in the dependent project.
  get_property(
    nvtx_option_help_string
    CACHE CUVS_NVTX
    PROPERTY HELPSTRING
  )
  string(
    CONCAT
    nvtx_export_string
    "option(CUVS_NVTX \""
    ${nvtx_option_help_string}
    "\" OFF)"
    [=[
target_link_libraries(cuvs::cuvs INTERFACE $<$<BOOL:${CUVS_NVTX}>:CUDA::nvToolsExt>)
target_compile_definitions(cuvs::cuvs INTERFACE $<$<BOOL:${CUVS_NVTX}>:NVTX_ENABLED>)
]=]
  )
endif()

set_target_properties(
  cuvs
  PROPERTIES CXX_STANDARD 17
             CXX_STANDARD_REQUIRED ON
             CUDA_STANDARD 17
             CUDA_STANDARD_REQUIRED ON
             POSITION_INDEPENDENT_CODE ON
)

target_compile_options(
  cuvs PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${CUVS_CXX_FLAGS}>"
               "$<$<COMPILE_LANGUAGE:CUDA>:${CUVS_CUDA_FLAGS}>"
)

# ensure CUDA symbols aren't relocated to the middle of the debug build binaries
target_link_options(cuvs PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/fatbin.ld")

# ##################################################################################################
# * install targets-----------------------------------------------------------
rapids_cmake_install_lib_dir(lib_dir)
include(GNUInstallDirs)
include(CPack)

install(
  TARGETS cuvs
  DESTINATION ${lib_dir}
  COMPONENT cuvs
  EXPORT cuvs-exports
)

install(
  DIRECTORY include/cuvs
  COMPONENT cuvs
  DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
)

# The generated version header lives in the build tree, so it is installed separately.
install(
  FILES ${CMAKE_CURRENT_BINARY_DIR}/include/cuvs/version_config.hpp
  COMPONENT cuvs
  DESTINATION include/cuvs
)

# Export the install-tree targets so dependents can `find_package(cuvs)`.
rapids_export(
  INSTALL cuvs
  EXPORT_SET cuvs-exports
  GLOBAL_TARGETS cuvs
  NAMESPACE cuvs::
)

# ##################################################################################################
# * build export -------------------------------------------------------------
rapids_export(
  BUILD cuvs
  EXPORT_SET cuvs-exports
  GLOBAL_TARGETS cuvs
  NAMESPACE cuvs::
)

# ##################################################################################################
# * shared test/bench headers ------------------------------------------------
if(BUILD_TESTS OR BUILD_MICRO_BENCH)
  include(internal/CMakeLists.txt)
endif()

# ##################################################################################################
# * build test executable ----------------------------------------------------
if(BUILD_TESTS)
  include(test/CMakeLists.txt)
endif()

# ##################################################################################################
# * build benchmark executable -----------------------------------------------
if(BUILD_MICRO_BENCH)
  include(bench/micro/CMakeLists.txt)
endif()

# ##################################################################################################
# * build ann benchmark executable -----------------------------------------------
if(BUILD_ANN_BENCH)
  include(bench/ann/CMakeLists.txt)
endif()
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/cpp/.clang-tidy | ---
# Refer to the following link for the explanation of each params:
# https://releases.llvm.org/8.0.1/tools/clang/tools/extra/docs/clang-tidy/checks/list.html
Checks: '-*,modernize-*,-modernize-make-*,-modernize-raw-string-literal,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
WarningsAsErrors: ''
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
FormatStyle: none
User: snanditale
CheckOptions:
- key: google-build-namespaces.HeaderFileExtensions
value: ',h,hh,hpp,hxx'
- key: google-global-names-in-headers.HeaderFileExtensions
value: ',h,hh,hpp,hxx'
- key: google-readability-braces-around-statements.ShortStatementLines
value: '1'
- key: google-readability-function-size.BranchThreshold
value: '4294967295'
- key: google-readability-function-size.LineThreshold
value: '4294967295'
- key: google-readability-function-size.NestingThreshold
value: '4294967295'
- key: google-readability-function-size.ParameterThreshold
value: '4294967295'
- key: google-readability-function-size.StatementThreshold
value: '800'
- key: google-readability-function-size.VariableThreshold
value: '4294967295'
- key: google-readability-namespace-comments.ShortNamespaceLines
value: '10'
- key: google-readability-namespace-comments.SpacesBeforeComments
value: '2'
- key: google-runtime-int.SignedTypePrefix
value: int
- key: google-runtime-int.TypeSuffix
value: ''
- key: google-runtime-int.UnsignedTypePrefix
value: uint
- key: google-runtime-references.WhiteListTypes
value: ''
- key: modernize-loop-convert.MaxCopySize
value: '16'
- key: modernize-loop-convert.MinConfidence
value: reasonable
- key: modernize-loop-convert.NamingStyle
value: CamelCase
- key: modernize-pass-by-value.IncludeStyle
value: llvm
- key: modernize-pass-by-value.ValuesOnly
value: '0'
- key: modernize-replace-auto-ptr.IncludeStyle
value: llvm
- key: modernize-replace-random-shuffle.IncludeStyle
value: llvm
- key: modernize-use-auto.MinTypeNameLength
value: '5'
- key: modernize-use-auto.RemoveStars
value: '0'
- key: modernize-use-default-member-init.IgnoreMacros
value: '1'
- key: modernize-use-default-member-init.UseAssignment
value: '0'
- key: modernize-use-emplace.ContainersWithPushBack
value: '::std::vector;::std::list;::std::deque'
- key: modernize-use-emplace.SmartPointers
value: '::std::shared_ptr;::std::unique_ptr;::std::auto_ptr;::std::weak_ptr'
- key: modernize-use-emplace.TupleMakeFunctions
value: '::std::make_pair;::std::make_tuple'
- key: modernize-use-emplace.TupleTypes
value: '::std::pair;::std::tuple'
- key: modernize-use-equals-default.IgnoreMacros
value: '1'
- key: modernize-use-noexcept.ReplacementString
value: ''
- key: modernize-use-noexcept.UseNoexceptFalse
value: '1'
- key: modernize-use-nullptr.NullMacros
value: 'NULL'
- key: modernize-use-transparent-functors.SafeMode
value: '0'
- key: modernize-use-using.IgnoreMacros
value: '1'
- key: readability-identifier-naming.AbstractClassCase
value: lower_case
- key: readability-identifier-naming.AbstractClassPrefix
value: ''
- key: readability-identifier-naming.AbstractClassSuffix
value: ''
- key: readability-identifier-naming.ClassCase
value: lower_case
- key: readability-identifier-naming.ClassPrefix
value: ''
- key: readability-identifier-naming.ClassSuffix
value: ''
- key: readability-identifier-naming.ClassConstantCase
value: CamelCase
- key: readability-identifier-naming.ClassConstantPrefix
value: 'k'
- key: readability-identifier-naming.ClassConstantSuffix
value: ''
- key: readability-identifier-naming.ClassMemberCase
value: lower_case
- key: readability-identifier-naming.ClassMemberPrefix
value: ''
- key: readability-identifier-naming.ClassMemberSuffix
value: '_'
- key: readability-identifier-naming.ClassMethodCase
value: lower_case
- key: readability-identifier-naming.ClassMethodPrefix
value: ''
- key: readability-identifier-naming.ClassMethodSuffix
value: ''
- key: readability-identifier-naming.ConstexprFunctionCase
value: lower_case
- key: readability-identifier-naming.ConstexprFunctionPrefix
value: ''
- key: readability-identifier-naming.ConstexprFunctionSuffix
value: ''
- key: readability-identifier-naming.ConstexprMethodCase
value: lower_case
- key: readability-identifier-naming.ConstexprMethodPrefix
value: ''
- key: readability-identifier-naming.ConstexprMethodSuffix
value: ''
- key: readability-identifier-naming.ConstexprVariableCase
value: CamelCase
- key: readability-identifier-naming.ConstexprVariablePrefix
value: 'k'
- key: readability-identifier-naming.ConstexprVariableSuffix
value: ''
- key: readability-identifier-naming.EnumCase
value: CamelCase
- key: readability-identifier-naming.EnumPrefix
value: ''
- key: readability-identifier-naming.EnumSuffix
value: ''
- key: readability-identifier-naming.EnumConstantCase
value: CamelCase
- key: readability-identifier-naming.EnumConstantPrefix
value: 'k'
- key: readability-identifier-naming.EnumConstantSuffix
value: ''
- key: readability-identifier-naming.FunctionCase
value: lower_case
- key: readability-identifier-naming.FunctionPrefix
value: ''
- key: readability-identifier-naming.FunctionSuffix
value: ''
- key: readability-identifier-naming.GlobalConstantCase
value: CamelCase
- key: readability-identifier-naming.GlobalConstantPrefix
value: 'k'
- key: readability-identifier-naming.GlobalConstantSuffix
value: ''
- key: readability-identifier-naming.IgnoreFailedSplit
value: '0'
- key: readability-identifier-naming.LocalVariableCase
value: 'lower_case'
- key: readability-identifier-naming.LocalVariablePrefix
value: ''
- key: readability-identifier-naming.LocalVariableSuffix
value: ''
- key: readability-identifier-naming.ConstExprVariableCase
value: 'CamelCase'
- key: readability-identifier-naming.ConstExprVariablePrefix
value: 'k'
- key: readability-identifier-naming.ConstExprVariableSuffix
value: ''
- key: readability-identifier-naming.MemberCase
value: lower_case
- key: readability-identifier-naming.MemberPrefix
value: ''
- key: readability-identifier-naming.MemberSuffix
value: ''
- key: readability-identifier-naming.NamespaceCase
value: lower_case
- key: readability-identifier-naming.NamespacePrefix
value: ''
- key: readability-identifier-naming.NamespaceSuffix
value: ''
- key: readability-identifier-naming.PrivateMemberCase
value: lower_case
- key: readability-identifier-naming.PrivateMemberPrefix
value: ''
- key: readability-identifier-naming.PrivateMemberSuffix
value: '_'
- key: readability-identifier-naming.ProtectedMemberCase
value: lower_case
- key: readability-identifier-naming.ProtectedMemberPrefix
value: ''
- key: readability-identifier-naming.ProtectedMemberSuffix
value: '_'
- key: readability-identifier-naming.StaticConstantCase
value: CamelCase
- key: readability-identifier-naming.StaticConstantPrefix
value: 'k'
- key: readability-identifier-naming.StaticConstantSuffix
value: ''
- key: readability-identifier-naming.StructCase
value: lower_case
- key: readability-identifier-naming.StructPrefix
value: ''
- key: readability-identifier-naming.StructSuffix
value: ''
- key: readability-identifier-naming.TypeAliasCase
value: lower_case
- key: readability-identifier-naming.TypeAliasPrefix
value: ''
- key: readability-identifier-naming.TypeAliasSuffix
value: ''
- key: readability-identifier-naming.TypeTemplateParameterCase
value: CamelCase
- key: readability-identifier-naming.TypeTemplateParameterPrefix
value: ''
- key: readability-identifier-naming.TypeTemplateParameterSuffix
value: ''
- key: readability-identifier-naming.TypedefCase
value: lower_case
- key: readability-identifier-naming.TypedefPrefix
value: ''
- key: readability-identifier-naming.TypedefSuffix
value: ''
- key: readability-identifier-naming.VariableCase
value: lower_case
- key: readability-identifier-naming.VariablePrefix
value: ''
- key: readability-identifier-naming.VariableSuffix
value: ''
...
| 0 |
rapidsai_public_repos/cuvs | rapidsai_public_repos/cuvs/cpp/.clang-format | ---
# Refer to the following link for the explanation of each params:
# http://releases.llvm.org/8.0.0/tools/clang/docs/ClangFormatStyleOptions.html
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: true
AlignConsecutiveBitFields: true
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: true
AllowAllConstructorInitializersOnNextLine: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortEnumsOnASingleLine: true
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: true
AllowShortLambdasOnASingleLine: true
AllowShortLoopsOnASingleLine: false
# This is deprecated
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
# disabling the below splits, else, they'll just add to the vertical length of source files!
SplitEmptyFunction: false
SplitEmptyRecord: false
SplitEmptyNamespace: false
BreakAfterJavaFieldAnnotations: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: WebKit
BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakInheritanceList: BeforeColon
BreakStringLiterals: true
ColumnLimit: 100
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
# Kept the below 2 to be the same as `IndentWidth` to keep everything uniform
ConstructorInitializerIndentWidth: 2
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Preserve
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Never
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
RawStringFormats:
- Language: Cpp
Delimiters:
- cc
- CC
- cpp
- Cpp
- CPP
- 'c++'
- 'C++'
CanonicalDelimiter: ''
- Language: TextProto
Delimiters:
- pb
- PB
- proto
- PROTO
EnclosingFunctions:
- EqualsProto
- EquivToProto
- PARSE_PARTIAL_TEXT_PROTO
- PARSE_TEST_PROTO
- PARSE_TEXT_PROTO
- ParseTextOrDie
- ParseTextProtoOrDie
CanonicalDelimiter: ''
BasedOnStyle: google
# Enabling comment reflow causes doxygen comments to be messed up in their formats!
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceBeforeSquareBrackets: false
SpaceInEmptyBlock: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: c++17
StatementMacros:
- Q_UNUSED
- QT_REQUIRE_VERSION
# Be consistent with indent-width, even for people who use tab for indentation!
TabWidth: 2
UseTab: Never
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/ivf_pq_types.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * DISCLAIMER: this file is deprecated and will be removed in a future release.
 * Please use <cuvs/neighbors/ivf_pq_types.hpp> instead.
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the cuvs::neighbors version instead.")
#include <cuvs/neighbors/ivf_pq_types.hpp>
namespace cuvs::spatial::knn::ivf_pq {

// Backwards-compatibility aliases: the implementation moved to cuvs::neighbors::ivf_pq.
using cuvs::neighbors::ivf_pq::codebook_gen;
using cuvs::neighbors::ivf_pq::index;
using cuvs::neighbors::ivf_pq::index_params;
using cuvs::neighbors::ivf_pq::search_params;

}  // namespace cuvs::spatial::knn::ivf_pq
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/ann_common.h | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma message( \
__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the other approximate KNN implementations defined in spatial/knn/*.")
#pragma once
#include "detail/processing.hpp"
#include "ivf_flat_types.hpp"
#include <cuvs/neighbors/ivf_pq_types.hpp>
#include <cuvs/distance/distance_types.hpp>
namespace cuvs {
namespace spatial {
namespace knn {
/**
 * Legacy container for an approximate-NN index together with the metric and
 * query settings used with it. The index members are smart pointers; the
 * templated `ivf_flat()` accessor (specialized below) selects the IVF-Flat
 * member matching a given data type.
 */
struct knnIndex {
  cuvs::distance::DistanceType metric;  // distance metric used with this index
  float metricArg;                      // metric argument (e.g. `p` for Minkowski distances)
  int nprobe;                           // number of IVF lists probed per query -- NOTE(review): confirm
  std::unique_ptr<MetricProcessor<float>> metric_processor;  // optional metric pre/post-processing

  // IVF-Flat indexes, one slot per supported data type (see ivf_flat() accessor).
  std::unique_ptr<const ivf_flat::index<float, int64_t>> ivf_flat_float_;
  std::unique_ptr<const ivf_flat::index<uint8_t, int64_t>> ivf_flat_uint8_t_;
  std::unique_ptr<const ivf_flat::index<int8_t, int64_t>> ivf_flat_int8_t_;

  std::unique_ptr<const cuvs::neighbors::ivf_pq::index<int64_t>> ivf_pq;
  int device;  // presumably the CUDA device id the index lives on -- verify against callers

  // Returns a reference to the unique_ptr member matching <T, IdxT> (specializations below).
  template <typename T, typename IdxT>
  auto ivf_flat() -> std::unique_ptr<const ivf_flat::index<T, IdxT>>&;
};
// Specializations of knnIndex::ivf_flat, mapping each supported data type to
// the struct member that stores the corresponding IVF-Flat index.
template <>
inline auto knnIndex::ivf_flat<float, int64_t>()
  -> std::unique_ptr<const ivf_flat::index<float, int64_t>>&
{
  return ivf_flat_float_;
}

template <>
inline auto knnIndex::ivf_flat<uint8_t, int64_t>()
  -> std::unique_ptr<const ivf_flat::index<uint8_t, int64_t>>&
{
  return ivf_flat_uint8_t_;
}

template <>
inline auto knnIndex::ivf_flat<int8_t, int64_t>()
  -> std::unique_ptr<const ivf_flat::index<int8_t, int64_t>>&
{
  return ivf_flat_int8_t_;
}
/** Base class for the legacy ANN parameter structs; the virtual destructor makes
 *  the hierarchy polymorphic so instances can be passed by base pointer. */
struct knnIndexParam {
  virtual ~knnIndexParam() {}
};

/** Parameters common to the IVF index variants. */
struct IVFParam : knnIndexParam {
  int nlist;   // number of inverted lists (mapped to index_params::n_lists below)
  int nprobe;  // number of lists probed per query
};

/** Parameters for the IVF-Flat index (nothing beyond IVFParam). */
struct IVFFlatParam : IVFParam {};

/** Parameters for the IVF-PQ index. */
struct IVFPQParam : IVFParam {
  int M;                      // presumably the number of PQ sub-quantizers -- confirm
  int n_bits;                 // presumably bits per PQ code -- confirm
  bool usePrecomputedTables;  // whether to use precomputed lookup tables -- confirm semantics
};
inline auto from_legacy_index_params(const IVFFlatParam& legacy,
cuvs::distance::DistanceType metric,
float metric_arg)
{
ivf_flat::index_params params;
params.metric = metric;
params.metric_arg = metric_arg;
params.n_lists = legacy.nlist;
return params;
}
}; // namespace knn
}; // namespace spatial
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/ivf_flat.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * DISCLAIMER: this file is deprecated and will be removed in a future release.
 * Please use <cuvs/neighbors/ivf_flat.cuh> instead.
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the cuvs::neighbors version instead.")
#include <cuvs/neighbors/ivf_flat.cuh>
namespace cuvs::spatial::knn::ivf_flat {

// Backwards-compatibility aliases: the implementation moved to cuvs::neighbors::ivf_flat.
using cuvs::neighbors::ivf_flat::build;
using cuvs::neighbors::ivf_flat::extend;
using cuvs::neighbors::ivf_flat::search;

};  // namespace cuvs::spatial::knn::ivf_flat
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/epsilon_neighborhood.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * DISCLAIMER: this file is deprecated and will be removed in a future release.
 * Please use <cuvs/neighbors/epsilon_neighborhood.cuh> instead.
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the cuvs::neighbors version instead.")
#include <cuvs/neighbors/epsilon_neighborhood.cuh>
namespace cuvs::spatial::knn {

// Backwards-compatibility aliases: the implementation moved to
// cuvs::neighbors::epsilon_neighborhood.
using cuvs::neighbors::epsilon_neighborhood::eps_neighbors_l2sq;
using cuvs::neighbors::epsilon_neighborhood::epsUnexpL2SqNeighborhood;

}  // namespace cuvs::spatial::knn
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/knn.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/neighbors/detail/knn_brute_force.cuh>
#include <cuvs/neighbors/detail/selection_faiss.cuh>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/nvtx.hpp>
#include <raft/matrix/detail/select_radix.cuh>
#include <raft/matrix/detail/select_warpsort.cuh>
namespace cuvs::spatial::knn {
/**
 * Performs a k-select across row partitioned index/distance
 * matrices formatted like the following:
 *
 *      row1: k0, k1, k2
 *      row2: k0, k1, k2
 *      row3: k0, k1, k2
 *      row1: k0, k1, k2
 *      row2: k0, k1, k2
 *      row3: k0, k1, k2
 *
 * etc...
 *
 * @tparam idx_t type of the payload (the indices carried along with the keys)
 * @tparam value_t type of the keys (the distances being compared)
 * @param in_keys row-partitioned keys (distances) laid out as described above
 * @param in_values row-partitioned payloads (indices), same layout as in_keys
 * @param out_keys merged keys, k entries per row
 * @param out_values merged payloads, k entries per row
 * @param n_samples number of rows in each partition
 * @param n_parts number of partitions stacked in the input
 * @param k number of entries to select per row
 * @param stream CUDA stream to run the merge on
 * @param translations per-partition index offsets -- NOTE(review): confirm exact
 *   semantics against cuvs::neighbors::detail::knn_merge_parts
 */
template <typename idx_t = int64_t, typename value_t = float>
inline void knn_merge_parts(const value_t* in_keys,
                            const idx_t* in_values,
                            value_t* out_keys,
                            idx_t* out_values,
                            size_t n_samples,
                            int n_parts,
                            int k,
                            cudaStream_t stream,
                            idx_t* translations)
{
  // Thin forwarding wrapper around the detail implementation.
  cuvs::neighbors::detail::knn_merge_parts(
    in_keys, in_values, out_keys, out_values, n_samples, n_parts, k, stream, translations);
}
/** Choose an implementation for the select-top-k (dispatched on in `select_k` below). */
enum class SelectKAlgo {
  /** Adapted from the faiss project. Result: sorted (not stable). */
  FAISS,
  /** Incomplete series of radix sort passes, comparing 8 bits per pass. Result: unsorted. */
  RADIX_8_BITS,
  /** Incomplete series of radix sort passes, comparing 11 bits per pass. Result: unsorted. */
  RADIX_11_BITS,
  /** Filtering with a bitonic-sort-based priority queue. Result: sorted (not stable). */
  WARP_SORT
};
/**
* Select k smallest or largest key/values from each row in the input data.
*
* If you think of the input data `in_keys` as a row-major matrix with input_len columns and
* n_inputs rows, then this function selects k smallest/largest values in each row and fills
* in the row-major matrix `out_keys` of size (n_inputs, k).
*
* Note, depending on the selected algorithm, the values within rows of `out_keys` are not
* necessarily sorted. See the `SelectKAlgo` enumeration for more details.
*
* Note: This call is deprecated, please use `raft/matrix/select_k.cuh`
*
* @tparam idx_t
* the payload type (what is being selected together with the keys).
* @tparam value_t
* the type of the keys (what is being compared).
*
* @param[in] in_keys
* contiguous device array of inputs of size (input_len * n_inputs);
* these are compared and selected.
* @param[in] in_values
* contiguous device array of inputs of size (input_len * n_inputs);
* typically, these are indices of the corresponding in_keys.
* You can pass `NULL` as an argument here; this would imply `in_values` is a homogeneous array
* of indices from `0` to `input_len - 1` for every input and reduce the usage of memory
* bandwidth.
* @param[in] n_inputs
* number of input rows, i.e. the batch size.
* @param[in] input_len
* length of a single input array (row); also sometimes referred as n_cols.
* Invariant: input_len >= k.
* @param[out] out_keys
* contiguous device array of outputs of size (k * n_inputs);
* the k smallest/largest values from each row of the `in_keys`.
* @param[out] out_values
* contiguous device array of outputs of size (k * n_inputs);
* the payload selected together with `out_keys`.
* @param[in] select_min
* whether to select k smallest (true) or largest (false) keys.
* @param[in] k
* the number of outputs to select in each input row.
* @param[in] stream
* @param[in] algo
* the implementation of the algorithm
*/
template <typename idx_t = int, typename value_t = float>
[[deprecated("Use function `select_k` from `raft/matrix/select_k.cuh`")]] inline void select_k(
  const value_t* in_keys,
  const idx_t* in_values,
  size_t n_inputs,
  size_t input_len,
  value_t* out_keys,
  idx_t* out_values,
  bool select_min,
  int k,
  cudaStream_t stream,
  SelectKAlgo algo = SelectKAlgo::FAISS)
{
  // Annotate the whole call with an NVTX range for profiling.
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope(
    "select-%s-%d (%zu, %zu) algo-%d",
    select_min ? "min" : "max",
    k,
    n_inputs,
    input_len,
    int(algo));

  // k must not exceed the row length: there would be nothing valid to select.
  ASSERT(size_t(input_len) >= size_t(k),
         "Size of the input (input_len = %zu) must be not smaller than the selection (k = %zu).",
         size_t(input_len),
         size_t(k));

  // Dispatch to the backend implementation requested by `algo`.
  switch (algo) {
    case SelectKAlgo::FAISS:
      neighbors::detail::select_k(
        in_keys, in_values, n_inputs, input_len, out_keys, out_values, select_min, k, stream);
      break;

    case SelectKAlgo::RADIX_8_BITS:
      raft::matrix::detail::select::radix::select_k<value_t, idx_t, 8, 512>(
        in_keys, in_values, n_inputs, input_len, k, out_keys, out_values, select_min, true, stream);
      break;

    case SelectKAlgo::RADIX_11_BITS:
      raft::matrix::detail::select::radix::select_k<value_t, idx_t, 11, 512>(
        in_keys, in_values, n_inputs, input_len, k, out_keys, out_values, select_min, true, stream);
      break;

    case SelectKAlgo::WARP_SORT:
      raft::matrix::detail::select::warpsort::select_k<value_t, idx_t>(
        in_keys, in_values, n_inputs, input_len, k, out_keys, out_values, select_min, stream);
      break;

    default: ASSERT(false, "Unknown algorithm (id = %d)", int(algo));
  }
}
/**
* @brief Flat C++ API function to perform a brute force knn on
* a series of input arrays and combine the results into a single
* output array for indexes and distances.
*
 * @param[in] handle the raft resources handle to use
* @param[in] input vector of pointers to the input arrays
* @param[in] sizes vector of sizes of input arrays
* @param[in] D the dimensionality of the arrays
* @param[in] search_items array of items to search of dimensionality D
* @param[in] n number of rows in search_items
* @param[out] res_I the resulting index array of size n * k
* @param[out] res_D the resulting distance array of size n * k
* @param[in] k the number of nearest neighbors to return
* @param[in] rowMajorIndex are the index arrays in row-major order?
* @param[in] rowMajorQuery are the query arrays in row-major order?
* @param[in] metric distance metric to use. Euclidean (L2) is used by
* default
* @param[in] metric_arg the value of `p` for Minkowski (l-p) distances. This
* is ignored if the metric_type is not Minkowski.
* @param[in] translations starting offsets for partitions. should be the same size
* as input vector.
*/
template <typename idx_t = std::int64_t, typename value_t = float, typename value_int = int>
void brute_force_knn(raft::resources const& handle,
std::vector<value_t*>& input,
std::vector<value_int>& sizes,
value_int D,
value_t* search_items,
value_int n,
idx_t* res_I,
value_t* res_D,
value_int k,
bool rowMajorIndex = true,
bool rowMajorQuery = true,
std::vector<idx_t>* translations = nullptr,
distance::DistanceType metric = distance::DistanceType::L2Unexpanded,
float metric_arg = 2.0f)
{
ASSERT(input.size() == sizes.size(), "input and sizes vectors must be the same size");
cuvs::neighbors::detail::brute_force_knn_impl(handle,
input,
sizes,
D,
search_items,
n,
res_I,
res_D,
k,
rowMajorIndex,
rowMajorQuery,
translations,
metric,
metric_arg);
}
} // namespace cuvs::spatial::knn
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/ann_types.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/distance_types.hpp>
namespace cuvs::spatial::knn {
/** The base for approximate KNN index structures. */
struct index {};

/** The base for KNN index parameters. */
struct index_params {
  /** Distance type. */
  cuvs::distance::DistanceType metric = distance::DistanceType::L2Expanded;
  /** The argument used by some distance metrics. */
  float metric_arg = 2.0f;
  /**
   * Whether to add the dataset content to the index, i.e.:
   *
   * - `true` means the index is filled with the dataset vectors and ready to search after calling
   * `build`.
   * - `false` means `build` only trains the underlying model (e.g. quantizer or clustering), but
   * the index is left empty; you'd need to call `extend` on the index afterwards to populate it.
   */
  bool add_data_on_build = true;
};

/** The base for approximate KNN search parameters (no common fields yet). */
struct search_params {};
}; // namespace cuvs::spatial::knn
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/ivf_pq.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * This file is deprecated and will be removed in a future release.
 * Please use <cuvs/neighbors/ivf_pq.cuh> instead.
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the cuvs::neighbors version instead.")
#include <cuvs/neighbors/ivf_pq.cuh>
namespace cuvs::spatial::knn::ivf_pq {
using cuvs::neighbors::ivf_pq::build;
using cuvs::neighbors::ivf_pq::extend;
using cuvs::neighbors::ivf_pq::search;
} // namespace cuvs::spatial::knn::ivf_pq
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/ball_cover.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * This file is deprecated and will be removed in a future release.
 * Please use <cuvs/neighbors/ball_cover.cuh> instead.
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the cuvs::neighbors version instead.")
#include <cuvs/neighbors/ball_cover.cuh>
#include <cuvs/spatial/knn/ball_cover_types.hpp>
namespace cuvs::spatial::knn {
/**
 * @brief Builds a ball cover index. Deprecated shim: forwards directly to
 * cuvs::neighbors::ball_cover::build_index.
 *
 * @param[in] handle raft resources handle
 * @param[inout] index the ball cover index to build
 */
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void rbc_build_index(raft::resources const& handle,
                     BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index)
{
  cuvs::neighbors::ball_cover::build_index(handle, index);
}
/**
 * @brief Performs a k-nearest-neighbors query using every point of the index
 * as a query point. Deprecated shim: forwards directly to
 * cuvs::neighbors::ball_cover::all_knn_query.
 *
 * @param[in] handle raft resources handle
 * @param[inout] index previously built ball cover index
 * @param[in] k number of nearest neighbors to return
 * @param[out] inds output neighbor indices
 * @param[out] dists output neighbor distances
 * @param[in] perform_post_filtering forwarded as-is (see the
 * cuvs::neighbors::ball_cover documentation for semantics)
 * @param[in] weight forwarded as-is (see the cuvs::neighbors::ball_cover
 * documentation for semantics)
 */
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void rbc_all_knn_query(raft::resources const& handle,
                       BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
                       int_t k,
                       idx_t* inds,
                       value_t* dists,
                       bool perform_post_filtering = true,
                       float weight                = 1.0)
{
  cuvs::neighbors::ball_cover::all_knn_query(
    handle, index, k, inds, dists, perform_post_filtering, weight);
}
/**
 * @brief Performs a k-nearest-neighbors query against a built ball cover
 * index for an external set of query points. Deprecated shim: forwards
 * directly to cuvs::neighbors::ball_cover::knn_query.
 *
 * @param[in] handle raft resources handle
 * @param[in] index previously built ball cover index
 * @param[in] k number of nearest neighbors to return
 * @param[in] query the query points
 * @param[in] n_query_pts number of query points
 * @param[out] inds output neighbor indices
 * @param[out] dists output neighbor distances
 * @param[in] perform_post_filtering forwarded as-is (see the
 * cuvs::neighbors::ball_cover documentation for semantics)
 * @param[in] weight forwarded as-is (see the cuvs::neighbors::ball_cover
 * documentation for semantics)
 */
template <typename idx_t, typename value_t, typename int_t>
void rbc_knn_query(raft::resources const& handle,
                   const BallCoverIndex<idx_t, value_t, int_t>& index,
                   int_t k,
                   const value_t* query,
                   int_t n_query_pts,
                   idx_t* inds,
                   value_t* dists,
                   bool perform_post_filtering = true,
                   float weight                = 1.0)
{
  cuvs::neighbors::ball_cover::knn_query(
    handle, index, k, query, n_query_pts, inds, dists, perform_post_filtering, weight);
}
} // namespace cuvs::spatial::knn
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/specializations.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/ann.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ann_common.h"
#include "detail/ann_quantized.cuh"
#include <raft/core/nvtx.hpp>
namespace cuvs::spatial::knn {
/**
 * @brief Flat C++ API function to build an approximate nearest neighbors index
 * from an index array and a set of parameters.
 *
 * Deprecated legacy entry point; wraps detail::approx_knn_build_index with an
 * NVTX range for profiling.
 *
 * @param[in] handle RAFT handle
 * @param[out] index index to be built
 * @param[in] params parametrization of the index to be built
 * @param[in] metric distance metric to use. Euclidean (L2) is used by default
 * @param[in] metricArg metric argument
 * @param[in] index_array the index array to build the index with
 * @param[in] n number of rows in the index array
 * @param[in] D the dimensionality of the index array
 */
template <typename T = float, typename value_idx = int>
[[deprecated("Consider using new-style cuvs::spatial::knn::*::build functions")]] inline void
approx_knn_build_index(raft::resources& handle,
                       cuvs::spatial::knn::knnIndex* index,
                       knnIndexParam* params,
                       cuvs::distance::DistanceType metric,
                       float metricArg,
                       T* index_array,
                       value_idx n,
                       value_idx D)
{
  // NOTE(review): the "%u" specifiers assume value_idx is a 32-bit type; with a
  // 64-bit value_idx the format/argument types would mismatch — confirm callers.
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope(
    "legacy approx_knn_build_index(n_rows = %u, dim = %u)", n, D);
  detail::approx_knn_build_index(handle, index, params, metric, metricArg, index_array, n, D);
}
/**
 * @brief Flat C++ API function to perform an approximate nearest neighbors
 * search from previously built index and a query array.
 *
 * Deprecated legacy entry point; wraps detail::approx_knn_search with an
 * NVTX range for profiling.
 *
 * @param[in] handle RAFT handle
 * @param[out] distances distances of the nearest neighbors toward
 * their query point
 * @param[out] indices indices of the nearest neighbors
 * @param[in] index index to perform a search with
 * @param[in] k the number of nearest neighbors to search for
 * @param[in] query_array the query to perform a search with
 * @param[in] n number of rows in the query array
 */
template <typename T = float, typename value_idx = int>
[[deprecated("Consider using new-style cuvs::spatial::knn::*::search functions")]] inline void
approx_knn_search(raft::resources& handle,
                  float* distances,
                  int64_t* indices,
                  cuvs::spatial::knn::knnIndex* index,
                  value_idx k,
                  T* query_array,
                  value_idx n)
{
  // NOTE(review): "%u" assumes value_idx is 32-bit — see approx_knn_build_index.
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope(
    "legacy approx_knn_search(k = %u, n_queries = %u)", k, n);
  detail::approx_knn_search(handle, distances, indices, index, k, query_array, n);
}
} // namespace cuvs::spatial::knn
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/ball_cover_types.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * This file is deprecated and will be removed in a future release.
 * Please use <cuvs/neighbors/ball_cover_types.hpp> instead.
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the cuvs::neighbors version instead.")
#include <cuvs/neighbors/ball_cover_types.hpp>
namespace cuvs::spatial::knn {
using cuvs::neighbors::ball_cover::BallCoverIndex;
} // namespace cuvs::spatial::knn
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/ivf_flat_types.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * This file is deprecated and will be removed in a future release.
 * Please use <cuvs/neighbors/ivf_flat_types.hpp> instead.
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the cuvs::neighbors version instead.")
#include <cuvs/neighbors/ivf_flat_types.hpp>
namespace cuvs::spatial::knn::ivf_flat {
using cuvs::neighbors::ivf_flat::index;
using cuvs::neighbors::ivf_flat::index_params;
using cuvs::neighbors::ivf_flat::kIndexGroupSize;
using cuvs::neighbors::ivf_flat::search_params;
}; // namespace cuvs::spatial::knn::ivf_flat
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/common.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in a future release.
* Please use the ann_types.hpp version instead.
*/
#pragma once
#include <cuvs/spatial/knn/ann_types.hpp>
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/fused_l2_knn-ext.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef> // size_t
#include <cstdint> // uint32_t
#include <cuvs/distance/distance_types.hpp> // DistanceType
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#if defined(RAFT_EXPLICIT_INSTANTIATE_ONLY)
namespace cuvs::spatial::knn::detail {
/**
 * @brief Declaration of the fused L2 k-nearest-neighbors routine (distance
 * computation and top-k selection fused into one pass).
 *
 * Declaration only (RAFT_EXPLICIT); the supported template instantiations are
 * declared `extern` via the macro below and compiled elsewhere.
 *
 * @param[in] D dimensionality of the index/query rows
 * @param[out] out_inds output neighbor indices
 * @param[out] out_dists output neighbor distances
 * @param[in] index index (database) matrix
 * @param[in] query query matrix
 * @param[in] n_index_rows number of rows in `index`
 * @param[in] n_query_rows number of rows in `query`
 * @param[in] k number of nearest neighbors to return
 * @param[in] rowMajorIndex is `index` row-major?
 * @param[in] rowMajorQuery is `query` row-major?
 * @param[in] stream CUDA stream to order the work on
 * @param[in] metric distance metric to use
 * @param[in] index_norms optional precomputed row norms of `index`
 * @param[in] query_norms optional precomputed row norms of `query`
 */
template <typename value_idx, typename value_t, bool usePrevTopKs = false>
void fusedL2Knn(size_t D,
                value_idx* out_inds,
                value_t* out_dists,
                const value_t* index,
                const value_t* query,
                size_t n_index_rows,
                size_t n_query_rows,
                int k,
                bool rowMajorIndex,
                bool rowMajorQuery,
                cudaStream_t stream,
                cuvs::distance::DistanceType metric,
                const value_t* index_norms = NULL,
                const value_t* query_norms = NULL) RAFT_EXPLICIT;
} // namespace cuvs::spatial::knn::detail
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
#define instantiate_raft_spatial_knn_detail_fusedL2Knn(Mvalue_idx, Mvalue_t, MusePrevTopKs) \
extern template void \
cuvs::spatial::knn::detail::fusedL2Knn<Mvalue_idx, Mvalue_t, MusePrevTopKs>( \
size_t D, \
Mvalue_idx * out_inds, \
Mvalue_t * out_dists, \
const Mvalue_t* index, \
const Mvalue_t* query, \
size_t n_index_rows, \
size_t n_query_rows, \
int k, \
bool rowMajorIndex, \
bool rowMajorQuery, \
cudaStream_t stream, \
cuvs::distance::DistanceType metric, \
const Mvalue_t* index_norms, \
const Mvalue_t* query_norms);
instantiate_raft_spatial_knn_detail_fusedL2Knn(int32_t, float, true);
instantiate_raft_spatial_knn_detail_fusedL2Knn(int32_t, float, false);
instantiate_raft_spatial_knn_detail_fusedL2Knn(int64_t, float, true);
instantiate_raft_spatial_knn_detail_fusedL2Knn(int64_t, float, false);
// These are used by brute_force_knn:
instantiate_raft_spatial_knn_detail_fusedL2Knn(uint32_t, float, true);
instantiate_raft_spatial_knn_detail_fusedL2Knn(uint32_t, float, false);
#undef instantiate_raft_spatial_knn_detail_fusedL2Knn
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/haversine_distance.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/pow2_utils.cuh>
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/neighbors/detail/faiss_select/Select.cuh>
#include <raft/core/resources.hpp>
namespace cuvs {
namespace spatial {
namespace knn {
namespace detail {
/**
 * @brief Compute the haversine (great-circle) distance between two 2D points
 * given in radians.
 *
 * Argument layout (see the call site in haversine_knn_kernel): (x1, x2) are
 * the two coordinates of the first point and (y1, y2) those of the second,
 * so (x1 - y1) and (x2 - y2) are the per-coordinate differences.
 */
template <typename value_t>
DI value_t compute_haversine(value_t x1, value_t y1, value_t x2, value_t y2)
{
  // haversine formula: a = sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2)
  value_t sin_0 = raft::sin(0.5 * (x1 - y1));
  value_t sin_1 = raft::sin(0.5 * (x2 - y2));
  value_t rdist = sin_0 * sin_0 + raft::cos(x1) * raft::cos(y1) * sin_1 * sin_1;
  // central angle: d = 2 * asin(sqrt(a))
  return 2 * raft::asin(raft::sqrt(rdist));
}
/**
 * Kernel computing the k nearest neighbors of one query row per thread block
 * under the haversine distance. Each block scans all index rows, keeps the k
 * smallest distances in a faiss-style BlockSelect heap backed by shared
 * memory, then writes the selected k (distance, index) pairs for its query.
 *
 * @tparam value_idx data type of indices
 * @tparam value_t data type of values and distances
 * @tparam warp_q per-warp queue length of the BlockSelect heap
 * @tparam thread_q per-thread queue length of the BlockSelect heap
 * @tparam tpb threads per block (must match the launch configuration)
 * @param[out] out_inds output indices
 * @param[out] out_dists output distances
 * @param[in] index index array (2 coordinates per row, radians)
 * @param[in] query query array (2 coordinates per row, radians)
 * @param[in] n_index_rows number of rows in index array
 * @param[in] k number of closest neighbors to return
 */
template <typename value_idx, typename value_t, int warp_q = 1024, int thread_q = 8, int tpb = 128>
RAFT_KERNEL haversine_knn_kernel(value_idx* out_inds,
                                 value_t* out_dists,
                                 const value_t* index,
                                 const value_t* query,
                                 size_t n_index_rows,
                                 int k)
{
  constexpr int kNumWarps = tpb / raft::WarpSize;
  // Shared-memory backing for the block-wide top-k selection.
  __shared__ value_t smemK[kNumWarps * warp_q];
  __shared__ value_idx smemV[kNumWarps * warp_q];
  using namespace cuvs::neighbors::detail::faiss_select;
  // Heap initialized with sentinel (max distance, max index) entries.
  BlockSelect<value_t, value_idx, false, Comparator<value_t>, warp_q, thread_q, tpb> heap(
    std::numeric_limits<value_t>::max(), std::numeric_limits<value_idx>::max(), smemK, smemV, k);
  // Grid is exactly sized to rows available
  int limit = raft::Pow2<raft::WarpSize>::roundDown(n_index_rows);
  // blockIdx.x selects the query row; each row holds 2 coordinates.
  const value_t* query_ptr = query + (blockIdx.x * 2);
  value_t x1 = query_ptr[0];
  value_t x2 = query_ptr[1];
  int i = threadIdx.x;
  // Full-warp portion: all lanes take the same number of iterations.
  for (; i < limit; i += tpb) {
    const value_t* idx_ptr = index + (i * 2);
    value_t y1 = idx_ptr[0];
    value_t y2 = idx_ptr[1];
    value_t dist = compute_haversine(x1, y1, x2, y2);
    heap.add(dist, i);
  }
  // Handle last remainder fraction of a warp of elements
  if (i < n_index_rows) {
    const value_t* idx_ptr = index + (i * 2);
    value_t y1 = idx_ptr[0];
    value_t y2 = idx_ptr[1];
    value_t dist = compute_haversine(x1, y1, x2, y2);
    heap.addThreadQ(dist, i);
  }
  heap.reduce();
  // Write this query row's k selected (distance, index) pairs to global memory.
  for (int i = threadIdx.x; i < k; i += tpb) {
    out_dists[blockIdx.x * k + i] = smemK[i];
    out_inds[blockIdx.x * k + i] = smemV[i];
  }
}
/**
 * Compute the k-nearest neighbors using the Haversine
 * (great circle arc) distance. Input is assumed to have
 * 2 dimensions (latitude, longitude) in radians.
 *
 * Launches one thread block of 128 threads per query row.
 *
 * @tparam value_idx
 * @tparam value_t
 * @param[out] out_inds output indices array on device (size n_query_rows * k)
 * @param[out] out_dists output dists array on device (size n_query_rows * k)
 * @param[in] index input index array on device (size n_index_rows * 2)
 * @param[in] query input query array on device (size n_query_rows * 2)
 * @param[in] n_index_rows number of rows in index array
 * @param[in] n_query_rows number of rows in query array
 * @param[in] k number of closest neighbors to return
 * @param[in] stream stream to order kernel launch
 */
template <typename value_idx, typename value_t>
void haversine_knn(value_idx* out_inds,
                   value_t* out_dists,
                   const value_t* index,
                   const value_t* query,
                   size_t n_index_rows,
                   size_t n_query_rows,
                   int k,
                   cudaStream_t stream)
{
  // Block size 128 matches the kernel's default tpb template parameter.
  haversine_knn_kernel<<<n_query_rows, 128, 0, stream>>>(
    out_inds, out_dists, index, query, n_index_rows, k);
}
} // namespace detail
} // namespace knn
} // namespace spatial
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/processing.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace cuvs {
namespace spatial {
namespace knn {
/**
 * @brief A virtual class defining pre- and post-processing
 * for metrics. This class will temporarily modify its given
 * state in `preprocess()` and undo those modifications in
 * `postprocess()`
 *
 * All methods default to no-ops, so subclasses override only the
 * hooks relevant to their metric.
 */
template <typename math_t>
class MetricProcessor {
 public:
  /** Modify `data` in place before the distance computation. */
  virtual void preprocess(math_t* data) {}
  /** Undo the modifications made by `preprocess` on `data`. */
  virtual void revert(math_t* data) {}
  /** Adjust `data` (e.g. computed results) after the distance computation. */
  virtual void postprocess(math_t* data) {}
  /** Inform the processor of the query count/size it will operate on. */
  virtual void set_num_queries(int k) {}
  virtual ~MetricProcessor() = default;
};
} // namespace knn
} // namespace spatial
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/fused_l2_knn-inl.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <cuvs/neighbors/detail/faiss_select/Select.cuh>
#include <limits>
#include <raft/linalg/norm.cuh>
// TODO: Need to hide the PairwiseDistance class impl and expose to public API
#include "processing.cuh"
#include <cuvs/distance/detail/distance.cuh>
#include <cuvs/distance/detail/distance_ops/l2_exp.cuh>
#include <cuvs/distance/detail/distance_ops/l2_unexp.cuh>
#include <cuvs/distance/detail/pairwise_distance_base.cuh>
#include <raft/core/operators.hpp>
#include <raft/util/cuda_utils.cuh>
namespace cuvs {
namespace spatial {
namespace knn {
namespace detail {
/**
 * Reload the warp-queue registers (warpK/warpV) of every per-row heap in
 * `heapArr` from the shared-memory key/value dump `shDumpKV`. Each lane
 * reads the queue slots it owns (slot index = j * warpSize + laneId), for
 * rows this thread is responsible for under the tiling `Policy`.
 *
 * @param[inout] heapArr one warp-select heap per accumulated row
 * @param[in] shDumpKV shared-memory dump laid out as [row][numOfNN]
 * @param[in] m total number of rows (bounds check)
 * @param[in] numOfNN number of valid queue entries per row
 */
template <typename Policy, typename Pair, typename myWarpSelect, typename IdxT>
DI void loadAllWarpQShmem(myWarpSelect** heapArr,
                          Pair* shDumpKV,
                          const IdxT m,
                          const unsigned int numOfNN)
{
  const int lid = raft::laneId();
#pragma unroll
  for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
    // Row within the thread block's tile that this thread accumulates.
    const auto rowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
    if (rowId < m) {
#pragma unroll
      for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
        const int idx = j * warpSize + lid;
        if (idx < numOfNN) {
          Pair KVPair = shDumpKV[rowId * numOfNN + idx];
          heapArr[i]->warpV[j] = KVPair.key;
          heapArr[i]->warpK[j] = KVPair.value;
        }
      }
    }
  }
}
/**
 * Reload the warp-queue registers (warpK/warpV) of a single heap from the
 * shared-memory key/value dump for one row. Each lane reads the queue slots
 * it owns (slot index = j * warpSize + laneId).
 *
 * @param[inout] heapArr warp-select heap for the row
 * @param[in] shDumpKV shared-memory dump laid out as [row][numOfNN]
 * @param[in] rowId row whose queue entries are loaded
 * @param[in] numOfNN number of valid queue entries per row
 */
template <typename Policy, typename Pair, typename myWarpSelect>
DI void loadWarpQShmem(myWarpSelect* heapArr,
                       Pair* shDumpKV,
                       const int rowId,
                       const unsigned int numOfNN)
{
  const int lid = raft::laneId();
#pragma unroll
  for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
    const int idx = j * warpSize + lid;
    if (idx < numOfNN) {
      Pair KVPair = shDumpKV[rowId * numOfNN + idx];
      heapArr->warpV[j] = KVPair.key;
      heapArr->warpK[j] = KVPair.value;
    }
  }
}
/**
 * Dump a heap's warp-queue registers (warpK/warpV) into the shared-memory
 * key/value buffer for one row — the inverse of loadWarpQShmem. Each lane
 * writes the queue slots it owns (slot index = j * warpSize + laneId).
 *
 * @param[in] heapArr warp-select heap for the row
 * @param[out] shDumpKV shared-memory dump laid out as [row][numOfNN]
 * @param[in] rowId row whose queue entries are stored
 * @param[in] numOfNN number of valid queue entries per row
 */
template <typename Policy, typename Pair, typename myWarpSelect, typename IdxT>
DI void storeWarpQShmem(myWarpSelect* heapArr,
                        Pair* shDumpKV,
                        const IdxT rowId,
                        const unsigned int numOfNN)
{
  const int lid = raft::laneId();
#pragma unroll
  for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
    const int idx = j * warpSize + lid;
    if (idx < numOfNN) {
      // Pair is (key = index, value = distance), mirroring the heap layout.
      Pair otherKV = Pair(heapArr->warpV[j], heapArr->warpK[j]);
      shDumpKV[rowId * numOfNN + idx] = otherKV;
    }
  }
}
/**
 * Write the warp-queue contents of every per-row heap to the global output
 * arrays. Each lane stores the queue slots it owns (slot index =
 * j * warpSize + laneId) at out[row * numOfNN + slot].
 *
 * @param[in] heapArr one warp-select heap per accumulated row
 * @param[out] out_dists global distances buffer, [m][numOfNN]
 * @param[out] out_inds global indices buffer, [m][numOfNN]
 * @param[in] m total number of rows (bounds check)
 * @param[in] numOfNN number of valid queue entries per row
 * @param[in] starty first global row handled by this thread
 */
template <typename Policy, typename Pair, typename myWarpSelect, typename IdxT, typename OutT>
DI void storeWarpQGmem(myWarpSelect** heapArr,
                       volatile OutT* out_dists,
                       volatile IdxT* out_inds,
                       const IdxT m,
                       const unsigned int numOfNN,
                       const IdxT starty)
{
  const int lid = raft::laneId();
#pragma unroll
  for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
    const auto gmemRowId = starty + i * Policy::AccThRows;
    if (gmemRowId < m) {
#pragma unroll
      for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
        const auto idx = j * warpSize + lid;
        if (idx < numOfNN) {
          // size_t cast keeps the flattened offset from overflowing 32 bits.
          out_dists[std::size_t(gmemRowId) * numOfNN + idx] = heapArr[i]->warpK[j];
          out_inds[std::size_t(gmemRowId) * numOfNN + idx] = (IdxT)heapArr[i]->warpV[j];
        }
      }
    }
  }
}
/**
 * Initialize the per-row heaps from previously computed top-k results stored
 * in global memory (used when usePrevTopKs is enabled). After loading the
 * queue registers, refreshes each heap's cached threshold `warpKTop` by
 * shuffling the relevant register value from the lane that owns it.
 *
 * @param[out] heapArr one warp-select heap per accumulated row
 * @param[in] out_dists global distances buffer, [m][numOfNN]
 * @param[in] out_inds global indices buffer, [m][numOfNN]
 * @param[in] m total number of rows (bounds check)
 * @param[in] numOfNN number of valid queue entries per row
 * @param[in] starty first global row handled by this thread
 */
template <typename Policy, typename Pair, typename myWarpSelect, typename IdxT, typename OutT>
DI void loadPrevTopKsGmemWarpQ(myWarpSelect** heapArr,
                               volatile OutT* out_dists,
                               volatile IdxT* out_inds,
                               const IdxT m,
                               const unsigned int numOfNN,
                               const IdxT starty)
{
  const int lid = raft::laneId();
#pragma unroll
  for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
    const auto gmemRowId = starty + i * Policy::AccThRows;
    if (gmemRowId < m) {
#pragma unroll
      for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
        const auto idx = j * warpSize + lid;
        if (idx < numOfNN) {
          heapArr[i]->warpK[j] = out_dists[std::size_t(gmemRowId) * numOfNN + idx];
          heapArr[i]->warpV[j] = (uint32_t)out_inds[std::size_t(gmemRowId) * numOfNN + idx];
        }
      }
      // The current k-th-best value lives in the last queue register on lane
      // `kLane`; broadcast it into warpKTop so candidate filtering works.
      static constexpr auto kLaneWarpKTop = myWarpSelect::kNumWarpQRegisters - 1;
      heapArr[i]->warpKTop = raft::shfl(heapArr[i]->warpK[kLaneWarpKTop], heapArr[i]->kLane);
    }
  }
}
/**
 * Insert candidate (distance, index) pairs from `allWarpTopKs` into the
 * warp-distributed sorted queue held in `heapArr`'s registers, preserving
 * sorted order. For each candidate, a ballot finds the first queue position
 * whose key it beats; lanes at and after that position shift their entries
 * to the next lane (via shuffle) to make room.
 *
 * NOTE(review): the row stride of 256 in `allWarpTopKs` matches the
 * "total vals can atmost be 256 (32*8)" bound noted at the call sites —
 * confirm if the tiling policy changes.
 *
 * @param[inout] heapArr warp-select heap whose registers hold the sorted queue
 * @param[in] allWarpTopKs candidate pairs, laid out as [row][256]
 * @param[in] rowId row whose candidates are merged
 * @param[in] finalNumVals one past the last candidate index to merge
 * @param[in] startId first candidate index to merge
 */
template <typename Pair, int NumWarpQRegs, typename myWarpSelect>
DI void updateSortedWarpQ(
  myWarpSelect& heapArr, Pair* allWarpTopKs, int rowId, int finalNumVals, int startId = 0)
{
  constexpr uint32_t mask = 0xffffffffu;
  const int lid = raft::laneId();
  // calculate srcLane such that tid 0 -> 31, 1 -> 0,... 31 -> 30.
  // warp around 0 to 31 required for NN > 32
  const auto srcLane = (warpSize + (lid - 1)) & (warpSize - 1);
  for (int k = startId; k < finalNumVals; k++) {
    Pair KVPair = allWarpTopKs[rowId * (256) + k];
#pragma unroll
    for (int i = 0; i < NumWarpQRegs; i++) {
      // Lanes whose current entry is worse than the candidate.
      unsigned activeLanes = __ballot_sync(mask, KVPair.value < heapArr->warpK[i]);
      if (activeLanes) {
        // Pull the previous lane's entry — this is the shift-right by one.
        Pair tempKV;
        tempKV.value = raft::shfl(heapArr->warpK[i], srcLane);
        tempKV.key = raft::shfl(heapArr->warpV[i], srcLane);
        const auto firstActiveLane = __ffs(activeLanes) - 1;
        if (firstActiveLane == lid) {
          // Insertion point: take the candidate.
          heapArr->warpK[i] = KVPair.value;
          heapArr->warpV[i] = KVPair.key;
        } else if (lid > firstActiveLane) {
          // After the insertion point: take the shifted entry.
          heapArr->warpK[i] = tempKV.value;
          heapArr->warpV[i] = tempKV.key;
        }
        if (i == 0 && NumWarpQRegs > 1) {
          // Entry displaced off register 0 spills into register 1.
          heapArr->warpK[1] = __shfl_up_sync(mask, heapArr->warpK[1], 1);
          heapArr->warpV[1] = __shfl_up_sync(mask, heapArr->warpV[1], 1);
          if (lid == 0) {
            heapArr->warpK[1] = tempKV.value;
            heapArr->warpV[1] = tempKV.key;
          }
          break;
        }
      }
    }
  }
}
template <typename DataT,
typename OutT,
typename IdxT,
typename Policy,
typename OpT,
typename FinalLambda,
int NumWarpQ,
int NumThreadQ,
bool usePrevTopKs = false,
bool isRowMajor = true>
__launch_bounds__(Policy::Nthreads, 2) RAFT_KERNEL fusedL2kNN(const DataT* x,
const DataT* y,
const DataT* _xn,
const DataT* _yn,
const IdxT m,
const IdxT n,
const IdxT k,
const IdxT lda,
const IdxT ldb,
const IdxT ldd,
OpT distance_op,
FinalLambda fin_op,
unsigned int numOfNN,
volatile int* mutexes,
volatile OutT* out_dists,
volatile IdxT* out_inds)
{
using AccT = typename OpT::AccT;
extern __shared__ char smem[];
typedef cub::KeyValuePair<uint32_t, AccT> Pair;
constexpr auto identity = std::numeric_limits<AccT>::max();
constexpr auto keyMax = std::numeric_limits<uint32_t>::max();
constexpr auto Dir = false;
using namespace cuvs::neighbors::detail::faiss_select;
typedef WarpSelect<AccT, uint32_t, Dir, Comparator<AccT>, NumWarpQ, NumThreadQ, 32> myWarpSelect;
auto rowEpilog_lambda =
[m, n, &distance_op, numOfNN, out_dists, out_inds, mutexes] __device__(IdxT gridStrideY) {
if (gridDim.x == 1) { return; }
// Use ::template to disambiguate (See:
// https://en.cppreference.com/w/cpp/language/dependent_name)
int smem_offset = OpT::template shared_mem_size<Policy>();
Pair* shDumpKV = (Pair*)(&smem[smem_offset]);
const int lid = threadIdx.x % warpSize;
const IdxT starty = gridStrideY + (threadIdx.x / Policy::AccThCols);
// 0 -> consumer done consuming the buffer.
// -1 -> consumer started consuming the buffer
// -2 -> producer done filling the buffer
// 1 -> prod acquired to fill the buffer
if (blockIdx.x == 0) {
auto cta_processed = 0;
myWarpSelect heapArr1(identity, keyMax, numOfNN);
myWarpSelect heapArr2(identity, keyMax, numOfNN);
myWarpSelect* heapArr[] = {&heapArr1, &heapArr2};
__syncwarp();
loadAllWarpQShmem<Policy, Pair>(heapArr, &shDumpKV[0], m, numOfNN);
while (cta_processed < gridDim.x - 1) {
if (threadIdx.x == 0) {
while (atomicCAS((int*)&mutexes[gridStrideY / Policy::Mblk], -2, -1) != -2)
;
}
__threadfence();
__syncthreads();
#pragma unroll
for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
const auto rowId = starty + i * Policy::AccThRows;
if (rowId < m) {
#pragma unroll
for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
Pair otherKV;
otherKV.value = identity;
otherKV.key = keyMax;
const auto idx = j * warpSize + lid;
if (idx < numOfNN) {
otherKV.value = out_dists[rowId * numOfNN + idx];
otherKV.key = (uint32_t)out_inds[rowId * numOfNN + idx];
const auto shMemRowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
shDumpKV[shMemRowId * numOfNN + idx] = otherKV;
}
}
}
}
__threadfence();
__syncthreads();
if (threadIdx.x == 0) { atomicExch((int*)&mutexes[gridStrideY / Policy::Mblk], 0); }
__threadfence();
// Perform merging of otherKV with topk's across warp.
#pragma unroll
for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
const auto rowId = starty + i * Policy::AccThRows;
if (rowId < m) {
#pragma unroll
for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
Pair otherKV;
otherKV.value = identity;
otherKV.key = keyMax;
const auto idx = j * warpSize + lid;
if (idx < numOfNN) {
const auto shMemRowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
otherKV = shDumpKV[shMemRowId * numOfNN + idx];
}
heapArr[i]->add(otherKV.value, otherKV.key);
}
}
}
cta_processed++;
}
#pragma unroll
for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
const auto rowId = starty + i * Policy::AccThRows;
if (rowId < m) {
bool needSort = (heapArr[i]->numVals > 0);
needSort = __any_sync(0xffffffff, needSort);
if (needSort) { heapArr[i]->reduce(); }
}
}
storeWarpQGmem<Policy, Pair>(heapArr, out_dists, out_inds, m, numOfNN, starty);
} else {
if (threadIdx.x == 0) {
while (atomicCAS((int*)&mutexes[gridStrideY / Policy::Mblk], 0, 1) != 0)
;
}
__threadfence();
__syncthreads();
#pragma unroll
for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
const auto rowId = starty + i * Policy::AccThRows;
if (rowId < m) {
for (int idx = lid; idx < numOfNN; idx += warpSize) {
const auto shMemRowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
Pair KVPair = shDumpKV[shMemRowId * numOfNN + idx];
out_dists[rowId * numOfNN + idx] = KVPair.value;
out_inds[rowId * numOfNN + idx] = (IdxT)KVPair.key;
}
}
}
__threadfence();
__syncthreads();
if (threadIdx.x == 0) { atomicExch((int*)&mutexes[gridStrideY / Policy::Mblk], -2); }
__threadfence();
}
};
// epilogue operation lambda for final value calculation
auto epilog_lambda =
[&distance_op, numOfNN, m, n, ldd, out_dists, out_inds, keyMax, identity] __device__(
AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
DataT * regxn,
DataT * regyn,
IdxT gridStrideX,
IdxT gridStrideY) {
// Use ::template to disambiguate (See:
// https://en.cppreference.com/w/cpp/language/dependent_name)
int smem_offset = OpT::template shared_mem_size<Policy>();
Pair* shDumpKV = (Pair*)(&smem[smem_offset]);
constexpr uint32_t mask = 0xffffffffu;
const IdxT starty = gridStrideY + (threadIdx.x / Policy::AccThCols);
const IdxT startx = gridStrideX + (threadIdx.x % Policy::AccThCols);
const int lid = raft::laneId();
myWarpSelect heapArr1(identity, keyMax, numOfNN);
myWarpSelect heapArr2(identity, keyMax, numOfNN);
myWarpSelect* heapArr[] = {&heapArr1, &heapArr2};
if (usePrevTopKs) {
if (gridStrideX == blockIdx.x * Policy::Nblk) {
loadPrevTopKsGmemWarpQ<Policy, Pair>(heapArr, out_dists, out_inds, m, numOfNN, starty);
}
}
if (gridStrideX > blockIdx.x * Policy::Nblk) {
#pragma unroll
for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
const auto rowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
Pair tempKV = shDumpKV[(rowId * numOfNN) + numOfNN - 1];
heapArr[i]->warpKTop = tempKV.value;
}
// total vals can atmost be 256, (32*8)
int numValsWarpTopK[Policy::AccRowsPerTh];
int anyWarpTopKs = 0;
#pragma unroll
for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
const auto rowId = starty + i * Policy::AccThRows;
numValsWarpTopK[i] = 0;
if (rowId < m) {
#pragma unroll
for (int j = 0; j < Policy::AccColsPerTh; ++j) {
const auto colId = startx + j * Policy::AccThCols;
if (colId < ldd) {
if (acc[i][j] < heapArr[i]->warpKTop) { numValsWarpTopK[i]++; }
}
}
anyWarpTopKs += numValsWarpTopK[i];
}
}
anyWarpTopKs = __syncthreads_or(anyWarpTopKs > 0);
if (anyWarpTopKs) {
Pair* allWarpTopKs = (Pair*)(&smem[0]);
uint32_t needScanSort[Policy::AccRowsPerTh];
#pragma unroll
for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
const auto gmemRowId = starty + i * Policy::AccThRows;
needScanSort[i] = 0;
if (gmemRowId < m) {
int myVals = numValsWarpTopK[i];
needScanSort[i] = __ballot_sync(mask, myVals > 0);
if (needScanSort[i]) {
#pragma unroll
for (unsigned int k = 1; k <= 16; k *= 2) {
const unsigned int n = __shfl_up_sync(mask, numValsWarpTopK[i], k);
if (lid >= k) { numValsWarpTopK[i] += n; }
}
}
// As each thread will know its total vals to write.
// we only store its starting location.
numValsWarpTopK[i] -= myVals;
}
if (needScanSort[i]) {
const auto rowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
if (gmemRowId < m) {
if (needScanSort[i] & ((uint32_t)1 << lid)) {
#pragma unroll
for (int j = 0; j < Policy::AccColsPerTh; ++j) {
const auto colId = startx + j * Policy::AccThCols;
if (colId < ldd) {
if (acc[i][j] < heapArr[i]->warpKTop) {
Pair otherKV = {colId, acc[i][j]};
allWarpTopKs[rowId * (256) + numValsWarpTopK[i]] = otherKV;
numValsWarpTopK[i]++;
}
}
}
}
__syncwarp();
const int finalNumVals = raft::shfl(numValsWarpTopK[i], 31);
loadWarpQShmem<Policy, Pair>(heapArr[i], &shDumpKV[0], rowId, numOfNN);
updateSortedWarpQ<Pair, myWarpSelect::kNumWarpQRegisters>(
heapArr[i], &allWarpTopKs[0], rowId, finalNumVals);
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
if (needScanSort[i]) {
const auto rowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
const auto gmemRowId = starty + i * Policy::AccThRows;
if (gmemRowId < m) {
storeWarpQShmem<Policy, Pair>(heapArr[i], shDumpKV, rowId, numOfNN);
}
}
}
}
} else {
#pragma unroll
for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
const auto gmemRowId = starty + i * Policy::AccThRows;
const auto shMemRowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
if (gmemRowId < m) {
#pragma unroll
for (int j = 0; j < Policy::AccColsPerTh; ++j) {
const auto colId = startx + j * Policy::AccThCols;
Pair otherKV = {keyMax, identity};
if (colId < ldd) {
otherKV.value = acc[i][j];
otherKV.key = colId;
}
heapArr[i]->add(otherKV.value, otherKV.key);
}
bool needSort = (heapArr[i]->numVals > 0);
needSort = __any_sync(mask, needSort);
if (needSort) { heapArr[i]->reduce(); }
storeWarpQShmem<Policy, Pair>(heapArr[i], shDumpKV, shMemRowId, numOfNN);
}
}
}
if (((gridStrideX + Policy::Nblk * gridDim.x) >= n) && gridDim.x == 1) {
// This is last iteration of grid stride X
loadAllWarpQShmem<Policy, Pair>(heapArr, &shDumpKV[0], m, numOfNN);
storeWarpQGmem<Policy, Pair>(heapArr, out_dists, out_inds, m, numOfNN, starty);
}
};
constexpr bool write_out = false;
cuvs::distance::detail::PairwiseDistances<DataT,
OutT,
IdxT,
Policy,
OpT,
decltype(epilog_lambda),
FinalLambda,
decltype(rowEpilog_lambda),
isRowMajor,
write_out>
obj(x,
y,
m,
n,
k,
lda,
ldb,
ldd,
_xn,
_yn,
nullptr, // output ptr, can be null as write_out == false.
smem,
distance_op,
epilog_lambda,
fin_op,
rowEpilog_lambda);
obj.run();
}
/**
 * Instantiate and launch the fused L2 (unexpanded) brute-force kNN kernel for a
 * fixed vectorized-load width.
 *
 * Workspace protocol: when the launch grid has more than one block along x
 * (grid.x > 1), the kernel needs one int32 mutex per Mblk-row tile to merge
 * partial top-k results across thread blocks. If the caller-supplied workspace
 * is null or too small, the required byte count is written to `worksize` and
 * the function returns WITHOUT launching; the caller reallocates and retries.
 *
 * @tparam VecLen        number of DataT elements per vectorized load
 * @tparam usePrevTopKs  seed the warp queues from previously computed results
 * @tparam isRowMajor    must be true (column-major is not implemented)
 * @param[in]  x,y       row-major query/index matrices (m x k, n x k)
 * @param[in]  lda,ldb,ldd leading dimensions of x, y and the distance tile
 * @param[in]  sqrt      apply sqrt to the accumulated distances
 * @param[out] out_dists,out_inds device top-k outputs (m x numOfNN)
 * @param[in]  numOfNN   k nearest neighbors to keep (must be <= 64)
 * @param[in,out] worksize in: available workspace bytes; out: required bytes
 */
template <typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT,
          int VecLen,
          bool usePrevTopKs,
          bool isRowMajor>
void fusedL2UnexpKnnImpl(const DataT* x,
                         const DataT* y,
                         IdxT m,
                         IdxT n,
                         IdxT k,
                         IdxT lda,
                         IdxT ldb,
                         IdxT ldd,
                         bool sqrt,
                         OutT* out_dists,
                         IdxT* out_inds,
                         IdxT numOfNN,
                         cudaStream_t stream,
                         void* workspace,
                         size_t& worksize)
{
  typedef typename raft::linalg::Policy2x8<DataT, 1>::Policy RowPolicy;
  typedef typename raft::linalg::Policy4x4<DataT, VecLen>::ColPolicy ColPolicy;
  // NOTE(review): std::conditional<true, ...> always picks RowPolicy, so
  // ColPolicy is currently dead — consistent with the row-major-only ASSERT.
  typedef typename std::conditional<true, RowPolicy, ColPolicy>::type KPolicy;
  ASSERT(isRowMajor, "Only Row major inputs are allowed");
  dim3 blk(KPolicy::Nthreads);
  // Accumulation operation lambda
  typedef cub::KeyValuePair<uint32_t, AccT> Pair;
  cuvs::distance::detail::ops::l2_unexp_distance_op<DataT, AccT, IdxT> distance_op{sqrt};
  raft::identity_op fin_op{};
  if constexpr (isRowMajor) {
    // Two kernel instantiations: warp-queue capacity 32 (NumWarpQRegisters=2)
    // and 64 (NumWarpQRegisters=3). The smallest capacity >= numOfNN is used.
    constexpr auto fusedL2UnexpKnn32RowMajor = fusedL2kNN<DataT,
                                                          OutT,
                                                          IdxT,
                                                          KPolicy,
                                                          decltype(distance_op),
                                                          decltype(fin_op),
                                                          32,
                                                          2,
                                                          usePrevTopKs,
                                                          isRowMajor>;
    constexpr auto fusedL2UnexpKnn64RowMajor = fusedL2kNN<DataT,
                                                          OutT,
                                                          IdxT,
                                                          KPolicy,
                                                          decltype(distance_op),
                                                          decltype(fin_op),
                                                          64,
                                                          3,
                                                          usePrevTopKs,
                                                          isRowMajor>;
    auto fusedL2UnexpKnnRowMajor = fusedL2UnexpKnn32RowMajor;
    if (numOfNN <= 32) {
      fusedL2UnexpKnnRowMajor = fusedL2UnexpKnn32RowMajor;
    } else if (numOfNN <= 64) {
      fusedL2UnexpKnnRowMajor = fusedL2UnexpKnn64RowMajor;
    } else {
      ASSERT(numOfNN <= 64, "fusedL2kNN: num of nearest neighbors must be <= 64");
    }
    // Dynamic shared memory: the distance op's tile buffer plus one
    // key/value pair per (row in the Mblk tile) x (neighbor slot), used by the
    // kernel to dump/merge per-warp top-k queues.
    const auto sharedMemSize =
      distance_op.template shared_mem_size<KPolicy>() + KPolicy::Mblk * numOfNN * sizeof(Pair);
    dim3 grid = cuvs::distance::detail::launchConfigGenerator<KPolicy>(
      m, n, sharedMemSize, fusedL2UnexpKnnRowMajor);
    if (grid.x > 1) {
      // Multiple blocks along x cooperate on the same output rows; they
      // serialize their merges through one mutex per Mblk-row tile.
      const auto numMutexes = raft::ceildiv<int>(m, KPolicy::Mblk);
      if (workspace == nullptr || worksize < (sizeof(int32_t) * numMutexes)) {
        // Report the required size and bail; caller must re-invoke.
        worksize = sizeof(int32_t) * numMutexes;
        return;
      } else {
        RAFT_CUDA_TRY(cudaMemsetAsync(workspace, 0, sizeof(int32_t) * numMutexes, stream));
      }
    }
    // Unexpanded L2 needs no precomputed row norms, hence the null xn/yn.
    fusedL2UnexpKnnRowMajor<<<grid, blk, sharedMemSize, stream>>>(x,
                                                                  y,
                                                                  nullptr,
                                                                  nullptr,
                                                                  m,
                                                                  n,
                                                                  k,
                                                                  lda,
                                                                  ldb,
                                                                  ldd,
                                                                  distance_op,
                                                                  fin_op,
                                                                  (uint32_t)numOfNN,
                                                                  (int*)workspace,
                                                                  out_dists,
                                                                  out_inds);
  } else {
    // Column-major path intentionally unimplemented (guarded by ASSERT above).
  }
  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
 * Dispatch fusedL2UnexpKnnImpl with the widest vectorized load width that is
 * compatible with the element size and both row strides (16B, then 8B, then
 * scalar). All arguments are forwarded unchanged; see fusedL2UnexpKnnImpl for
 * the workspace/worksize protocol.
 */
template <typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT,
          bool usePrevTopKs,
          bool isRowMajor>
void fusedL2UnexpKnn(IdxT m,
                     IdxT n,
                     IdxT k,
                     IdxT lda,
                     IdxT ldb,
                     IdxT ldd,
                     const DataT* x,
                     const DataT* y,
                     bool sqrt,
                     OutT* out_dists,
                     IdxT* out_inds,
                     IdxT numOfNN,
                     cudaStream_t stream,
                     void* workspace,
                     size_t& worksize)
{
  // Byte strides of one row of x and y; alignment of these decides VecLen.
  const size_t x_row_bytes = sizeof(DataT) * lda;
  const size_t y_row_bytes = sizeof(DataT) * ldb;
  if (16 % sizeof(DataT) == 0 && x_row_bytes % 16 == 0 && y_row_bytes % 16 == 0) {
    fusedL2UnexpKnnImpl<DataT, AccT, OutT, IdxT, 16 / sizeof(DataT), usePrevTopKs, isRowMajor>(
      x, y, m, n, k, lda, ldb, ldd, sqrt, out_dists, out_inds, numOfNN, stream, workspace, worksize);
  } else if (8 % sizeof(DataT) == 0 && x_row_bytes % 8 == 0 && y_row_bytes % 8 == 0) {
    fusedL2UnexpKnnImpl<DataT, AccT, OutT, IdxT, 8 / sizeof(DataT), usePrevTopKs, isRowMajor>(
      x, y, m, n, k, lda, ldb, ldd, sqrt, out_dists, out_inds, numOfNN, stream, workspace, worksize);
  } else {
    // No usable alignment: fall back to one element per load.
    fusedL2UnexpKnnImpl<DataT, AccT, OutT, IdxT, 1, usePrevTopKs, isRowMajor>(
      x, y, m, n, k, lda, ldb, ldd, sqrt, out_dists, out_inds, numOfNN, stream, workspace, worksize);
  }
}
/**
 * Instantiate and launch the fused L2 (expanded) brute-force kNN kernel for a
 * fixed vectorized-load width. The expanded form needs the L2 row norms of x
 * and y; if they are not supplied (`xn`/`yn` null) they are computed into the
 * caller-provided workspace.
 *
 * Workspace layout when grid.x > 1: [row norms | per-tile int32 mutexes].
 * If `worksize` is too small for that layout, the required byte count is
 * written to `worksize` and the function returns WITHOUT launching.
 *
 * @tparam VecLen        number of DataT elements per vectorized load
 * @tparam usePrevTopKs  seed the warp queues from previously computed results
 * @tparam isRowMajor    must be true (column-major is not implemented)
 * @param[in]  xn,yn     optional precomputed L2 row norms (may be null)
 * @param[in,out] worksize in: available workspace bytes; out: required bytes
 */
template <typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT,
          int VecLen,
          bool usePrevTopKs,
          bool isRowMajor>
void fusedL2ExpKnnImpl(const DataT* x,
                       const DataT* y,
                       const DataT* xn,
                       const DataT* yn,
                       IdxT m,
                       IdxT n,
                       IdxT k,
                       IdxT lda,
                       IdxT ldb,
                       IdxT ldd,
                       bool sqrt,
                       OutT* out_dists,
                       IdxT* out_inds,
                       IdxT numOfNN,
                       cudaStream_t stream,
                       void* workspace,
                       size_t& worksize)
{
  typedef typename raft::linalg::Policy2x8<DataT, 1>::Policy RowPolicy;
  typedef typename raft::linalg::Policy4x4<DataT, VecLen>::ColPolicy ColPolicy;
  // NOTE(review): std::conditional<true, ...> always picks RowPolicy; ColPolicy
  // is dead, consistent with the row-major-only ASSERT below.
  typedef typename std::conditional<true, RowPolicy, ColPolicy>::type KPolicy;
  ASSERT(isRowMajor, "Only Row major inputs are allowed");
  // Precondition: workspace must at least hold the norms that may need to be
  // computed below ((m + n) values when x != y, m values otherwise).
  // NOTE(review): this check sizes norms with sizeof(AccT) while the carving
  // below uses DataT pointers — fine when AccT == DataT; verify otherwise.
  ASSERT(!(((x != y) && (worksize < (m + n) * sizeof(AccT))) || (worksize < m * sizeof(AccT))),
         "workspace size error");
  ASSERT(workspace != nullptr, "workspace is null");
  dim3 blk(KPolicy::Nthreads);
  typedef cub::KeyValuePair<uint32_t, AccT> Pair;
  cuvs::distance::detail::ops::l2_exp_distance_op<DataT, AccT, IdxT> distance_op{sqrt};
  raft::identity_op fin_op{};
  if constexpr (isRowMajor) {
    // Two kernel instantiations: warp-queue capacity 32 (NumWarpQRegisters=2)
    // and 64 (NumWarpQRegisters=3). The smallest capacity >= numOfNN is used.
    constexpr auto fusedL2ExpKnn32RowMajor = fusedL2kNN<DataT,
                                                        OutT,
                                                        IdxT,
                                                        KPolicy,
                                                        decltype(distance_op),
                                                        decltype(fin_op),
                                                        32,
                                                        2,
                                                        usePrevTopKs,
                                                        isRowMajor>;
    constexpr auto fusedL2ExpKnn64RowMajor = fusedL2kNN<DataT,
                                                        OutT,
                                                        IdxT,
                                                        KPolicy,
                                                        decltype(distance_op),
                                                        decltype(fin_op),
                                                        64,
                                                        3,
                                                        usePrevTopKs,
                                                        isRowMajor>;
    auto fusedL2ExpKnnRowMajor = fusedL2ExpKnn32RowMajor;
    if (numOfNN <= 32) {
      fusedL2ExpKnnRowMajor = fusedL2ExpKnn32RowMajor;
    } else if (numOfNN <= 64) {
      fusedL2ExpKnnRowMajor = fusedL2ExpKnn64RowMajor;
    } else {
      ASSERT(numOfNN <= 64, "fusedL2kNN: num of nearest neighbors must be <= 64");
    }
    // Dynamic shared memory: distance op's tile buffer plus one key/value
    // pair per (row in the Mblk tile) x (neighbor slot) for top-k merging.
    const auto sharedMemSize =
      distance_op.template shared_mem_size<KPolicy>() + (KPolicy::Mblk * numOfNN * sizeof(Pair));
    dim3 grid = cuvs::distance::detail::launchConfigGenerator<KPolicy>(
      m, n, sharedMemSize, fusedL2ExpKnnRowMajor);
    int32_t* mutexes = nullptr;
    if (grid.x > 1) {
      // Mutexes live after the norms region inside the same workspace.
      const auto numMutexes = raft::ceildiv<int>(m, KPolicy::Mblk);
      // NOTE(review): the x == y branch reserves n norm values while the norm
      // computation below writes m values into the workspace — only safe when
      // x == y implies m == n (self-join); confirm at call sites.
      const auto normsSize  = (x != y) ? (m + n) * sizeof(DataT) : n * sizeof(DataT);
      const auto requiredSize = sizeof(int32_t) * numMutexes + normsSize;
      if (worksize < requiredSize) {
        // Report the required size and bail; caller must re-invoke.
        worksize = requiredSize;
        return;
      } else {
        mutexes = (int32_t*)((char*)workspace + normsSize);
        RAFT_CUDA_TRY(cudaMemsetAsync(mutexes, 0, sizeof(int32_t) * numMutexes, stream));
      }
    }
    // calculate norms if they haven't been passed in
    if (!xn) {
      DataT* xn_ = (DataT*)workspace;
      workspace  = xn_ + m;  // advance past the m x-norms for the y-norms below
      raft::linalg::rowNorm(
        xn_, x, k, m, raft::linalg::L2Norm, isRowMajor, stream, raft::identity_op{});
      xn = xn_;
    }
    if (!yn) {
      if (x == y) {
        // Self-join: reuse the x norms.
        yn = xn;
      } else {
        DataT* yn_ = (DataT*)(workspace);
        raft::linalg::rowNorm(
          yn_, y, k, n, raft::linalg::L2Norm, isRowMajor, stream, raft::identity_op{});
        yn = yn_;
      }
    }
    fusedL2ExpKnnRowMajor<<<grid, blk, sharedMemSize, stream>>>(x,
                                                                y,
                                                                xn,
                                                                yn,
                                                                m,
                                                                n,
                                                                k,
                                                                lda,
                                                                ldb,
                                                                ldd,
                                                                distance_op,
                                                                fin_op,
                                                                (uint32_t)numOfNN,
                                                                mutexes,
                                                                out_dists,
                                                                out_inds);
  } else {
    // Column-major path intentionally unimplemented (guarded by ASSERT above).
  }
  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
 * Dispatch fusedL2ExpKnnImpl with the widest vectorized load width that is
 * compatible with the element size and both row strides (16B, then 8B, then
 * scalar). All arguments are forwarded unchanged; see fusedL2ExpKnnImpl for
 * the workspace/worksize protocol.
 */
template <typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT,
          bool usePrevTopKs,
          bool isRowMajor>
void fusedL2ExpKnn(IdxT m,
                   IdxT n,
                   IdxT k,
                   IdxT lda,
                   IdxT ldb,
                   IdxT ldd,
                   const DataT* x,
                   const DataT* y,
                   const DataT* xn,
                   const DataT* yn,
                   bool sqrt,
                   OutT* out_dists,
                   IdxT* out_inds,
                   IdxT numOfNN,
                   cudaStream_t stream,
                   void* workspace,
                   size_t& worksize)
{
  // Byte strides of one row of x and y; alignment of these decides VecLen.
  const size_t x_row_bytes = sizeof(DataT) * lda;
  const size_t y_row_bytes = sizeof(DataT) * ldb;
  if (16 % sizeof(DataT) == 0 && x_row_bytes % 16 == 0 && y_row_bytes % 16 == 0) {
    fusedL2ExpKnnImpl<DataT, AccT, OutT, IdxT, 16 / sizeof(DataT), usePrevTopKs, isRowMajor>(
      x, y, xn, yn, m, n, k, lda, ldb, ldd, sqrt, out_dists, out_inds, numOfNN, stream, workspace,
      worksize);
  } else if (8 % sizeof(DataT) == 0 && x_row_bytes % 8 == 0 && y_row_bytes % 8 == 0) {
    fusedL2ExpKnnImpl<DataT, AccT, OutT, IdxT, 8 / sizeof(DataT), usePrevTopKs, isRowMajor>(
      x, y, xn, yn, m, n, k, lda, ldb, ldd, sqrt, out_dists, out_inds, numOfNN, stream, workspace,
      worksize);
  } else {
    // No usable alignment: fall back to one element per load.
    fusedL2ExpKnnImpl<DataT, AccT, OutT, IdxT, 1, usePrevTopKs, isRowMajor>(
      x, y, xn, yn, m, n, k, lda, ldb, ldd, sqrt, out_dists, out_inds, numOfNN, stream, workspace,
      worksize);
  }
}
/**
 * Compute the k-nearest neighbors using L2 expanded/unexpanded distance.
 * @tparam value_idx
 * @tparam value_t
 * @param[in] D number of columns (features) in the index and query arrays
 * @param[out] out_inds output indices array on device (size n_query_rows * k)
 * @param[out] out_dists output dists array on device (size n_query_rows * k)
 * @param[in] index input index array on device (size n_index_rows * D)
 * @param[in] query input query array on device (size n_query_rows * D)
 * @param[in] n_index_rows number of rows in index array
 * @param[in] n_query_rows number of rows in query array
 * @param[in] k number of closest neighbors to return
 * @param[in] rowMajorIndex is the index array in row-major layout?
 * @param[in] rowMajorQuery is the query array in row-major layout?
 * @param[in] stream stream to order kernel launch
 * @param[in] metric distance metric to use (only L2 variants are supported)
 * @param[in] index_norms optional precomputed L2 norms of index rows (may be NULL)
 * @param[in] query_norms optional precomputed L2 norms of query rows (may be NULL)
 */
template <typename value_idx, typename value_t, bool usePrevTopKs = false>
void fusedL2Knn(size_t D,
                value_idx* out_inds,
                value_t* out_dists,
                const value_t* index,
                const value_t* query,
                size_t n_index_rows,
                size_t n_query_rows,
                int k,
                bool rowMajorIndex,
                bool rowMajorQuery,
                cudaStream_t stream,
                cuvs::distance::DistanceType metric,
                const value_t* index_norms = NULL,
                const value_t* query_norms = NULL)
{
  // Validate the input data
  ASSERT(k > 0, "l2Knn: k must be > 0");
  ASSERT(D > 0, "l2Knn: D must be > 0");
  ASSERT(n_index_rows > 0, "l2Knn: n_index_rows must be > 0");
  ASSERT(index, "l2Knn: index must be provided (passed null)");
  ASSERT(n_query_rows > 0, "l2Knn: n_query_rows must be > 0");
  ASSERT(query, "l2Knn: query must be provided (passed null)");
  ASSERT(out_dists, "l2Knn: out_dists must be provided (passed null)");
  ASSERT(out_inds, "l2Knn: out_inds must be provided (passed null)");
  // Currently we only support same layout for x & y inputs.
  ASSERT(rowMajorIndex == rowMajorQuery,
         "l2Knn: rowMajorIndex and rowMajorQuery should have same layout");
  // TODO: Add support for column major layout
  ASSERT(rowMajorIndex == true, "l2Knn: only rowMajor inputs are supported for now.");
  // Even for L2 Sqrt distance case we use non-sqrt version as FAISS bfKNN only support
  // non-sqrt metric & some tests in RAFT/cuML (like Linkage) fails if we use L2 sqrt.
  constexpr bool sqrt = false;
  size_t worksize = 0, tempWorksize = 0;
  // Starts empty; resized below once the required workspace size is known.
  rmm::device_uvector<char> workspace(worksize, stream);
  value_idx lda = D, ldb = D, ldd = n_index_rows;
  switch (metric) {
    case cuvs::distance::DistanceType::L2SqrtExpanded:
    case cuvs::distance::DistanceType::L2Expanded:
      // NOTE(review): the workspace-size query hard-codes float for the
      // data/acc/out types regardless of value_t — confirm this is intended
      // for non-float instantiations.
      tempWorksize = cuvs::distance::detail::
        getWorkspaceSize<cuvs::distance::DistanceType::L2Expanded, float, float, float, value_idx>(
          query, index, n_query_rows, n_index_rows, D);
      worksize = tempWorksize;
      workspace.resize(worksize, stream);
      // First attempt: if the workspace is still too small (multi-block grid
      // needs norms + mutexes), the call updates `worksize` and returns
      // without launching; the retry below then runs with enough space.
      fusedL2ExpKnn<value_t, value_t, value_t, value_idx, usePrevTopKs, true>(n_query_rows,
                                                                              n_index_rows,
                                                                              D,
                                                                              lda,
                                                                              ldb,
                                                                              ldd,
                                                                              query,
                                                                              index,
                                                                              query_norms,
                                                                              index_norms,
                                                                              sqrt,
                                                                              out_dists,
                                                                              out_inds,
                                                                              k,
                                                                              stream,
                                                                              workspace.data(),
                                                                              worksize);
      if (worksize > tempWorksize) {
        workspace.resize(worksize, stream);
        fusedL2ExpKnn<value_t, value_t, value_t, value_idx, usePrevTopKs, true>(n_query_rows,
                                                                                n_index_rows,
                                                                                D,
                                                                                lda,
                                                                                ldb,
                                                                                ldd,
                                                                                query,
                                                                                index,
                                                                                query_norms,
                                                                                index_norms,
                                                                                sqrt,
                                                                                out_dists,
                                                                                out_inds,
                                                                                k,
                                                                                stream,
                                                                                workspace.data(),
                                                                                worksize);
      }
      break;
    case cuvs::distance::DistanceType::L2Unexpanded:
    case cuvs::distance::DistanceType::L2SqrtUnexpanded:
      // First attempt with worksize == 0: a multi-block grid needs per-tile
      // mutexes, in which case the call updates `worksize` and returns
      // without launching; the retry below then runs with enough space.
      fusedL2UnexpKnn<value_t, value_t, value_t, value_idx, usePrevTopKs, true>(n_query_rows,
                                                                                n_index_rows,
                                                                                D,
                                                                                lda,
                                                                                ldb,
                                                                                ldd,
                                                                                query,
                                                                                index,
                                                                                sqrt,
                                                                                out_dists,
                                                                                out_inds,
                                                                                k,
                                                                                stream,
                                                                                workspace.data(),
                                                                                worksize);
      if (worksize) {
        workspace.resize(worksize, stream);
        fusedL2UnexpKnn<value_t, value_t, value_t, value_idx, usePrevTopKs, true>(n_query_rows,
                                                                                  n_index_rows,
                                                                                  D,
                                                                                  lda,
                                                                                  ldb,
                                                                                  ldd,
                                                                                  query,
                                                                                  index,
                                                                                  sqrt,
                                                                                  out_dists,
                                                                                  out_inds,
                                                                                  k,
                                                                                  stream,
                                                                                  workspace.data(),
                                                                                  worksize);
      }
      break;
    // NOTE(review): unsupported metrics only print a message and leave the
    // outputs untouched — consider failing loudly (e.g. RAFT_FAIL) instead.
    default: printf("only L2 distance metric is supported\n"); break;
  };
}
} // namespace detail
} // namespace knn
} // namespace spatial
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/fused_l2_knn.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "fused_l2_knn-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "fused_l2_knn-ext.cuh"
#endif
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.