repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/run/conf | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/run/conf/algos/hnswlib.yaml | name: hnswlib
constraints:
search: raft-ann-bench.constraints.hnswlib_search_constraints
groups:
base:
build:
M: [12, 16, 24, 36]
efConstruction: [64, 128, 256, 512]
search:
ef: [10, 20, 40, 60, 80, 120, 200, 400, 600, 800]
| 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/run/conf | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/run/conf/algos/raft_ivf_pq.yaml | name: raft_ivf_pq
constraints:
build: raft-ann-bench.constraints.raft_ivf_pq_build_constraints
search: raft-ann-bench.constraints.raft_ivf_pq_search_constraints
groups:
base:
build:
nlist: [1024, 2048, 4096, 8192]
pq_dim: [64, 32]
pq_bits: [8, 6, 5, 4]
ratio: [10, 25]
niter: [25]
search:
nprobe: [1, 5, 10, 50, 100, 200]
internalDistanceDtype: ["float"]
smemLutDtype: ["float", "fp8", "half"]
refine_ratio: [1, 2, 4] | 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/run/conf | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/run/conf/algos/faiss_gpu_ivf_pq.yaml | name: faiss_gpu_ivf_pq
groups:
base:
build:
nlist: [1024, 2048, 4096, 8192]
M: [8, 16]
ratio: [10, 25]
usePrecomputed: [False]
useFloat16: [False]
search:
nprobe: [1, 5, 10, 50, 100, 200]
refine_ratio: [1] | 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/run/conf | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/run/conf/algos/raft_cagra.yaml | name: raft_cagra
constraints:
search: raft-ann-bench.constraints.raft_cagra_search_constraints
groups:
base:
build:
graph_degree: [32, 64, 128, 256]
intermediate_graph_degree: [32, 64, 96, 128]
graph_build_algo: ["NN_DESCENT"]
search:
itopk: [32, 64, 128, 256, 512]
search_width: [1, 2, 4, 8, 16, 32, 64]
| 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/run/conf | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/run/conf/algos/raft_cagra_hnswlib.yaml | name: raft_cagra_hnswlib
constraints:
search: raft-ann-bench.constraints.hnswlib_search_constraints
groups:
base:
build:
graph_degree: [32, 64, 128, 256]
intermediate_graph_degree: [32, 64, 96, 128]
graph_build_algo: ["NN_DESCENT"]
search:
ef: [10, 20, 40, 60, 80, 120, 200, 400, 600, 800]
| 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/constraints/__init__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sizes (in bytes) of the dtype names accepted for LUT / internal-distance
# parameters in the IVF-PQ search configs.
DTYPE_SIZES = {"float": 4, "half": 2, "fp8": 1}


def raft_ivf_pq_build_constraints(params, dims):
    """Accept an IVF-PQ build config unless pq_dim exceeds the dataset dims."""
    if "pq_dim" not in params:
        return True
    return params["pq_dim"] <= dims


def raft_ivf_pq_search_constraints(params, build_params, k, batch_size):
    """Validate an IVF-PQ search config against its build config.

    Requires the shared-memory LUT dtype to be no wider than the internal
    distance dtype, and nprobe to not exceed the number of IVF lists.
    """
    ok = True
    if "internalDistanceDtype" in params and "smemLutDtype" in params:
        ok = (
            DTYPE_SIZES[params["smemLutDtype"]]
            <= DTYPE_SIZES[params["internalDistanceDtype"]]
        )
    if "nlist" in build_params and "nprobe" in params:
        ok = ok and build_params["nlist"] >= params["nprobe"]
    return ok
def raft_cagra_search_constraints(params, build_params, k, batch_size):
    """Accept a CAGRA search config only if itopk can cover k neighbors.

    Returns True when "itopk" is absent (nothing to check). Previously the
    function fell through and returned None, which is falsy and therefore
    silently rejected every config that did not set "itopk".
    """
    if "itopk" in params:
        return params["itopk"] >= k
    return True
def hnswlib_search_constraints(params, build_params, k, batch_size):
    """Accept an hnswlib search config only if ef can cover k neighbors.

    Returns True when "ef" is absent (nothing to check). Previously the
    function fell through and returned None, which is falsy and therefore
    silently rejected every config that did not set "ef".
    """
    if "ef" in params:
        return params["ef"] >= k
    return True
| 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/split_groundtruth/split_groundtruth.pl | #!/usr/bin/perl
# =============================================================================
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
use warnings;
use strict;
use autodie qw(open close);

# Expect exactly two arguments: the combined groundtruth file and the
# output filename prefix.
@ARGV == 2
  or die "usage: $0 input output_prefix\n";

# Read the 8-byte header: two little-endian uint32 values (nrows, dim).
open my $fh, '<:raw', $ARGV[0];
my $raw;
read($fh, $raw, 8);
my ($nrows, $dim) = unpack('LL', $raw);

# The file must hold the header plus nrows*dim int32 neighbors and
# nrows*dim float32 distances (4 bytes each).
my $expected_size = 8 + $nrows * $dim * (4 + 4);
my $size = (stat($fh))[7];
$size == $expected_size
  or die("error: expected size is $expected_size, but actual size is $size\n");

# Write the same (nrows, dim) header to both output files, then copy the
# neighbors block into *.neighbors.ibin and the distances block into
# *.distances.fbin.
open my $fh_out1, '>:raw', "$ARGV[1].neighbors.ibin";
open my $fh_out2, '>:raw', "$ARGV[1].distances.fbin";
print {$fh_out1} $raw;
print {$fh_out2} $raw;

read($fh, $raw, $nrows * $dim * 4);
print {$fh_out1} $raw;
read($fh, $raw, $nrows * $dim * 4);
print {$fh_out2} $raw;
| 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/split_groundtruth/__main__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
import sys
def split_groundtruth(groundtruth_filepath):
    """Split a combined groundtruth file into neighbors/distances files.

    Runs the bundled split_groundtruth.pl script (located next to this
    module) from the directory containing the input file, so the
    "groundtruth.*" output files land alongside the input.
    """
    ann_bench_scripts_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "split_groundtruth.pl"
    )
    pwd = os.getcwd()
    path_to_groundtruth = os.path.normpath(groundtruth_filepath).split(os.sep)
    if len(path_to_groundtruth) > 1:
        os.chdir(os.path.join(*path_to_groundtruth[:-1]))
    groundtruth_filename = path_to_groundtruth[-1]
    try:
        subprocess.run(
            [ann_bench_scripts_path, groundtruth_filename, "groundtruth"],
            check=True,
        )
    finally:
        # Restore the caller's working directory even if the script fails;
        # previously a failing run (check=True raises) left the process
        # chdir'd into the dataset directory.
        os.chdir(pwd)
def main():
    """CLI entry point: parse --groundtruth and split the file."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--groundtruth",
        help="Path to billion-scale dataset groundtruth file",
        required=True,
    )
    # With no CLI arguments at all, show usage instead of argparse's error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    parsed = parser.parse_args()
    split_groundtruth(parsed.groundtruth)
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/generate_groundtruth/utils.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
def dtype_from_filename(filename):
    """Return the numpy dtype implied by a big-ann-benchmarks file extension."""
    mapping = {
        ".fbin": np.float32,
        ".hbin": np.float16,
        ".ibin": np.int32,
        ".u8bin": np.ubyte,
        ".i8bin": np.byte,
    }
    ext = os.path.splitext(filename)[1]
    try:
        return mapping[ext]
    except KeyError:
        raise RuntimeError("Not supported file extension" + ext)
def suffix_from_dtype(dtype):
    """Return the big-ann-benchmarks file extension for a numpy dtype.

    Raises RuntimeError for unsupported dtypes. The previous error path
    evaluated `"..." + dtype`, which raised TypeError (str + dtype object)
    instead of the intended RuntimeError.
    """
    if dtype == np.float32:
        return ".fbin"
    if dtype == np.float16:
        return ".hbin"
    if dtype == np.int32:
        return ".ibin"
    if dtype == np.ubyte:
        return ".u8bin"
    if dtype == np.byte:
        return ".i8bin"
    raise RuntimeError("Not supported dtype extension %s" % dtype)
def memmap_bin_file(
    bin_file, dtype, shape=None, mode="r", size_dtype=np.uint32
):
    """Memory-map a big-ann .bin file: two size_dtype extents, then raw data.

    In read mode the (rows, cols) shape is taken from the file header unless
    `shape` overrides it; in write mode the header is written first and the
    data region is returned as a writable memmap. Returns None when
    `bin_file` is None; `dtype` defaults to the one implied by the filename.
    """
    extent_itemsize = np.dtype(size_dtype).itemsize
    # Data starts immediately after the two-element (rows, cols) header.
    offset = int(extent_itemsize) * 2
    if bin_file is None:
        return None
    if dtype is None:
        dtype = dtype_from_filename(bin_file)

    if mode[0] == "r":
        # Map just the header to discover (or complete) the array shape.
        a = np.memmap(bin_file, mode=mode, dtype=size_dtype, shape=(2,))
        if shape is None:
            shape = (a[0], a[1])
        else:
            # None entries in the caller-supplied shape are filled in from
            # the file header; explicit values win.
            shape = tuple(
                [
                    aval if sval is None else sval
                    for aval, sval in zip(a, shape)
                ]
            )

        return np.memmap(
            bin_file, mode=mode, dtype=dtype, offset=offset, shape=shape
        )
    elif mode[0] == "w":
        if shape is None:
            raise ValueError("Need to specify shape to map file in write mode")

        print("creating file", bin_file)
        dirname = os.path.dirname(bin_file)
        if len(dirname) > 0:
            os.makedirs(dirname, exist_ok=True)
        # Write the header, flush it to disk, then re-open the data region
        # read-write so the caller can fill it in.
        # NOTE(review): np.memmap accepts "w+" but not plain "w"; callers
        # presumably pass "w+" here — confirm.
        a = np.memmap(bin_file, mode=mode, dtype=size_dtype, shape=(2,))
        a[0] = shape[0]
        a[1] = shape[1]
        a.flush()
        del a
        fp = np.memmap(
            bin_file, mode="r+", dtype=dtype, offset=offset, shape=shape
        )
        return fp

    # print('# {}: shape: {}, dtype: {}'.format(bin_file, shape, dtype))
def write_bin(fname, data):
    """Write an array in big-ann binary format: uint32 shape header, then raw bytes."""
    print("writing", fname, data.shape, data.dtype, "...")
    header = np.asarray(data.shape, dtype=np.uint32)
    with open(fname, "wb") as out:
        header.tofile(out)
        data.tofile(out)
| 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/generate_groundtruth/__main__.py | #!/usr/bin/env python
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
import cupy as cp
import numpy as np
import rmm
from pylibraft.common import DeviceResources
from pylibraft.neighbors.brute_force import knn
from rmm.allocators.cupy import rmm_cupy_allocator
from .utils import memmap_bin_file, suffix_from_dtype, write_bin
def generate_random_queries(n_queries, n_features, dtype=np.float32):
    """Create an (n_queries, n_features) device array of random queries.

    Integer dtypes get uniform values in [0, 255); floating-point dtypes get
    uniform values in [0, 1).
    """
    print("Generating random queries")
    if np.issubdtype(dtype, np.integer):
        return cp.random.randint(
            0, 255, size=(n_queries, n_features), dtype=dtype
        )
    return cp.random.uniform(size=(n_queries, n_features)).astype(dtype)
def choose_random_queries(dataset, n_queries):
    """Sample n_queries distinct rows of `dataset` to use as query vectors."""
    print("Choosing random vector from dataset as query vectors")
    selected = np.random.choice(
        dataset.shape[0], size=(n_queries,), replace=False
    )
    return dataset[selected, :]
def calc_truth(dataset, queries, k, metric="sqeuclidean"):
    """Compute exact k-NN ground truth for `queries` over `dataset`.

    The dataset is processed in 500k-row batches on the GPU; each batch's
    results are merged with the running results by concatenating along the
    neighbor axis and re-selecting the k entries with the smallest distance
    (argsort order). Returns (distances, indices) cupy arrays of shape
    (n_queries, k).
    """
    handle = DeviceResources()
    n_samples = dataset.shape[0]
    n = 500000  # batch size for processing neighbors
    i = 0
    indices = None
    distances = None
    queries = cp.asarray(queries, dtype=cp.float32)

    while i < n_samples:
        print("Step {0}/{1}:".format(i // n, n_samples // n))
        n_batch = n if i + n <= n_samples else n_samples - i

        X = cp.asarray(dataset[i : i + n_batch, :], cp.float32)

        D, Ind = knn(
            X,
            queries,
            k,
            metric=metric,
            handle=handle,
            global_id_offset=i,  # shift neighbor index by offset i
        )
        handle.sync()

        D, Ind = cp.asarray(D), cp.asarray(Ind)

        if distances is None:
            distances = D
            indices = Ind
        else:
            # Merge this batch with the running top-k: widen both arrays,
            # then keep the k best columns per query row.
            distances = cp.concatenate([distances, D], axis=1)
            indices = cp.concatenate([indices, Ind], axis=1)
            idx = cp.argsort(distances, axis=1)[:, :k]
            distances = cp.take_along_axis(distances, idx, axis=1)
            indices = cp.take_along_axis(indices, idx, axis=1)

        i += n_batch

    return distances, indices
def main():
    """CLI entry point: compute exact ground truth for a dataset.

    Sets up an RMM pool allocator for cupy, parses arguments, memory-maps
    (a subset of) the dataset, obtains query vectors (from a file, random
    generation, or random selection from the dataset), runs brute-force
    k-NN and writes neighbors/distances in big-ann binary format.
    """
    # Route cupy allocations through an RMM pool (1 GiB initial size).
    pool = rmm.mr.PoolMemoryResource(
        rmm.mr.CudaMemoryResource(), initial_pool_size=2**30
    )
    rmm.mr.set_current_device_resource(pool)
    cp.cuda.set_allocator(rmm_cupy_allocator)

    parser = argparse.ArgumentParser(
        prog="generate_groundtruth",
        description="Generate true neighbors using exact NN search. "
        "The input and output files are in big-ann-benchmark's binary format.",
        epilog="""Example usage
# With existing query file
python -m raft-ann-bench.generate_groundtruth --dataset /dataset/base.\
fbin --output=groundtruth_dir --queries=/dataset/query.public.10K.fbin
# With randomly generated queries
python -m raft-ann-bench.generate_groundtruth --dataset /dataset/base.\
fbin --output=groundtruth_dir --queries=random --n_queries=10000
# Using only a subset of the dataset. Define queries by randomly
# selecting vectors from the (subset of the) dataset.
python -m raft-ann-bench.generate_groundtruth --dataset /dataset/base.\
fbin --nrows=2000000 --cols=128 --output=groundtruth_dir \
--queries=random-choice --n_queries=10000
""",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )

    parser.add_argument("dataset", type=str, help="input dataset file name")
    parser.add_argument(
        "--queries",
        type=str,
        default="random",
        help="Queries file name, or one of 'random-choice' or 'random' "
        "(default). 'random-choice': select n_queries vectors from the input "
        "dataset. 'random': generate n_queries as uniform random numbers.",
    )
    parser.add_argument(
        "--output",
        type=str,
        default="",
        help="output directory name (default current dir)",
    )
    parser.add_argument(
        "--n_queries",
        type=int,
        default=10000,
        help="Number of quries to generate (if no query file is given). "
        "Default: 10000.",
    )
    parser.add_argument(
        "-N",
        "--rows",
        default=None,
        type=int,
        help="use only first N rows from dataset, by default the whole "
        "dataset is used",
    )
    parser.add_argument(
        "-D",
        "--cols",
        default=None,
        type=int,
        help="number of features (dataset columns). "
        "Default: read from dataset file.",
    )
    parser.add_argument(
        "--dtype",
        type=str,
        help="Dataset dtype. When not specified, then derived from extension."
        " Supported types: 'float32', 'float16', 'uint8', 'int8'",
    )
    parser.add_argument(
        "-k",
        type=int,
        default=100,
        help="Number of neighbors (per query) to calculate",
    )
    parser.add_argument(
        "--metric",
        type=str,
        default="sqeuclidean",
        help="Metric to use while calculating distances. Valid metrics are "
        "those that are accepted by pylibraft.neighbors.brute_force.knn. Most"
        " commonly used with RAFT ANN are 'sqeuclidean' and 'inner_product'",
    )

    # With no CLI arguments at all, show the help text instead of running.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()

    if args.rows is not None:
        print("Reading subset of the data, nrows=", args.rows)
    else:
        print("Reading whole dataset")

    # Load input data (shape entries left as None are read from the header).
    dataset = memmap_bin_file(
        args.dataset, args.dtype, shape=(args.rows, args.cols)
    )
    n_features = dataset.shape[1]
    dtype = dataset.dtype

    print(
        "Dataset size {:6.1f} GB, shape {}, dtype {}".format(
            dataset.size * dataset.dtype.itemsize / 1e9,
            dataset.shape,
            np.dtype(dtype),
        )
    )

    if len(args.output) > 0:
        os.makedirs(args.output, exist_ok=True)

    # Obtain query vectors: generated, sampled from the dataset, or loaded.
    if args.queries == "random" or args.queries == "random-choice":
        if args.n_queries is None:
            raise RuntimeError(
                "n_queries must be given to generate random queries"
            )
        if args.queries == "random":
            queries = generate_random_queries(
                args.n_queries, n_features, dtype
            )
        elif args.queries == "random-choice":
            queries = choose_random_queries(dataset, args.n_queries)

        queries_filename = os.path.join(
            args.output, "queries" + suffix_from_dtype(dtype)
        )
        print("Writing queries file", queries_filename)
        write_bin(queries_filename, queries)
    else:
        print("Reading queries from file", args.queries)
        queries = memmap_bin_file(args.queries, dtype)

    print("Calculating true nearest neighbors")
    distances, indices = calc_truth(dataset, queries, args.k, args.metric)

    write_bin(
        os.path.join(args.output, "groundtruth.neighbors.ibin"),
        indices.astype(np.uint32),
    )
    write_bin(
        os.path.join(args.output, "groundtruth.distances.fbin"),
        distances.astype(np.float32),
    )
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/get_dataset/hdf5_to_fbin.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import h5py
import numpy as np
def normalize(x):
    """Scale each row of x to unit Euclidean (L2) norm."""
    row_norms = np.linalg.norm(x, axis=1)
    return x / row_norms[:, np.newaxis]
def write_bin(fname, data):
    """Serialize an array as a big-ann .bin file: uint32 shape header then raw values."""
    header = np.asarray(data.shape, dtype=np.uint32)
    with open(fname, "wb") as out:
        header.tofile(out)
        data.tofile(out)
if __name__ == "__main__":
    # Usage: hdf5_to_fbin.py [-n] <input>.hdf5
    if len(sys.argv) != 2 and len(sys.argv) != 3:
        print(
            "usage: %s [-n] <input>.hdf5\n" % (sys.argv[0]),
            " -n: normalize base/query set\n",
            "outputs: <input>.base.fbin\n",
            " <input>.query.fbin\n",
            " <input>.groundtruth.neighbors.ibin\n",
            " <input>.groundtruth.distances.fbin",
            file=sys.stderr,
        )
        sys.exit(-1)

    # Optional -n flag requests L2-normalization of the base/query vectors.
    need_normalize = False
    if len(sys.argv) == 3:
        assert sys.argv[1] == "-n"
        need_normalize = True

    fname_prefix = sys.argv[-1]
    assert fname_prefix.endswith(".hdf5")
    fname_prefix = fname_prefix[:-5]

    hdf5 = h5py.File(sys.argv[-1], "r")
    # Only the standard ann-benchmarks layout is supported:
    # train/test float32, neighbors int32, distances float32.
    assert (
        hdf5.attrs["distance"] == "angular"
        or hdf5.attrs["distance"] == "euclidean"
    )
    assert hdf5["train"].dtype == np.float32
    assert hdf5["test"].dtype == np.float32
    assert hdf5["neighbors"].dtype == np.int32
    assert hdf5["distances"].dtype == np.float32

    base = hdf5["train"][:]
    query = hdf5["test"][:]
    if need_normalize:
        base = normalize(base)
        query = normalize(query)
    elif hdf5.attrs["distance"] == "angular":
        # Angular datasets should normally be normalized; warn but proceed.
        print(
            "warning: input has angular distance, ",
            "specify -n to normalize base/query set!\n",
        )

    # Emit the four big-ann binary files derived from the HDF5 contents.
    output_fname = fname_prefix + ".base.fbin"
    print("writing", output_fname, "...")
    write_bin(output_fname, base)

    output_fname = fname_prefix + ".query.fbin"
    print("writing", output_fname, "...")
    write_bin(output_fname, query)

    output_fname = fname_prefix + ".groundtruth.neighbors.ibin"
    print("writing", output_fname, "...")
    write_bin(output_fname, hdf5["neighbors"][:])

    output_fname = fname_prefix + ".groundtruth.distances.fbin"
    print("writing", output_fname, "...")
    write_bin(output_fname, hdf5["distances"][:])
| 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/get_dataset/fbin_to_f16bin.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import sys
import numpy as np
def read_fbin(fname):
    """Load a .fbin file (uint32 rows/cols header followed by float32 data).

    Files whose payload exceeds ~2 GB are memory-mapped instead of read
    eagerly into memory.
    """
    shape = np.fromfile(fname, dtype=np.uint32, count=2)
    payload_bytes = float(shape[0]) * shape[1] * 4
    if payload_bytes > 2000000000:
        return np.memmap(fname, dtype=np.float32, offset=8, mode="r").reshape(
            shape
        )
    return np.fromfile(fname, dtype=np.float32, offset=8).reshape(shape)
def write_bin(fname, data):
    """Write `data` in big-ann binary format: uint32 shape header, then raw bytes."""
    with open(fname, "wb") as out:
        np.asarray(data.shape, dtype=np.uint32).tofile(out)
        data.tofile(out)
# Command-line entry (runs at import time): convert input.fbin (float32)
# to output.f16bin (float16).
if len(sys.argv) != 3:
    print(
        "usage: %s input.fbin output.f16bin" % (sys.argv[0]),
        file=sys.stderr,
    )
    sys.exit(-1)

data = read_fbin(sys.argv[1]).astype(np.float16)
write_bin(sys.argv[2], data)
| 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/get_dataset/__main__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
import sys
from urllib.request import urlretrieve
def get_dataset_path(name, ann_bench_data_path):
    """Return "<ann_bench_data_path>/<name>.hdf5", creating the directory.

    Uses os.makedirs(exist_ok=True) so nested dataset paths work;
    os.mkdir previously failed when an intermediate directory was missing.
    """
    os.makedirs(ann_bench_data_path, exist_ok=True)
    return os.path.join(ann_bench_data_path, f"{name}.hdf5")
def download_dataset(url, path):
    """Fetch `url` into `path` unless the file already exists."""
    if os.path.exists(path):
        return
    print(f"downloading {url} -> {path}...")
    urlretrieve(url, path)
def convert_hdf5_to_fbin(path, normalize):
    """Run the bundled hdf5_to_fbin.py on `path`, passing -n for angular datasets when requested."""
    scripts_path = os.path.dirname(os.path.realpath(__file__))
    ann_bench_scripts_path = os.path.join(scripts_path, "hdf5_to_fbin.py")
    print(f"calling script {ann_bench_scripts_path}")
    cmd = ["python", ann_bench_scripts_path]
    if normalize and "angular" in path:
        cmd.append("-n")
    cmd.append("%s" % path)
    subprocess.run(cmd, check=True)
def move(name, ann_bench_data_path):
    """Move the four converted .bin files for `name` into their own directory.

    Datasets named "*-angular" are filed under "*-inner", matching the
    normalization done during conversion.
    """
    new_name = name.replace("angular", "inner") if "angular" in name else name
    new_path = os.path.join(ann_bench_data_path, new_name)
    if not os.path.exists(new_path):
        os.mkdir(new_path)
    for bin_name in (
        "base.fbin",
        "query.fbin",
        "groundtruth.neighbors.ibin",
        "groundtruth.distances.fbin",
    ):
        os.rename(
            f"{ann_bench_data_path}/{name}.{bin_name}",
            f"{new_path}/{bin_name}",
        )
def download(name, normalize, ann_bench_data_path):
    """Download an ann-benchmarks dataset, convert it to .bin files and file it away."""
    path = get_dataset_path(name, ann_bench_data_path)
    url = f"http://ann-benchmarks.com/{name}.hdf5"
    try:
        download_dataset(url, path)
        convert_hdf5_to_fbin(path, normalize)
        move(name, ann_bench_data_path)
    except Exception:
        # Report which URL failed, then let the caller see the exception.
        print(f"Cannot download {url}")
        raise
def main():
    """CLI entry point: download, convert and store an ann-benchmarks dataset."""
    # Default dataset location: $RAPIDS_DATASET_ROOT_DIR or ./datasets/.
    if "RAPIDS_DATASET_ROOT_DIR" in os.environ:
        default_dataset_path = os.getenv("RAPIDS_DATASET_ROOT_DIR")
    else:
        default_dataset_path = os.path.join(os.getcwd(), "datasets/")

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--dataset", help="dataset to download", default="glove-100-angular"
    )
    parser.add_argument(
        "--dataset-path",
        help="path to download dataset",
        default=default_dataset_path,
    )
    parser.add_argument(
        "--normalize",
        help="normalize cosine distance to inner product",
        action="store_true",
    )
    # With no CLI arguments at all, show the help text instead of running.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    parsed = parser.parse_args()
    download(parsed.dataset, parsed.normalize, parsed.dataset_path)
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/data_export/__main__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import sys
import traceback
import warnings
import pandas as pd
# Benchmark JSON columns that are already represented explicitly in the
# build CSV (algo_name / index_name / time) or are per-run bookkeeping
# fields, and so must not be copied over verbatim.
skip_build_cols = set(
    [
        "algo_name",
        "index_name",
        "time",
        "name",
        "family_index",
        "per_family_instance_index",
        "run_name",
        "run_type",
        "repetitions",
        "repetition_index",
        "iterations",
        "real_time",
        "time_unit",
        "index_size",
    ]
)

# Same idea for search results: these metric columns are extracted by name,
# on top of everything already skipped for build results.
skip_search_cols = (
    set(["recall", "qps", "latency", "items_per_second", "Recall", "Latency"])
    | skip_build_cols
)

# Metric metadata used for Pareto-frontier extraction: the sign of "worst"
# encodes whether larger (-inf) or smaller (+inf) values are better;
# "lim" gives optional fixed axis limits.
metrics = {
    "k-nn": {
        "description": "Recall",
        "worst": float("-inf"),
        "lim": [0.0, 1.03],
    },
    "throughput": {
        "description": "Queries per second (1/s)",
        "worst": float("-inf"),
    },
    "latency": {
        "description": "Search Latency (s)",
        "worst": float("inf"),
    },
}
def read_file(dataset, dataset_path, method):
    """Yield (filepath, algo_name, dataframe) for each benchmark JSON under
    <dataset_path>/<dataset>/result/<method>; unparsable files are reported
    and skipped."""
    dir = os.path.join(dataset_path, dataset, "result", method)
    for file in os.listdir(dir):
        if not file.endswith(".json"):
            continue
        full_path = os.path.join(dir, file)
        with open(full_path, "r") as f:
            try:
                frame = pd.DataFrame(json.load(f)["benchmarks"])
                # Algorithm name is the filename prefix before the first '-'.
                yield full_path, file.split("-")[0], frame
            except Exception as e:
                print(
                    "An error occurred processing file %s (%s). "
                    "Skipping..." % (file, e)
                )
def convert_json_to_csv_build(dataset, dataset_path):
    """Flatten each build-result JSON into a CSV named after its algorithm."""
    for path, algo, frame in read_file(dataset, dataset_path, "build"):
        try:
            algo = algo.replace("_base", "")
            # Keep only the index name, dropping the "/..." benchmark suffix.
            frame["name"] = frame["name"].str.split("/").str[0]
            out = pd.DataFrame(
                {
                    "algo_name": [algo] * len(frame),
                    "index_name": frame["name"],
                    "time": frame["real_time"],
                }
            )
            # Carry over any benchmark columns not in the skip list.
            for col in frame:
                if col not in skip_build_cols:
                    out[col] = frame[col]
            parts = os.path.normpath(path).split(os.sep)
            csv_name = parts[-1].split("-")[0] + ".csv"
            out.to_csv(
                os.path.join(f"{os.sep}".join(parts[:-1]), csv_name),
                index=False,
            )
        except Exception as e:
            print(
                "An error occurred processing file %s (%s). Skipping..."
                % (path, e)
            )
            traceback.print_exc()
def create_pointset(data, xn, yn):
    """Extract the Pareto frontier of (x, y) metric points from `data` rows.

    Rows are sorted best-first (direction given by the sign of each metric's
    "worst" value) and a row is kept whenever its x value improves on the
    best x seen so far.
    """
    xm, ym = metrics[xn], metrics[yn]
    y_sign = -1 if ym["worst"] < 0 else 1
    x_sign = -1 if xm["worst"] < 0 else 1

    # Throughput lives in column 3 of the raw CSV rows; latency in column 4.
    y_idx = 3 if yn == "throughput" else 4
    data.sort(key=lambda row: (y_sign * row[y_idx], x_sign * row[2]))

    best_x = xm["worst"]
    improves = (
        (lambda xv, lx: xv > lx) if best_x < 0 else (lambda xv, lx: xv < lx)
    )
    frontier = []
    for row in data:
        if improves(row[2], best_x):
            best_x = row[2]
            frontier.append(row)
    return frontier
def get_frontier(df, metric):
    """Return `df` reduced to its recall-vs-`metric` Pareto frontier."""
    rows = df.values.tolist()
    frontier_rows = create_pointset(rows, "k-nn", metric)
    return pd.DataFrame(frontier_rows, columns=df.columns)
def convert_json_to_csv_search(dataset, dataset_path):
    """Flatten each search-result JSON into CSVs, joining in build metadata.

    For every search JSON, writes three CSVs next to it: the raw rows
    (*_raw.csv) and the throughput/latency Pareto frontiers
    (*_throughput.csv, *_latency.csv). When a matching build CSV exists,
    its timing and extra parameter columns are spliced into each search row
    by matching on index_name.
    """
    for file, algo_name, df in read_file(dataset, dataset_path, "search"):
        try:
            # Build CSV uses the algorithm name before "_base" stripping.
            build_file = os.path.join(
                dataset_path, dataset, "result", "build", f"{algo_name}.csv"
            )
            algo_name = algo_name.replace("_base", "")
            df["name"] = df["name"].str.split("/").str[0]
            try:
                write = pd.DataFrame(
                    {
                        "algo_name": [algo_name] * len(df),
                        "index_name": df["name"],
                        "recall": df["Recall"],
                        "throughput": df["items_per_second"],
                        "latency": df["Latency"],
                    }
                )
            except Exception as e:
                # Note: if this fails, `write` stays undefined and the
                # outer except below reports the resulting NameError.
                print(
                    "Search file %s (%s) missing a key. Skipping..."
                    % (file, e)
                )
            # Carry over any benchmark columns not in the skip list.
            for name in df:
                if name not in skip_search_cols:
                    write[name] = df[name]

            if os.path.exists(build_file):
                build_df = pd.read_csv(build_file)
                write_ncols = len(write.columns)
                write["build time"] = None
                write["build threads"] = None
                write["build cpu_time"] = None
                write["build GPU"] = None

                try:
                    # Columns past index 6 of the build CSV are extra
                    # build parameters; mirror them into the search rows.
                    for col_idx in range(6, len(build_df.columns)):
                        col_name = build_df.columns[col_idx]
                        write[col_name] = None
                    # Join on index_name: copy build time and the extra
                    # parameter values into the matching search row.
                    for s_index, search_row in write.iterrows():
                        for b_index, build_row in build_df.iterrows():
                            if (
                                search_row["index_name"]
                                == build_row["index_name"]
                            ):
                                write.iloc[
                                    s_index, write_ncols
                                ] = build_df.iloc[b_index, 2]
                                write.iloc[
                                    s_index, write_ncols + 1 :
                                ] = build_df.iloc[b_index, 3:]
                                break
                except Exception as e:
                    print(
                        "Build file %s (%s) missing a key. Skipping..."
                        % (build_file, e)
                    )
            else:
                warnings.warn(
                    f"Build CSV not found for {algo_name}, "
                    f"build params won't be "
                    "appended in the Search CSV"
                )

            write.to_csv(file.replace(".json", "_raw.csv"), index=False)
            throughput = get_frontier(write, "throughput")
            throughput.to_csv(
                file.replace(".json", "_throughput.csv"), index=False
            )
            latency = get_frontier(write, "latency")
            latency.to_csv(file.replace(".json", "_latency.csv"), index=False)
        except Exception as e:
            print(
                "An error occurred processing file %s (%s). Skipping..."
                % (file, e)
            )
            traceback.print_exc()
def main():
    """CLI entry point: convert build/search benchmark JSON to CSV files."""
    # Default dataset location: $RAPIDS_DATASET_ROOT_DIR or ./datasets/.
    default_dataset_path = os.environ.get(
        "RAPIDS_DATASET_ROOT_DIR", os.path.join(os.getcwd(), "datasets/")
    )
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--dataset", help="dataset to download", default="glove-100-inner"
    )
    parser.add_argument(
        "--dataset-path",
        help="path to dataset folder",
        default=default_dataset_path,
    )
    # With no CLI arguments at all, show the help text instead of running.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    parsed = parser.parse_args()
    convert_json_to_csv_build(parsed.dataset, parsed.dataset_path)
    convert_json_to_csv_search(parsed.dataset, parsed.dataset_path)
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench | rapidsai_public_repos/raft/python/raft-ann-bench/src/raft-ann-bench/plot/__main__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is inspired by
# 1: https://github.com/erikbern/ann-benchmarks/blob/main/plot.py
# 2: https://github.com/erikbern/ann-benchmarks/blob/main/ann_benchmarks/plotting/utils.py # noqa: E501
# 3: https://github.com/erikbern/ann-benchmarks/blob/main/ann_benchmarks/plotting/metrics.py # noqa: E501
# Licence: https://github.com/erikbern/ann-benchmarks/blob/main/LICENSE
import argparse
import itertools
import os
import sys
from collections import OrderedDict
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
mpl.use("Agg")
# Plot-axis metadata keyed by metric name: axis label ("description"), the
# "worst" possible value (its sign encodes whether larger or smaller values
# are better for frontier extraction), and optional fixed axis limits.
metrics = {
    "k-nn": {
        "description": "Recall",
        "worst": float("-inf"),
        "lim": [0.0, 1.03],
    },
    "throughput": {
        "description": "Queries per second (1/s)",
        "worst": float("-inf"),
    },
    "latency": {
        "description": "Search Latency (s)",
        "worst": float("inf"),
    },
}
def positive_int(input_str: str) -> int:
    """argparse type: parse a strictly positive integer or raise ArgumentTypeError."""
    try:
        value = int(input_str)
        if value < 1:
            raise ValueError
    except ValueError:
        raise argparse.ArgumentTypeError(
            f"{input_str} is not a positive integer"
        )
    return value
def generate_n_colors(n):
    """Produce n visually-distinct RGBA tuples.

    Starts from a fixed red and greedily adds, from a coarse RGB grid, the
    candidate that maximizes its distance to the closest already-chosen
    color.
    """
    grid = np.linspace(0.3, 0.9, 7)
    chosen = [(0.9, 0.4, 0.4, 1.0)]

    def sq_dist(a, b):
        return sum((u - v) ** 2 for u, v in zip(a, b))

    while len(chosen) < n:
        candidate = max(
            itertools.product(grid, grid, grid),
            key=lambda c: min(sq_dist(c, prev) for prev in chosen),
        )
        chosen.append(candidate + (1.0,))
    return chosen
def create_linestyles(unique_algorithms):
    """Assign each algorithm a (color, faded-color, linestyle, marker) tuple."""
    palette = generate_n_colors(len(unique_algorithms))
    colors = dict(zip(unique_algorithms, palette))
    line_cycle = ["--", "-.", "-", ":"]
    marker_cycle = ["+", "<", "o", "*", "x"]
    linestyles = {
        algo: line_cycle[i % 4] for i, algo in enumerate(unique_algorithms)
    }
    markerstyles = {
        algo: marker_cycle[i % 5] for i, algo in enumerate(unique_algorithms)
    }
    # Faded variant: same RGB with alpha dropped to 0.3.
    faded = {
        algo: (r, g, b, 0.3) for algo, (r, g, b, a) in colors.items()
    }
    return {
        algo: (colors[algo], faded[algo], linestyles[algo], markerstyles[algo])
        for algo in unique_algorithms
    }
def create_plot_search(
    all_data,
    x_scale,
    y_scale,
    fn_out,
    linestyles,
    dataset,
    k,
    batch_size,
    mode,
    time_unit,
):
    """Plot recall (x) versus throughput/latency (y) and save a PNG.

    Parameters
    ----------
    all_data : dict
        Maps algorithm name -> list of rows of the form
        [algo_name, index_name, recall, y_value] (see load_lines).
    x_scale : str
        Matplotlib x-axis scale name, or a custom "a<alpha>" scale
        (e.g. "a3") handled below.
    y_scale : str
        Matplotlib y-axis scale name.
    fn_out : str
        Output PNG file path.
    linestyles : dict
        algo -> (color, faded, linestyle, marker), from create_linestyles.
    dataset, k, batch_size
        Used only in the plot title.
    mode : {"throughput", "latency"}
        Which metric is on the y-axis.
    time_unit : str
        Replaces "(s)" in the y-axis label when mode == "latency".
    """
    # x-axis is always recall; y-axis metadata depends on the mode.
    xn = "k-nn"
    xm, ym = (metrics[xn], metrics[mode])
    # Now generate each plot
    handles = []
    labels = []
    plt.figure(figsize=(12, 9))
    # Sorting by mean y-value helps aligning plots with labels
    def mean_y(algo):
        points = np.array(all_data[algo], dtype=object)
        # Column 3 is the y metric; negated log-mean gives a stable sort key.
        return -np.log(np.array(points[:, 3], dtype=np.float32)).mean()
    # Find range for logit x-scale
    min_x, max_x = 1, 0
    for algo in sorted(all_data.keys(), key=mean_y):
        points = np.array(all_data[algo], dtype=object)
        # Column 2 is recall, column 3 is the y metric.
        xs = points[:, 2]
        ys = points[:, 3]
        # Track the data range inside (0, 1) for the logit-scale limits.
        min_x = min([min_x] + [x for x in xs if x > 0])
        max_x = max([max_x] + [x for x in xs if x < 1])
        color, faded, linestyle, marker = linestyles[algo]
        (handle,) = plt.plot(
            xs,
            ys,
            "-",
            label=algo,
            color=color,
            ms=7,
            mew=3,
            lw=3,
            marker=marker,
        )
        handles.append(handle)
        labels.append(algo)
    ax = plt.gca()
    y_description = ym["description"]
    if mode == "latency":
        # Swap the default seconds unit for the requested time unit.
        y_description = y_description.replace("(s)", f"({time_unit})")
    ax.set_ylabel(y_description)
    ax.set_xlabel("Recall")
    # Custom scales of the type --x-scale a3
    if x_scale[0] == "a":
        alpha = float(x_scale[1:])
        # Forward/inverse transforms for a power-law emphasis near recall=1.
        def fun(x):
            return 1 - (1 - x) ** (1 / alpha)
        def inv_fun(x):
            return 1 - (1 - x) ** alpha
        ax.set_xscale("function", functions=(fun, inv_fun))
        if alpha <= 3:
            ticks = [inv_fun(x) for x in np.arange(0, 1.2, 0.2)]
            plt.xticks(ticks)
        if alpha > 3:
            from matplotlib import ticker
            ax.xaxis.set_major_formatter(ticker.LogitFormatter())
            # plt.xticks(ticker.LogitLocator().tick_values(min_x, max_x))
            plt.xticks([0, 1 / 2, 1 - 1e-1, 1 - 1e-2, 1 - 1e-3, 1 - 1e-4, 1])
    # Other x-scales
    else:
        ax.set_xscale(x_scale)
    ax.set_yscale(y_scale)
    ax.set_title(f"{dataset} k={k} batch_size={batch_size}")
    plt.gca().get_position()
    # plt.gca().set_position([box.x0, box.y0, box.width * 0.8, box.height])
    # Legend goes outside the axes, to the right.
    ax.legend(
        handles,
        labels,
        loc="center left",
        bbox_to_anchor=(1, 0.5),
        prop={"size": 9},
    )
    plt.grid(visible=True, which="major", color="0.65", linestyle="-")
    plt.setp(ax.get_xminorticklabels(), visible=True)
    # Logit scale has to be a subset of (0,1)
    if "lim" in xm and x_scale != "logit":
        x0, x1 = xm["lim"]
        plt.xlim(max(x0, 0), min(x1, 1))
    elif x_scale == "logit":
        plt.xlim(min_x, max_x)
    if "lim" in ym:
        plt.ylim(ym["lim"])
    # Workaround for bug https://github.com/matplotlib/matplotlib/issues/6789
    ax.spines["bottom"]._adjust_location()
    print(f"writing search output to {fn_out}")
    plt.savefig(fn_out, bbox_inches="tight")
    plt.close()
def create_plot_build(
    build_results, search_results, linestyles, fn_out, dataset
):
    """Plot, per algorithm, the build time of the index that achieved the
    highest QPS within each recall bracket (85-90%, 90-95%, >=95%), as a
    grouped bar chart saved to ``fn_out``.

    build_results maps (algo_name, index_name) -> rows whose third column
    is the build time; search_results maps algo -> rows of
    [algo_name, index_name, recall, qps] (see load_lines).
    """
    # Per-position trackers: best QPS seen, its build time, and its index
    # name, for each of the three recall brackets.
    qps_85 = [-1] * len(linestyles)
    bt_85 = [0] * len(linestyles)
    i_85 = [-1] * len(linestyles)
    qps_90 = [-1] * len(linestyles)
    bt_90 = [0] * len(linestyles)
    i_90 = [-1] * len(linestyles)
    qps_95 = [-1] * len(linestyles)
    bt_95 = [0] * len(linestyles)
    i_95 = [-1] * len(linestyles)
    data = OrderedDict()
    colors = OrderedDict()
    # Sorting by mean y-value helps aligning plots with labels
    def mean_y(algo):
        points = np.array(search_results[algo], dtype=object)
        return -np.log(np.array(points[:, 3], dtype=np.float32)).mean()
    for pos, algo in enumerate(sorted(search_results.keys(), key=mean_y)):
        points = np.array(search_results[algo], dtype=object)
        xs = points[:, 2]
        ys = points[:, 3]
        ls = points[:, 0]
        idxs = points[:, 1]
        # x is recall, y is qps, ls is algo_name, idxs is index_name
        for i in range(len(xs)):
            # Keep the build time of the highest-QPS config in each bracket.
            if xs[i] >= 0.85 and xs[i] < 0.9 and ys[i] > qps_85[pos]:
                qps_85[pos] = ys[i]
                bt_85[pos] = build_results[(ls[i], idxs[i])][0][2]
                i_85[pos] = idxs[i]
            elif xs[i] >= 0.9 and xs[i] < 0.95 and ys[i] > qps_90[pos]:
                qps_90[pos] = ys[i]
                bt_90[pos] = build_results[(ls[i], idxs[i])][0][2]
                i_90[pos] = idxs[i]
            elif xs[i] >= 0.95 and ys[i] > qps_95[pos]:
                qps_95[pos] = ys[i]
                bt_95[pos] = build_results[(ls[i], idxs[i])][0][2]
                i_95[pos] = idxs[i]
        data[algo] = [bt_85[pos], bt_90[pos], bt_95[pos]]
        colors[algo] = linestyles[algo][0]
    index = ["@85% Recall", "@90% Recall", "@95% Recall"]
    df = pd.DataFrame(data, index=index)
    # NOTE(review): df.plot.bar creates its own figure, so the figsize set
    # here appears unused — confirm before removing.
    plt.figure(figsize=(12, 9))
    ax = df.plot.bar(rot=0, color=colors)
    fig = ax.get_figure()
    print(f"writing build output to {fn_out}")
    plt.title("Build Time for Highest QPS")
    plt.suptitle(f"{dataset}")
    plt.ylabel("Build Time (s)")
    fig.savefig(fn_out)
def load_lines(results_path, result_files, method, index_key, mode, time_unit):
    """Parse benchmark CSV files into a dict of result rows.

    Parameters
    ----------
    results_path : str
        Directory containing the CSV files.
    result_files : iterable of str
        CSV filenames (relative to ``results_path``) to parse.
    method : {"build", "search"}
        Selects which CSV columns to extract: build time (column 2) for
        "build"; recall (column 2) plus throughput (column 3) or latency
        (column 4) for "search".
    index_key : {"algo", "index"}
        Key the results by algorithm name, or by (algo, index) tuple.
    mode : {"throughput", "latency"}
        Search metric to extract (ignored for "build").
    time_unit : {"s", "ms", "us"}
        Unit to convert latency values (stored in seconds) into.

    Returns
    -------
    dict
        key -> list of [algo_name, index_name, *metric_values] rows.
        Files that fail to parse are reported and skipped.
    """
    results = dict()
    for result_filename in result_files:
        try:
            with open(os.path.join(results_path, result_filename), "r") as f:
                lines = f.readlines()
                # Drop a trailing blank line, if any (guard empty files).
                if lines and lines[-1] == "\n":
                    lines = lines[:-1]
                if method == "build":
                    key_idx = [2]
                elif method == "search":
                    y_idx = 3 if mode == "throughput" else 4
                    key_idx = [2, y_idx]
                else:
                    # Previously fell through to a NameError; fail clearly.
                    raise ValueError(f"unknown method: {method}")
                for line in lines[1:]:  # skip the CSV header row
                    split_lines = line.split(",")
                    algo_name = split_lines[0]
                    index_name = split_lines[1]
                    if index_key == "algo":
                        dict_key = algo_name
                    elif index_key == "index":
                        dict_key = (algo_name, index_name)
                    if dict_key not in results:
                        results[dict_key] = []
                    to_add = [algo_name, index_name]
                    for key_i in key_idx:
                        to_add.append(float(split_lines[key_i]))
                    if (
                        mode == "latency"
                        and time_unit != "s"
                        and method == "search"
                    ):
                        # Latency is recorded in seconds; convert to ms/us.
                        to_add[-1] = (
                            to_add[-1] * (10**3)
                            if time_unit == "ms"
                            else to_add[-1] * (10**6)
                        )
                    results[dict_key].append(to_add)
        except Exception as e:
            # Report the cause instead of swallowing it silently.
            print(
                f"An error occurred processing file {result_filename}: "
                f"{e}. Skipping..."
            )
    return results
def load_all_results(
    dataset_path,
    algorithms,
    groups,
    algo_groups,
    k,
    batch_size,
    method,
    index_key,
    raw,
    mode,
    time_unit,
):
    """Locate and parse the CSV result files for one dataset.

    Scans ``<dataset_path>/result/<method>`` for CSVs, filters them by
    method, mode, k/batch_size, algorithm/group selections, then delegates
    parsing to load_lines.

    Parameters
    ----------
    dataset_path : str
        Path to the dataset directory (results live under "result/").
    algorithms : list of str
        Algorithm names to keep; empty means no algorithm filter.
    groups : list of str
        Parameter-group names to keep (e.g. ["base"]).
    algo_groups : list of str
        Extra "<algorithm>.<group>" selectors to include additionally.
    k, batch_size : int
        Search files are filtered by the "{k}-{batch_size}" infix.
    method : {"build", "search"}
    index_key : {"algo", "index"}
        Passed through to load_lines.
    raw : bool
        For search, select "*_raw.csv" files instead of "*_{mode}.csv".
    mode, time_unit
        Passed through to load_lines.

    Returns
    -------
    dict
        Parsed results keyed per ``index_key`` (see load_lines).

    Raises
    ------
    FileNotFoundError
        If no matching CSV files exist in the results directory.
    """
    results_path = os.path.join(dataset_path, "result", method)
    result_files = os.listdir(results_path)
    # Narrow the directory listing to the CSVs relevant for this method.
    if method == "build":
        result_files = [
            result_file
            for result_file in result_files
            if ".csv" in result_file
        ]
    elif method == "search":
        if raw:
            suffix = "_raw"
        else:
            suffix = f"_{mode}"
        result_files = [
            result_file
            for result_file in result_files
            if f"{suffix}.csv" in result_file
        ]
    if len(result_files) == 0:
        raise FileNotFoundError(f"No CSV result files found in {results_path}")
    if method == "search":
        # Keep only files for the requested k and batch size; the part
        # before the first "-" carries the "<algorithm>_<group>" prefix.
        result_files = [
            result_filename
            for result_filename in result_files
            if f"{k}-{batch_size}" in result_filename
        ]
        algo_group_files = [
            result_filename.split("-")[0] for result_filename in result_files
        ]
    else:
        algo_group_files = [
            result_filename for result_filename in result_files
        ]
    # Split each "<algorithm>_<group>" name into its two components; the
    # group is the last "_"-separated token.
    for i in range(len(algo_group_files)):
        algo_group = algo_group_files[i].replace(".csv", "").split("_")
        algo_group_files[i] = ("_".join(algo_group[:-1]), algo_group[-1])
    # Transpose to ([algorithms...], [groups...]) parallel to result_files.
    algo_group_files = list(zip(*algo_group_files))
    if len(algorithms) > 0:
        final_results = [
            result_files[i]
            for i in range(len(result_files))
            if (algo_group_files[0][i] in algorithms)
            and (algo_group_files[1][i] in groups)
        ]
    else:
        final_results = [
            result_files[i]
            for i in range(len(result_files))
            if (algo_group_files[1][i] in groups)
        ]
    # "--algo-groups" selectors add files on top of the filters above.
    if len(algo_groups) > 0:
        split_algo_groups = [
            algo_group.split(".") for algo_group in algo_groups
        ]
        split_algo_groups = list(zip(*split_algo_groups))
        final_algo_groups = [
            result_files[i]
            for i in range(len(result_files))
            if (algo_group_files[0][i] in split_algo_groups[0])
            and (algo_group_files[1][i] in split_algo_groups[1])
        ]
        final_results = final_results + final_algo_groups
        # Deduplicate files selected by both mechanisms.
        final_results = set(final_results)
    results = load_lines(
        results_path, final_results, method, index_key, mode, time_unit
    )
    return results
def main():
    """CLI entry point: parse arguments, load benchmark results, write PNGs.

    Produces a search plot (recall vs. throughput/latency) and/or a build
    plot (build time at recall thresholds). When neither --build nor
    --search is given, both plots are generated.
    """
    call_path = os.getcwd()
    # The dataset root can be overridden through the environment.
    if "RAPIDS_DATASET_ROOT_DIR" in os.environ:
        default_dataset_path = os.getenv("RAPIDS_DATASET_ROOT_DIR")
    else:
        default_dataset_path = os.path.join(call_path, "datasets/")
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--dataset", help="dataset to plot", default="glove-100-inner"
    )
    parser.add_argument(
        "--dataset-path",
        help="path to dataset folder",
        default=default_dataset_path,
    )
    parser.add_argument(
        "--output-filepath",
        help="directory for PNG to be saved",
        default=os.getcwd(),
    )
    parser.add_argument(
        "--algorithms",
        help="plot only comma separated list of named \
        algorithms. If parameters `groups` and `algo-groups \
        are both undefined, then group `base` is plot by default",
        default=None,
    )
    parser.add_argument(
        "--groups",
        help="plot only comma separated groups of parameters",
        default="base",
    )
    # The option string was previously listed twice in this call, which
    # registered the same flag redundantly.
    parser.add_argument(
        "--algo-groups",
        help='add comma separated <algorithm>.<group> to plot. \
            Example usage: "--algo-groups=raft_cagra.large,hnswlib.large"',
    )
    parser.add_argument(
        "-k",
        "--count",
        default=10,
        type=positive_int,
        help="the number of nearest neighbors to search for",
    )
    parser.add_argument(
        "-bs",
        "--batch-size",
        default=10000,
        type=positive_int,
        help="number of query vectors to use in each query trial",
    )
    parser.add_argument("--build", action="store_true")
    parser.add_argument("--search", action="store_true")
    parser.add_argument(
        "--x-scale",
        help="Scale to use when drawing the X-axis. \
        Typically linear, logit or a2",
        default="linear",
    )
    parser.add_argument(
        "--y-scale",
        help="Scale to use when drawing the Y-axis",
        choices=["linear", "log", "symlog", "logit"],
        default="linear",
    )
    parser.add_argument(
        "--mode",
        help="search mode whose Pareto frontier is used on the y-axis",
        choices=["throughput", "latency"],
        default="throughput",
    )
    parser.add_argument(
        "--time-unit",
        help="time unit to plot when mode is latency",
        choices=["s", "ms", "us"],
        default="ms",
    )
    parser.add_argument(
        "--raw",
        help="Show raw results (not just Pareto frontier) of mode arg",
        action="store_true",
    )
    # With no arguments at all, show usage instead of plotting defaults.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    # Comma-separated selectors become lists; empty list means "no filter".
    if args.algorithms:
        algorithms = args.algorithms.split(",")
    else:
        algorithms = []
    groups = args.groups.split(",")
    if args.algo_groups:
        algo_groups = args.algo_groups.split(",")
    else:
        algo_groups = []
    k = args.count
    batch_size = args.batch_size
    # Default to producing both plots when neither flag is given.
    if not args.build and not args.search:
        build = True
        search = True
    else:
        build = args.build
        search = args.search
    search_output_filepath = os.path.join(
        args.output_filepath,
        f"search-{args.dataset}-k{k}-batch_size{batch_size}.png",
    )
    build_output_filepath = os.path.join(
        args.output_filepath,
        f"build-{args.dataset}.png",
    )
    # Search results are always loaded: the build plot also needs them to
    # pick the highest-QPS configuration per recall bracket.
    search_results = load_all_results(
        os.path.join(args.dataset_path, args.dataset),
        algorithms,
        groups,
        algo_groups,
        k,
        batch_size,
        "search",
        "algo",
        args.raw,
        args.mode,
        args.time_unit,
    )
    linestyles = create_linestyles(sorted(search_results.keys()))
    if search:
        create_plot_search(
            search_results,
            args.x_scale,
            args.y_scale,
            search_output_filepath,
            linestyles,
            args.dataset,
            k,
            batch_size,
            args.mode,
            args.time_unit,
        )
    if build:
        build_results = load_all_results(
            os.path.join(args.dataset_path, args.dataset),
            algorithms,
            groups,
            algo_groups,
            k,
            batch_size,
            "build",
            "index",
            args.raw,
            args.mode,
            args.time_unit,
        )
        create_plot_build(
            build_results,
            search_results,
            linestyles,
            build_output_filepath,
            args.dataset,
        )
# Entry point when the module is executed as a script.
if __name__ == "__main__":
    main()
| 0 |
rapidsai_public_repos/raft/python | rapidsai_public_repos/raft/python/pylibraft/setup.cfg | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
[isort]
line_length=79
multi_line_output=3
include_trailing_comma=True
force_grid_wrap=0
combine_as_imports=True
order_by_type=True
known_dask=
dask
distributed
dask_cuda
known_rapids=
nvtext
cudf
cuml
cugraph
dask_cudf
rmm
known_first_party=
raft
pylibraft
default_section=THIRDPARTY
sections=FUTURE,STDLIB,THIRDPARTY,DASK,RAPIDS,FIRSTPARTY,LOCALFOLDER
skip=
thirdparty
.eggs
.git
.hg
.mypy_cache
.tox
.venv
_build
buck-out
build
dist
__init__.py
| 0 |
rapidsai_public_repos/raft/python | rapidsai_public_repos/raft/python/pylibraft/pyproject.toml | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[build-system]
requires = [
"cmake>=3.26.4",
"cuda-python>=11.7.1,<12.0a0",
"cython>=3.0.0",
"ninja",
"rmm==24.2.*",
"scikit-build>=0.13.1",
"setuptools",
"wheel",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
build-backend = "setuptools.build_meta"
[project]
name = "pylibraft"
dynamic = ["version"]
description = "RAFT: Reusable Algorithms Functions and other Tools"
readme = { file = "README.md", content-type = "text/markdown" }
authors = [
{ name = "NVIDIA Corporation" },
]
license = { text = "Apache 2.0" }
requires-python = ">=3.9"
dependencies = [
"cuda-python>=11.7.1,<12.0a0",
"numpy>=1.21",
"rmm==24.2.*",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
classifiers = [
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
[project.optional-dependencies]
test = [
"cupy-cuda11x>=12.0.0",
"pytest",
"pytest-cov",
"scikit-learn",
"scipy",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
[project.urls]
Homepage = "https://github.com/rapidsai/raft"
Documentation = "https://docs.rapids.ai/api/raft/stable/"
[tool.setuptools]
license-files = ["LICENSE"]
[tool.setuptools.dynamic]
version = {file = "pylibraft/VERSION"}
[tool.isort]
line_length = 79
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
combine_as_imports = true
order_by_type = true
known_dask = [
"dask",
"distributed",
"dask_cuda",
]
known_rapids = [
"rmm",
]
known_first_party = [
"pylibraft",
]
default_section = "THIRDPARTY"
sections = [
"FUTURE",
"STDLIB",
"THIRDPARTY",
"DASK",
"RAPIDS",
"FIRSTPARTY",
"LOCALFOLDER",
]
skip = [
"thirdparty",
".eggs",
".git",
".hg",
".mypy_cache",
".tox",
".venv",
"_build",
"buck-out",
"build",
"dist",
"__init__.py",
]
| 0 |
rapidsai_public_repos/raft/python | rapidsai_public_repos/raft/python/pylibraft/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
include(../../fetch_rapids.cmake)
set(pylibraft_version 24.02.00)
# We always need CUDA for pylibraft because the raft dependency brings in a header-only cuco
# dependency that enables CUDA unconditionally.
include(rapids-cuda)
rapids_cuda_init_architectures(pylibraft)
project(
pylibraft
VERSION ${pylibraft_version}
LANGUAGES # TODO: Building Python extension modules via the python_extension_module requires the C
# language to be enabled here. The test project that is built in scikit-build to verify
# various linking options for the python library is hardcoded to build with C, so until
# that is fixed we need to keep C.
C CXX CUDA
)
option(FIND_RAFT_CPP "Search for existing RAFT C++ installations before defaulting to local files"
ON
)
option(RAFT_BUILD_WHEELS "Whether this build is generating a Python wheel." OFF)
# If the user requested it we attempt to find RAFT.
if(FIND_RAFT_CPP)
find_package(raft ${pylibraft_version} REQUIRED COMPONENTS compiled)
if(NOT TARGET raft::raft_lib)
message(
FATAL_ERROR
"Building against a preexisting libraft library requires the compiled libraft to have been built!"
)
endif()
else()
set(raft_FOUND OFF)
endif()
include(rapids-cython)
if(NOT raft_FOUND)
set(BUILD_TESTS OFF)
set(BUILD_PRIMS_BENCH OFF)
set(BUILD_ANN_BENCH OFF)
set(RAFT_COMPILE_LIBRARY ON)
set(_exclude_from_all "")
if(RAFT_BUILD_WHEELS)
# Statically link dependencies if building wheels
set(CUDA_STATIC_RUNTIME ON)
# Don't install the raft C++ targets into wheels
set(_exclude_from_all EXCLUDE_FROM_ALL)
endif()
add_subdirectory(../../cpp raft-cpp ${_exclude_from_all})
# When building the C++ libraries from source we must copy libraft.so alongside the
# pairwise_distance and random Cython libraries TODO: when we have a single 'compiled' raft
# library, we shouldn't need this
set(cython_lib_dir pylibraft)
install(TARGETS raft_lib DESTINATION ${cython_lib_dir})
endif()
rapids_cython_init()
add_subdirectory(pylibraft/common)
add_subdirectory(pylibraft/distance)
add_subdirectory(pylibraft/matrix)
add_subdirectory(pylibraft/neighbors)
add_subdirectory(pylibraft/random)
add_subdirectory(pylibraft/cluster)
if(DEFINED cython_lib_dir)
rapids_cython_add_rpath_entries(TARGET raft PATHS "${cython_lib_dir}")
endif()
| 0 |
rapidsai_public_repos/raft/python | rapidsai_public_repos/raft/python/pylibraft/setup.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import find_packages
from skbuild import setup
def exclude_libcxx_symlink(cmake_manifest):
return list(
filter(
lambda name: not ("include/rapids/libcxx/include" in name),
cmake_manifest,
)
)
packages = find_packages(include=["pylibraft*"])
setup(
# Don't want libcxx getting pulled into wheel builds.
cmake_process_manifest_hook=exclude_libcxx_symlink,
packages=packages,
package_data={key: ["VERSION", "*.pxd"] for key in packages},
zip_safe=False,
)
| 0 |
rapidsai_public_repos/raft/python | rapidsai_public_repos/raft/python/pylibraft/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2020 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos/raft/python | rapidsai_public_repos/raft/python/pylibraft/.coveragerc | # Configuration file for Python coverage tests
[run]
source = pylibraft | 0 |
rapidsai_public_repos/raft/python/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/_version.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib.resources
__version__ = (
importlib.resources.files("pylibraft")
.joinpath("VERSION")
.read_text()
.strip()
)
__git_commit__ = ""
| 0 |
rapidsai_public_repos/raft/python/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/config.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
SUPPORTED_OUTPUT_TYPES = ["torch", "cupy", "raft"]
output_as_ = "raft" # By default, return device_ndarray from functions
def set_output_as(output):
"""
Set output format for RAFT functions.
Calling this function will change the output type of RAFT functions.
By default RAFT returns a `pylibraft.common.device_ndarray` for arrays
on GPU memory. Calling `set_output_as` allows you to have RAFT return
arrays as cupy arrays or pytorch tensors instead. You can also have
RAFT convert the output to other frameworks by passing a callable to
do the conversion here.
Notes
-----
Returning arrays in cupy or torch format requires you to install
cupy or torch.
Parameters
----------
output : { "raft", "cupy", "torch" } or callable
The output format to convert to. Can either be a str describing the
framework to convert to, or a callable that accepts a
device_ndarray and returns the converted type.
"""
if output not in SUPPORTED_OUTPUT_TYPES and not callable(output):
raise ValueError("Unsupported output option " % output)
global output_as_
output_as_ = output
| 0 |
rapidsai_public_repos/raft/python/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/__init__.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pylibraft._version import __git_commit__, __version__
| 0 |
rapidsai_public_repos/raft/python/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/VERSION | 24.02.00
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/random/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Set the list of Cython files to build
set(cython_sources rmat_rectangular_generator.pyx)
# TODO: should finally be replaced with 'compiled' library to be more generic, when that is
# available
set(linked_libraries raft::raft raft::compiled)
# Build all of the Cython targets
# NOTE: MODULE_PREFIX namespaces the generated extension targets
# (random_<name>) so they do not collide with the other pylibraft
# Cython subdirectories that use the same source-file stems.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" ASSOCIATED_TARGETS raft MODULE_PREFIX random_
)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/random/rmat_rectangular_generator.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
from cython.operator cimport dereference as deref
from libc.stdint cimport int64_t, uintptr_t
from pylibraft.common import Handle, cai_wrapper
from pylibraft.common.handle import auto_sync_handle
from libcpp cimport bool
from pylibraft.common.handle cimport device_resources
from pylibraft.random.cpp.rng_state cimport RngState
cdef extern from "raft_runtime/random/rmat_rectangular_generator.hpp" \
namespace "raft::runtime::random" nogil:
cdef void rmat_rectangular_gen(const device_resources &handle,
int* out,
int* out_src,
int* out_dst,
const float* theta,
int r_scale,
int c_scale,
int n_edges,
RngState& r) except +
cdef void rmat_rectangular_gen(const device_resources &handle,
int64_t* out,
int64_t* out_src,
int64_t* out_dst,
const float* theta,
int64_t r_scale,
int64_t c_scale,
int64_t n_edges,
RngState& r) except +
cdef void rmat_rectangular_gen(const device_resources &handle,
int* out,
int* out_src,
int* out_dst,
const double* theta,
int r_scale,
int c_scale,
int n_edges,
RngState& r) except +
cdef void rmat_rectangular_gen(const device_resources &handle,
int64_t* out,
int64_t* out_src,
int64_t* out_dst,
const double* theta,
int64_t r_scale,
int64_t c_scale,
int64_t n_edges,
RngState& r) except +
@auto_sync_handle
def rmat(out, theta, r_scale, c_scale, seed=12345, handle=None):
    """
    Generate RMAT adjacency list based on the input distribution.
    Parameters
    ----------
    out: CUDA array interface compliant matrix shape (n_edges, 2). This will
        contain the src/dst node ids stored consecutively like a pair.
        Supported dtypes [int32, int64].
    theta: CUDA array interface compliant matrix shape
        (max(r_scale, c_scale) * 4) This stores the probability distribution
        at each RMAT level. Supported dtypes [float32, float64].
    r_scale: log2 of number of source nodes
    c_scale: log2 of number of destination nodes
    seed: random seed used for reproducibility
    {handle_docstring}
    Raises
    ------
    ValueError
        If the (out, theta) dtype combination is not supported.
    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import Handle
    >>> from pylibraft.random import rmat
    >>> n_edges = 5000
    >>> r_scale = 16
    >>> c_scale = 14
    >>> theta_len = max(r_scale, c_scale) * 4
    >>> out = cp.empty((n_edges, 2), dtype=cp.int32)
    >>> theta = cp.random.random_sample(theta_len, dtype=cp.float32)
    >>> # A single RAFT handle can optionally be reused across
    >>> # pylibraft functions.
    >>> handle = Handle()
    >>> rmat(out, theta, r_scale, c_scale, handle=handle)
    >>> # pylibraft functions are often asynchronous so the
    >>> # handle needs to be explicitly synchronized
    >>> handle.sync()
    """
    if theta is None:
        raise Exception("'theta' cannot be None!")
    if out is None:
        raise Exception("'out' cannot be None!")
    out_cai = cai_wrapper(out)
    theta_cai = cai_wrapper(theta)
    # Each row of `out` holds one (src, dst) edge pair.
    n_edges = out_cai.shape[0]
    out_ptr = <uintptr_t>out_cai.data
    theta_ptr = <uintptr_t>theta_cai.data
    out_dt = out_cai.dtype
    theta_dt = theta_cai.dtype
    cdef RngState *rng = new RngState(seed)
    handle = handle if handle is not None else Handle()
    cdef device_resources *h = <device_resources*><size_t>handle.getHandle()
    try:
        # Dispatch to the C++ overload matching the (index, theta) dtypes.
        if out_dt == np.int32 and theta_dt == np.float32:
            rmat_rectangular_gen(deref(h),
                                 <int*> out_ptr,
                                 <int*> NULL,
                                 <int*> NULL,
                                 <float*> theta_ptr,
                                 <int>r_scale,
                                 <int>c_scale,
                                 <int>n_edges,
                                 deref(rng))
        elif out_dt == np.int64 and theta_dt == np.float32:
            rmat_rectangular_gen(deref(h),
                                 <int64_t*> out_ptr,
                                 <int64_t*> NULL,
                                 <int64_t*> NULL,
                                 <float*> theta_ptr,
                                 <int64_t>r_scale,
                                 <int64_t>c_scale,
                                 <int64_t>n_edges,
                                 deref(rng))
        elif out_dt == np.int32 and theta_dt == np.float64:
            rmat_rectangular_gen(deref(h),
                                 <int*> out_ptr,
                                 <int*> NULL,
                                 <int*> NULL,
                                 <double*> theta_ptr,
                                 <int>r_scale,
                                 <int>c_scale,
                                 <int>n_edges,
                                 deref(rng))
        elif out_dt == np.int64 and theta_dt == np.float64:
            rmat_rectangular_gen(deref(h),
                                 <int64_t*> out_ptr,
                                 <int64_t*> NULL,
                                 <int64_t*> NULL,
                                 <double*> theta_ptr,
                                 <int64_t>r_scale,
                                 <int64_t>c_scale,
                                 <int64_t>n_edges,
                                 deref(rng))
        else:
            raise ValueError("dtype out=%s and theta=%s not supported" %
                             (out_dt, theta_dt))
    finally:
        # Bug fix: `rng` was allocated with C++ `new` but never released,
        # leaking an RngState on every call (including the error path
        # above). In Cython, `del` on a C++ pointer invokes `delete`.
        del rng
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/random/__init__.pxd | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/random/__init__.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .rmat_rectangular_generator import rmat
__all__ = ["rmat"]
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/random | rapidsai_public_repos/raft/python/pylibraft/pylibraft/random/cpp/rng_state.pxd | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from libc.stdint cimport uint64_t
cdef extern from "raft/random/rng_state.hpp" namespace "raft::random" nogil:
ctypedef enum GeneratorType:
GenPhilox "raft::random::GeneratorType::GenPhilox"
GenPC "raft::random::GeneratorType::GenPC"
cdef cppclass RngState:
RngState(uint64_t seed) except +
uint64_t seed
uint64_t base_subsequence
GeneratorType type
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/distance/distance_type.pxd | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
cdef extern from "raft/distance/distance_types.hpp" namespace "raft::distance":
ctypedef enum DistanceType:
L2Expanded "raft::distance::DistanceType::L2Expanded"
L2SqrtExpanded "raft::distance::DistanceType::L2SqrtExpanded"
CosineExpanded "raft::distance::DistanceType::CosineExpanded"
L1 "raft::distance::DistanceType::L1"
L2Unexpanded "raft::distance::DistanceType::L2Unexpanded"
L2SqrtUnexpanded "raft::distance::DistanceType::L2SqrtUnexpanded"
InnerProduct "raft::distance::DistanceType::InnerProduct"
Linf "raft::distance::DistanceType::Linf"
Canberra "raft::distance::DistanceType::Canberra"
LpUnexpanded "raft::distance::DistanceType::LpUnexpanded"
CorrelationExpanded "raft::distance::DistanceType::CorrelationExpanded"
JaccardExpanded "raft::distance::DistanceType::JaccardExpanded"
HellingerExpanded "raft::distance::DistanceType::HellingerExpanded"
Haversine "raft::distance::DistanceType::Haversine"
BrayCurtis "raft::distance::DistanceType::BrayCurtis"
JensenShannon "raft::distance::DistanceType::JensenShannon"
HammingUnexpanded "raft::distance::DistanceType::HammingUnexpanded"
KLDivergence "raft::distance::DistanceType::KLDivergence"
RusselRaoExpanded "raft::distance::DistanceType::RusselRaoExpanded"
DiceExpanded "raft::distance::DistanceType::DiceExpanded"
Precomputed "raft::distance::DistanceType::Precomputed"
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/distance/pairwise_distance.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
from cython.operator cimport dereference as deref
from libc.stdint cimport uintptr_t
from libcpp cimport bool
from .distance_type cimport DistanceType
from pylibraft.common import Handle
from pylibraft.common.handle import auto_sync_handle
from pylibraft.common.handle cimport device_resources
from pylibraft.common import auto_convert_output, cai_wrapper, device_ndarray
cdef extern from "raft_runtime/distance/pairwise_distance.hpp" \
namespace "raft::runtime::distance" nogil:
cdef void pairwise_distance(const device_resources &handle,
float *x,
float *y,
float *dists,
int m,
int n,
int k,
DistanceType metric,
bool isRowMajor,
float metric_arg) except +
cdef void pairwise_distance(const device_resources &handle,
double *x,
double *y,
double *dists,
int m,
int n,
int k,
DistanceType metric,
bool isRowMajor,
float metric_arg) except +
# Mapping from user-facing metric names to the raft DistanceType enum.
# Several names alias the same enum value ("l2"/"euclidean",
# "l1"/"cityblock", "lp"/"minkowski").
DISTANCE_TYPES = {
    "l2": DistanceType.L2SqrtExpanded,
    "sqeuclidean": DistanceType.L2Expanded,
    "euclidean": DistanceType.L2SqrtExpanded,
    "l1": DistanceType.L1,
    "cityblock": DistanceType.L1,
    "inner_product": DistanceType.InnerProduct,
    "chebyshev": DistanceType.Linf,
    "canberra": DistanceType.Canberra,
    "cosine": DistanceType.CosineExpanded,
    "lp": DistanceType.LpUnexpanded,
    "correlation": DistanceType.CorrelationExpanded,
    "jaccard": DistanceType.JaccardExpanded,
    "hellinger": DistanceType.HellingerExpanded,
    "braycurtis": DistanceType.BrayCurtis,
    "jensenshannon": DistanceType.JensenShannon,
    "hamming": DistanceType.HammingUnexpanded,
    "kl_divergence": DistanceType.KLDivergence,
    "minkowski": DistanceType.LpUnexpanded,
    "russellrao": DistanceType.RusselRaoExpanded,
    "dice": DistanceType.DiceExpanded,
}
# Metric names accepted by distance(). Deliberately a subset of
# DISTANCE_TYPES' keys — e.g. "jaccard", "braycurtis" and "dice" are
# mapped above but are not accepted by the pairwise entry point.
SUPPORTED_DISTANCES = ["euclidean", "l1", "cityblock", "l2", "inner_product",
                       "chebyshev", "minkowski", "canberra", "kl_divergence",
                       "correlation", "russellrao", "hellinger", "lp",
                       "hamming", "jensenshannon", "cosine", "sqeuclidean"]
@auto_sync_handle
@auto_convert_output
def distance(X, Y, out=None, metric="euclidean", p=2.0, handle=None):
    """
    Compute pairwise distances between X and Y
    Valid values for metric:
        ["euclidean", "l2", "sqeuclidean", "l1", "cityblock",
         "inner_product", "chebyshev", "canberra", "lp", "hellinger",
         "jensenshannon", "kl_divergence", "russellrao", "minkowski",
         "correlation", "cosine", "hamming"]
    Parameters
    ----------
    X : CUDA array interface compliant matrix shape (m, k)
    Y : CUDA array interface compliant matrix shape (n, k)
    out : Optional writable CUDA array interface matrix shape (m, n)
    metric : string denoting the metric type (default="euclidean")
    p : metric parameter (currently used only for "minkowski")
    {handle_docstring}
    Returns
    -------
    raft.device_ndarray containing pairwise distances
    Examples
    --------
    To compute pairwise distances on cupy arrays:
    >>> import cupy as cp
    >>> from pylibraft.common import Handle
    >>> from pylibraft.distance import pairwise_distance
    >>> n_samples = 5000
    >>> n_features = 50
    >>> in1 = cp.random.random_sample((n_samples, n_features),
    ...                               dtype=cp.float32)
    >>> in2 = cp.random.random_sample((n_samples, n_features),
    ...                               dtype=cp.float32)
    A single RAFT handle can optionally be reused across
    pylibraft functions.
    >>> handle = Handle()
    >>> output = pairwise_distance(in1, in2, metric="euclidean", handle=handle)
    pylibraft functions are often asynchronous so the
    handle needs to be explicitly synchronized
    >>> handle.sync()
    It's also possible to write to a pre-allocated output array:
    >>> import cupy as cp
    >>> from pylibraft.common import Handle
    >>> from pylibraft.distance import pairwise_distance
    >>> n_samples = 5000
    >>> n_features = 50
    >>> in1 = cp.random.random_sample((n_samples, n_features),
    ...                               dtype=cp.float32)
    >>> in2 = cp.random.random_sample((n_samples, n_features),
    ...                               dtype=cp.float32)
    >>> output = cp.empty((n_samples, n_samples), dtype=cp.float32)
    A single RAFT handle can optionally be reused across
    pylibraft functions.
    >>>
    >>> handle = Handle()
    >>> pairwise_distance(in1, in2, out=output,
    ...                   metric="euclidean", handle=handle)
    array(...)
    pylibraft functions are often asynchronous so the
    handle needs to be explicitly synchronized
    >>> handle.sync()
    """
    x_cai = cai_wrapper(X)
    y_cai = cai_wrapper(Y)
    m = x_cai.shape[0]
    n = y_cai.shape[0]
    x_dt = x_cai.dtype
    y_dt = y_cai.dtype
    # Allocate the output on-device unless the caller supplied one.
    if out is None:
        dists = device_ndarray.empty((m, n), dtype=y_dt)
    else:
        dists = out
    x_k = x_cai.shape[1]
    y_k = y_cai.shape[1]
    dists_cai = cai_wrapper(dists)
    if x_k != y_k:
        raise ValueError("Inputs must have same number of columns. "
                         "a=%s, b=%s" % (x_k, y_k))
    x_ptr = <uintptr_t>x_cai.data
    y_ptr = <uintptr_t>y_cai.data
    d_ptr = <uintptr_t>dists_cai.data
    handle = handle if handle is not None else Handle()
    cdef device_resources *h = <device_resources*><size_t>handle.getHandle()
    d_dt = dists_cai.dtype
    x_c_contiguous = x_cai.c_contiguous
    y_c_contiguous = y_cai.c_contiguous
    # Both inputs must share a single memory layout; it is forwarded to
    # the C++ kernel as the isRowMajor flag below.
    if x_c_contiguous != y_c_contiguous:
        raise ValueError("Inputs must have matching strides")
    if metric not in SUPPORTED_DISTANCES:
        raise ValueError("metric %s is not supported" % metric)
    cdef DistanceType distance_type = DISTANCE_TYPES[metric]
    if x_dt != y_dt or x_dt != d_dt:
        raise ValueError("Inputs must have the same dtypes")
    # Dispatch on dtype: only float32 and float64 instantiations are
    # exposed by the raft runtime library.
    if x_dt == np.float32:
        pairwise_distance(deref(h),
                          <float*> x_ptr,
                          <float*> y_ptr,
                          <float*> d_ptr,
                          <int>m,
                          <int>n,
                          <int>x_k,
                          <DistanceType>distance_type,
                          <bool>x_c_contiguous,
                          <float>p)
    elif x_dt == np.float64:
        pairwise_distance(deref(h),
                          <double*> x_ptr,
                          <double*> y_ptr,
                          <double*> d_ptr,
                          <int>m,
                          <int>n,
                          <int>x_k,
                          <DistanceType>distance_type,
                          <bool>x_c_contiguous,
                          <float>p)
    else:
        raise ValueError("dtype %s not supported" % x_dt)
    return dists
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/distance/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Set the list of Cython files to build
set(cython_sources pairwise_distance.pyx fused_l2_nn.pyx)
set(linked_libraries raft::raft raft::compiled)
# Build all of the Cython targets
# NOTE: MODULE_PREFIX namespaces the generated extension targets
# (distance_<name>) so they do not collide with the other pylibraft
# Cython subdirectories.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" ASSOCIATED_TARGETS raft MODULE_PREFIX distance_
)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/distance/__init__.pxd | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/distance/fused_l2_nn.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
from cython.operator cimport dereference as deref
from libc.stdint cimport uintptr_t
from libcpp cimport bool
from .distance_type cimport DistanceType
from pylibraft.common import (
Handle,
auto_convert_output,
cai_wrapper,
device_ndarray,
)
from pylibraft.common.handle import auto_sync_handle
from pylibraft.common.handle cimport device_resources
cdef extern from "raft_runtime/distance/fused_l2_nn.hpp" \
namespace "raft::runtime::distance" nogil:
void fused_l2_nn_min_arg(
const device_resources &handle,
int* min,
const float* x,
const float* y,
int m,
int n,
int k,
bool sqrt) except +
void fused_l2_nn_min_arg(
const device_resources &handle,
int* min,
const double* x,
const double* y,
int m,
int n,
int k,
bool sqrt) except +
@auto_sync_handle
@auto_convert_output
def fused_l2_nn_argmin(X, Y, out=None, sqrt=True, handle=None):
    """
    Compute the 1-nearest neighbors between X and Y using the L2 distance
    Parameters
    ----------
    X : CUDA array interface compliant matrix shape (m, k)
    Y : CUDA array interface compliant matrix shape (n, k)
    out : Optional writable CUDA array interface matrix shape (m, 1),
        dtype int32. If supplied, the argmin indices are written here
        in-place. (default None)
    sqrt : boolean, whether the L2 distance takes the square root
        (the argmin result is unaffected) (default True)
    {handle_docstring}
    Examples
    --------
    To compute the 1-nearest neighbors argmin:
    >>> import cupy as cp
    >>> from pylibraft.common import Handle
    >>> from pylibraft.distance import fused_l2_nn_argmin
    >>> n_samples = 5000
    >>> n_clusters = 5
    >>> n_features = 50
    >>> in1 = cp.random.random_sample((n_samples, n_features),
    ...                               dtype=cp.float32)
    >>> in2 = cp.random.random_sample((n_clusters, n_features),
    ...                               dtype=cp.float32)
    >>> # A single RAFT handle can optionally be reused across
    >>> # pylibraft functions.
    >>> handle = Handle()
    >>> output = fused_l2_nn_argmin(in1, in2, handle=handle)
    >>> # pylibraft functions are often asynchronous so the
    >>> # handle needs to be explicitly synchronized
    >>> handle.sync()
    The output can also be computed in-place on a preallocated
    array:
    >>> import cupy as cp
    >>> from pylibraft.common import Handle
    >>> from pylibraft.distance import fused_l2_nn_argmin
    >>> n_samples = 5000
    >>> n_clusters = 5
    >>> n_features = 50
    >>> in1 = cp.random.random_sample((n_samples, n_features),
    ...                               dtype=cp.float32)
    >>> in2 = cp.random.random_sample((n_clusters, n_features),
    ...                               dtype=cp.float32)
    >>> output = cp.empty((n_samples, 1), dtype=cp.int32)
    >>> # A single RAFT handle can optionally be reused across
    >>> # pylibraft functions.
    >>> handle = Handle()
    >>> fused_l2_nn_argmin(in1, in2, out=output, handle=handle)
    array(...)
    >>> # pylibraft functions are often asynchronous so the
    >>> # handle needs to be explicitly synchronized
    >>> handle.sync()
    """
    x_cai = cai_wrapper(X)
    y_cai = cai_wrapper(Y)
    x_dt = x_cai.dtype
    y_dt = y_cai.dtype
    m = x_cai.shape[0]
    n = y_cai.shape[0]
    # Allocate one int32 argmin slot per row of X unless the caller
    # supplied an output array.
    if out is None:
        output = device_ndarray.empty((m,), dtype="int32")
    else:
        output = out
    output_cai = cai_wrapper(output)
    x_k = x_cai.shape[1]
    y_k = y_cai.shape[1]
    if x_k != y_k:
        raise ValueError("Inputs must have same number of columns. "
                         "a=%s, b=%s" % (x_k, y_k))
    x_ptr = <uintptr_t>x_cai.data
    y_ptr = <uintptr_t>y_cai.data
    d_ptr = <uintptr_t>output_cai.data
    handle = handle if handle is not None else Handle()
    cdef device_resources *h = <device_resources*><size_t>handle.getHandle()
    d_dt = output_cai.dtype
    x_c_contiguous = x_cai.c_contiguous
    y_c_contiguous = y_cai.c_contiguous
    if x_c_contiguous != y_c_contiguous:
        raise ValueError("Inputs must have matching strides")
    if x_dt != y_dt:
        raise ValueError("Inputs must have the same dtypes")
    if d_dt != np.int32:
        raise ValueError("Output array must be int32")
    # Dispatch on dtype: only float32 and float64 instantiations are
    # exposed by the raft runtime library.
    if x_dt == np.float32:
        fused_l2_nn_min_arg(deref(h),
                            <int*> d_ptr,
                            <float*> x_ptr,
                            <float*> y_ptr,
                            <int>m,
                            <int>n,
                            <int>x_k,
                            <bool>sqrt)
    elif x_dt == np.float64:
        fused_l2_nn_min_arg(deref(h),
                            <int*> d_ptr,
                            <double*> x_ptr,
                            <double*> y_ptr,
                            <int>m,
                            <int>n,
                            <int>x_k,
                            <bool>sqrt)
    else:
        raise ValueError("dtype %s not supported" % x_dt)
    return output
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/distance/__init__.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .fused_l2_nn import fused_l2_nn_argmin
from .pairwise_distance import DISTANCE_TYPES, distance as pairwise_distance
__all__ = ["fused_l2_nn_argmin", "pairwise_distance"]
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/brute_force.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
from cython.operator cimport dereference as deref
from libcpp cimport bool, nullptr
from libcpp.vector cimport vector
from pylibraft.distance.distance_type cimport DistanceType
from pylibraft.common import (
DeviceResources,
auto_convert_output,
cai_wrapper,
device_ndarray,
)
from libc.stdint cimport int64_t, uintptr_t
from pylibraft.common.cpp.optional cimport optional
from pylibraft.common.handle cimport device_resources
from pylibraft.common.mdspan cimport get_dmv_float, get_dmv_int64
from pylibraft.common.handle import auto_sync_handle
from pylibraft.common.interruptible import cuda_interruptible
from pylibraft.distance.distance_type cimport DistanceType
# TODO: Centralize this
from pylibraft.distance.pairwise_distance import DISTANCE_TYPES
from pylibraft.neighbors.common import _check_input_array
from pylibraft.common.cpp.mdspan cimport (
device_matrix_view,
host_matrix_view,
make_device_matrix_view,
make_host_matrix_view,
row_major,
)
from pylibraft.neighbors.cpp.brute_force cimport knn as c_knn
def _get_array_params(array_interface, check_dtype=None):
dtype = np.dtype(array_interface["typestr"])
if check_dtype is None and dtype != check_dtype:
raise TypeError("dtype %s not supported" % dtype)
shape = array_interface["shape"]
if len(shape) != 2:
raise ValueError("Expected a 2D array, got %d D" % len(shape))
data = array_interface["data"][0]
return (shape, dtype, data)
@auto_sync_handle
@auto_convert_output
def knn(dataset, queries, k=None, indices=None, distances=None,
        metric="sqeuclidean", metric_arg=2.0,
        global_id_offset=0, handle=None):
    """
    Perform a brute-force nearest neighbors search.
    Parameters
    ----------
    dataset : array interface compliant matrix, row-major layout,
        shape (n_samples, dim). Supported dtype [float]
    queries : array interface compliant matrix, row-major layout,
        shape (n_queries, dim) Supported dtype [float]
    k : int
        Number of neighbors to search (k <= 2048). Optional if indices or
        distances arrays are given (in which case their second dimension
        is k).
    indices : Optional array interface compliant matrix shape
        (n_queries, k). If supplied, neighbor indices will be written
        here in-place. (default None)
        Supported dtype int64
    distances : Optional array interface compliant matrix shape
        (n_queries, k), dtype float. If supplied, neighbor
        distances will be written here in-place. (default None)
    metric : string denoting the metric type (default="sqeuclidean")
    metric_arg : metric parameter (currently used only for "minkowski")
    global_id_offset : int, offset added to the returned neighbor
        indices (default 0)
    {handle_docstring}
    Returns
    -------
    indices: array interface compliant object containing resulting indices
        shape (n_queries, k)
    distances: array interface compliant object containing resulting distances
        shape (n_queries, k)
    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import DeviceResources
    >>> from pylibraft.neighbors.brute_force import knn
    >>> n_samples = 50000
    >>> n_features = 50
    >>> n_queries = 1000
    >>> dataset = cp.random.random_sample((n_samples, n_features),
    ...                                   dtype=cp.float32)
    >>> # Search using the built index
    >>> queries = cp.random.random_sample((n_queries, n_features),
    ...                                   dtype=cp.float32)
    >>> k = 40
    >>> distances, neighbors = knn(dataset, queries, k)
    >>> distances = cp.asarray(distances)
    >>> neighbors = cp.asarray(neighbors)
    """
    if handle is None:
        handle = DeviceResources()
    dataset_cai = cai_wrapper(dataset)
    queries_cai = cai_wrapper(queries)
    # Infer k from whichever preallocated output the caller provided.
    if k is None:
        if indices is not None:
            k = cai_wrapper(indices).shape[1]
        elif distances is not None:
            k = cai_wrapper(distances).shape[1]
        else:
            raise ValueError("Argument k must be specified if both indices "
                             "and distances arg is None")
    # we require c-contiguous (rowmajor) inputs here
    _check_input_array(dataset_cai, [np.dtype("float32")])
    _check_input_array(queries_cai, [np.dtype("float32")],
                       exp_cols=dataset_cai.shape[1])
    n_queries = queries_cai.shape[0]
    # Allocate outputs on-device when not supplied by the caller.
    if indices is None:
        indices = device_ndarray.empty((n_queries, k), dtype='int64')
    if distances is None:
        distances = device_ndarray.empty((n_queries, k), dtype='float32')
    cdef DistanceType c_metric = DISTANCE_TYPES[metric]
    distances_cai = cai_wrapper(distances)
    indices_cai = cai_wrapper(indices)
    cdef optional[float] c_metric_arg = <float>metric_arg
    cdef optional[int64_t] c_global_offset = <int64_t>global_id_offset
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()
    if dataset_cai.dtype == np.float32:
        with cuda_interruptible():
            c_knn(deref(handle_),
                  get_dmv_float(dataset_cai, check_shape=True),
                  get_dmv_float(queries_cai, check_shape=True),
                  get_dmv_int64(indices_cai, check_shape=True),
                  get_dmv_float(distances_cai, check_shape=True),
                  c_metric,
                  c_metric_arg,
                  c_global_offset)
    else:
        raise TypeError("dtype %s not supported" % dataset_cai.dtype)
    return (distances, indices)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Set the list of Cython files to build
set(cython_sources common.pyx refine.pyx brute_force.pyx)
set(linked_libraries raft::raft raft::compiled)
# Build all of the Cython targets
# NOTE: MODULE_PREFIX namespaces the generated extension targets
# (neighbors_<name>) so they do not collide with the other pylibraft
# Cython subdirectories.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" ASSOCIATED_TARGETS raft MODULE_PREFIX neighbors_
)
# Each ANN algorithm builds its own Cython modules in a subdirectory.
add_subdirectory(cagra)
add_subdirectory(ivf_flat)
add_subdirectory(ivf_pq)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/common.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from pylibraft.distance.distance_type cimport DistanceType

# Declared here so sibling Cython modules can `cimport` the helper that
# maps a DistanceType enum value back to its canonical metric name.
cdef _get_metric_string(DistanceType metric)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/__init__.pxd | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/common.pyx | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import warnings
from pylibraft.distance.distance_type cimport DistanceType
# Canonical metric-name -> DistanceType mapping accepted by the
# neighbors APIs in this package (see _get_metric below).
SUPPORTED_DISTANCES = {
    "sqeuclidean": DistanceType.L2Expanded,
    "euclidean": DistanceType.L2SqrtExpanded,
    "inner_product": DistanceType.InnerProduct,
}
def _get_metric(metric):
    """Translate a metric name into its DistanceType enum value.

    The deprecated alias "l2_expanded" is still accepted (with a
    FutureWarning); any other unknown name raises ValueError.
    """
    try:
        return SUPPORTED_DISTANCES[metric]
    except KeyError:
        pass
    # Legacy spelling kept for backward compatibility.
    if metric == "l2_expanded":
        warnings.warn("Using l2_expanded as a metric name is deprecated,"
                      " use sqeuclidean instead", FutureWarning)
        return DistanceType.L2Expanded
    raise ValueError("metric %s is not supported" % metric)
# Inverse of SUPPORTED_DISTANCES: map a DistanceType enum value back to
# its canonical metric name (used by index __repr__ implementations).
cdef _get_metric_string(DistanceType metric):
    return {DistanceType.L2Expanded : "sqeuclidean",
            DistanceType.InnerProduct: "inner_product",
            DistanceType.L2SqrtExpanded: "euclidean"}[metric]
def _check_input_array(cai, exp_dt, exp_rows=None, exp_cols=None):
    """Validate an array-interface wrapper against expectations.

    Parameters
    ----------
    cai : object
        Wrapper exposing ``dtype``, ``c_contiguous`` and ``shape``.
    exp_dt : sequence of numpy dtypes
        Accepted dtypes.
    exp_rows, exp_cols : int, optional
        When given, the corresponding shape dimension must match.

    Raises
    ------
    TypeError
        For an unsupported dtype.
    ValueError
        For a non-C-contiguous layout or a shape mismatch.
    """
    if cai.dtype not in exp_dt:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if not cai.c_contiguous:
        raise ValueError("Row major input is expected")
    # shape[1] is only touched when a column count was requested, so
    # 1-D inputs validated with exp_rows alone remain acceptable.
    if exp_cols is not None:
        if cai.shape[1] != exp_cols:
            raise ValueError("Incorrect number of columns, expected {} got {}"
                             .format(exp_cols, cai.shape[1]))
    if exp_rows is not None:
        if cai.shape[0] != exp_rows:
            raise ValueError("Incorrect number of rows, expected {} , got {}"
                             .format(exp_rows, cai.shape[0]))
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/refine.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
from cython.operator cimport dereference as deref
from libc.stdint cimport int8_t, int64_t, uint8_t, uintptr_t
from libcpp cimport bool, nullptr
from pylibraft.distance.distance_type cimport DistanceType
from pylibraft.common import (
DeviceResources,
auto_convert_output,
cai_wrapper,
device_ndarray,
)
from pylibraft.common.handle cimport device_resources
from pylibraft.common.handle import auto_sync_handle
from pylibraft.common.input_validation import is_c_contiguous
from pylibraft.common.interruptible import cuda_interruptible
from pylibraft.distance.distance_type cimport DistanceType
import pylibraft.neighbors.ivf_pq as ivf_pq
from pylibraft.neighbors.common import _get_metric
cimport pylibraft.neighbors.ivf_pq.cpp.c_ivf_pq as c_ivf_pq
from pylibraft.common.cpp.mdspan cimport (
device_matrix_view,
host_matrix_view,
make_host_matrix_view,
row_major,
)
from pylibraft.common.mdspan cimport (
get_dmv_float,
get_dmv_int8,
get_dmv_int64,
get_dmv_uint8,
)
from pylibraft.neighbors.common cimport _get_metric_string
from pylibraft.neighbors.ivf_pq.cpp.c_ivf_pq cimport (
index_params,
search_params,
)
# We omit the const qualifiers in the interface for refine, because cython
# has an issue parsing it (https://github.com/cython/cython/issues/4180).
cdef extern from "raft_runtime/neighbors/refine.hpp" \
namespace "raft::runtime::neighbors" nogil:
cdef void c_refine "raft::runtime::neighbors::refine" (
const device_resources& handle,
device_matrix_view[float, int64_t, row_major] dataset,
device_matrix_view[float, int64_t, row_major] queries,
device_matrix_view[int64_t, int64_t, row_major] candidates,
device_matrix_view[int64_t, int64_t, row_major] indices,
device_matrix_view[float, int64_t, row_major] distances,
DistanceType metric) except +
cdef void c_refine "raft::runtime::neighbors::refine" (
const device_resources& handle,
device_matrix_view[uint8_t, int64_t, row_major] dataset,
device_matrix_view[uint8_t, int64_t, row_major] queries,
device_matrix_view[int64_t, int64_t, row_major] candidates,
device_matrix_view[int64_t, int64_t, row_major] indices,
device_matrix_view[float, int64_t, row_major] distances,
DistanceType metric) except +
cdef void c_refine "raft::runtime::neighbors::refine" (
const device_resources& handle,
device_matrix_view[int8_t, int64_t, row_major] dataset,
device_matrix_view[int8_t, int64_t, row_major] queries,
device_matrix_view[int64_t, int64_t, row_major] candidates,
device_matrix_view[int64_t, int64_t, row_major] indices,
device_matrix_view[float, int64_t, row_major] distances,
DistanceType metric) except +
cdef void c_refine "raft::runtime::neighbors::refine" (
const device_resources& handle,
host_matrix_view[float, int64_t, row_major] dataset,
host_matrix_view[float, int64_t, row_major] queries,
host_matrix_view[int64_t, int64_t, row_major] candidates,
host_matrix_view[int64_t, int64_t, row_major] indices,
host_matrix_view[float, int64_t, row_major] distances,
DistanceType metric) except +
cdef void c_refine "raft::runtime::neighbors::refine" (
const device_resources& handle,
host_matrix_view[uint8_t, int64_t, row_major] dataset,
host_matrix_view[uint8_t, int64_t, row_major] queries,
host_matrix_view[int64_t, int64_t, row_major] candidates,
host_matrix_view[int64_t, int64_t, row_major] indices,
host_matrix_view[float, int64_t, row_major] distances,
DistanceType metric) except +
cdef void c_refine "raft::runtime::neighbors::refine" (
const device_resources& handle,
host_matrix_view[int8_t, int64_t, row_major] dataset,
host_matrix_view[int8_t, int64_t, row_major] queries,
host_matrix_view[int64_t, int64_t, row_major] candidates,
host_matrix_view[int64_t, int64_t, row_major] indices,
host_matrix_view[float, int64_t, row_major] distances,
DistanceType metric) except +
def _get_array_params(array_interface, check_dtype=None):
    """Extract (shape, dtype, data pointer) from an ``__array_interface__``.

    Parameters
    ----------
    array_interface : dict
        The ``__array_interface__`` of a host array.
    check_dtype : numpy dtype, optional
        When given, the array must have exactly this dtype.

    Returns
    -------
    (shape, dtype, data) tuple where ``data`` is the raw pointer value.

    Raises
    ------
    TypeError
        If ``check_dtype`` is given and does not match the array dtype.
    ValueError
        If the array is not 2D.
    """
    dtype = np.dtype(array_interface["typestr"])
    # Bug fix: the dtype check must run when check_dtype IS provided.
    # The original tested `check_dtype is None`, which skipped validation
    # for every caller that requested it and rejected every call made
    # without a requested dtype.
    if check_dtype is not None and dtype != check_dtype:
        raise TypeError("dtype %s not supported" % dtype)
    shape = array_interface["shape"]
    if len(shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(shape))
    data = array_interface["data"][0]
    return (shape, dtype, data)
# Helpers that reinterpret a host (CPU) array as a raft host_matrix_view
# of the matching element type. Each validates the dtype and 2D shape via
# _get_array_params and casts the raw data pointer; the caller must keep
# the source array alive for as long as the returned view is in use.
cdef host_matrix_view[float, int64_t, row_major] \
        get_host_matrix_view_float(array) except *:
    shape, dtype, data = _get_array_params(
        array.__array_interface__, check_dtype=np.float32)
    return make_host_matrix_view[float, int64_t, row_major](
        <float*><uintptr_t>data, shape[0], shape[1])


cdef host_matrix_view[int64_t, int64_t, row_major] \
        get_host_matrix_view_int64_t(array) except *:
    shape, dtype, data = _get_array_params(
        array.__array_interface__, check_dtype=np.int64)
    return make_host_matrix_view[int64_t, int64_t, row_major](
        <int64_t*><uintptr_t>data, shape[0], shape[1])


cdef host_matrix_view[uint8_t, int64_t, row_major] \
        get_host_matrix_view_uint8(array) except *:
    shape, dtype, data = _get_array_params(
        array.__array_interface__, check_dtype=np.uint8)
    return make_host_matrix_view[uint8_t, int64_t, row_major](
        <uint8_t*><uintptr_t>data, shape[0], shape[1])


cdef host_matrix_view[int8_t, int64_t, row_major] \
        get_host_matrix_view_int8(array) except *:
    shape, dtype, data = _get_array_params(
        array.__array_interface__, check_dtype=np.int8)
    return make_host_matrix_view[int8_t, int64_t, row_major](
        <int8_t*><uintptr_t>data, shape[0], shape[1])
@auto_sync_handle
@auto_convert_output
def refine(dataset, queries, candidates, k=None, indices=None, distances=None,
           metric="sqeuclidean", handle=None):
    """
    Refine nearest neighbor search.

    Refinement is an operation that follows an approximate NN search. The
    approximate search has already selected n_candidates neighbor candidates
    for each query. We narrow it down to k neighbors. For each query, we
    calculate the exact distance between the query and its n_candidates
    neighbor candidate, and select the k nearest ones.

    Input arrays can be either CUDA array interface compliant matrices or
    array interface compliant matrices in host memory. All arrays must be
    in the same memory space.

    Parameters
    ----------
    dataset : array interface compliant matrix, shape (n_samples, dim)
        Supported dtype [float, int8, uint8]
    queries : array interface compliant matrix, shape (n_queries, dim)
        Supported dtype [float, int8, uint8]
    candidates : array interface compliant matrix, shape (n_queries, k0)
        Supported dtype int64
    k : int
        Number of neighbors to search (k <= k0). Optional if indices or
        distances arrays are given (in which case their second dimension
        is k).
    indices : Optional array interface compliant matrix shape \
            (n_queries, k).
        If supplied, neighbor indices will be written here in-place.
        (default None). Supported dtype int64.
    distances : Optional array interface compliant matrix shape \
            (n_queries, k).
        If supplied, the distances to the neighbors will be written here
        in-place. (default None) Supported dtype float.
    {handle_docstring}

    Returns
    -------
    distances : array interface compliant matrix, shape (n_queries, k)
        Refined distances to the k nearest neighbors.
    indices : array interface compliant matrix, shape (n_queries, k)
        Refined indices of the k nearest neighbors.

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import DeviceResources
    >>> from pylibraft.neighbors import ivf_pq, refine
    >>> n_samples = 50000
    >>> n_features = 50
    >>> n_queries = 1000
    >>> dataset = cp.random.random_sample((n_samples, n_features),
    ...                                   dtype=cp.float32)
    >>> handle = DeviceResources()
    >>> index_params = ivf_pq.IndexParams(n_lists=1024,
    ...                                   metric="sqeuclidean",
    ...                                   pq_dim=10)
    >>> index = ivf_pq.build(index_params, dataset, handle=handle)
    >>> # Search using the built index
    >>> queries = cp.random.random_sample((n_queries, n_features),
    ...                                   dtype=cp.float32)
    >>> k = 40
    >>> _, candidates = ivf_pq.search(ivf_pq.SearchParams(), index,
    ...                               queries, k, handle=handle)
    >>> k = 10
    >>> distances, neighbors = refine(dataset, queries, candidates, k,
    ...                               handle=handle)
    >>> distances = cp.asarray(distances)
    >>> neighbors = cp.asarray(neighbors)
    >>> # pylibraft functions are often asynchronous so the
    >>> # handle needs to be explicitly synchronized
    >>> handle.sync()
    """
    if handle is None:
        handle = DeviceResources()
    # Host and device inputs take separate code paths; dispatch on whether
    # the dataset implements the CUDA array interface.
    if hasattr(dataset, "__cuda_array_interface__"):
        return _refine_device(dataset, queries, candidates, k, indices,
                              distances, metric, handle)
    else:
        return _refine_host(dataset, queries, candidates, k, indices,
                            distances, metric, handle)
def _refine_device(dataset, queries, candidates, k, indices, distances,
                   metric, handle):
    """Device-memory implementation of refine().

    Infers k from the output arrays when not given, allocates missing
    output arrays on the device, then dispatches to the typed C++
    overload matching the dataset dtype. Returns (distances, indices).
    """
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()
    # k defaults to the second dimension of whichever output was supplied.
    if k is None:
        if indices is not None:
            k = cai_wrapper(indices).shape[1]
        elif distances is not None:
            k = cai_wrapper(distances).shape[1]
        else:
            raise ValueError("Argument k must be specified if both indices "
                             "and distances arg is None")
    queries_cai = cai_wrapper(queries)
    dataset_cai = cai_wrapper(dataset)
    candidates_cai = cai_wrapper(candidates)
    n_queries = cai_wrapper(queries).shape[0]
    # Allocate outputs on the device when the caller did not provide them.
    if indices is None:
        indices = device_ndarray.empty((n_queries, k), dtype='int64')
    if distances is None:
        distances = device_ndarray.empty((n_queries, k), dtype='float32')
    indices_cai = cai_wrapper(indices)
    distances_cai = cai_wrapper(distances)
    cdef DistanceType c_metric = _get_metric(metric)
    # Dispatch to the C++ overload for the dataset's element type.
    if dataset_cai.dtype == np.float32:
        with cuda_interruptible():
            c_refine(deref(handle_),
                     get_dmv_float(dataset_cai, check_shape=True),
                     get_dmv_float(queries_cai, check_shape=True),
                     get_dmv_int64(candidates_cai, check_shape=True),
                     get_dmv_int64(indices_cai, check_shape=True),
                     get_dmv_float(distances_cai, check_shape=True),
                     c_metric)
    elif dataset_cai.dtype == np.int8:
        with cuda_interruptible():
            c_refine(deref(handle_),
                     get_dmv_int8(dataset_cai, check_shape=True),
                     get_dmv_int8(queries_cai, check_shape=True),
                     get_dmv_int64(candidates_cai, check_shape=True),
                     get_dmv_int64(indices_cai, check_shape=True),
                     get_dmv_float(distances_cai, check_shape=True),
                     c_metric)
    elif dataset_cai.dtype == np.uint8:
        with cuda_interruptible():
            c_refine(deref(handle_),
                     get_dmv_uint8(dataset_cai, check_shape=True),
                     get_dmv_uint8(queries_cai, check_shape=True),
                     get_dmv_int64(candidates_cai, check_shape=True),
                     get_dmv_int64(indices_cai, check_shape=True),
                     get_dmv_float(distances_cai, check_shape=True),
                     c_metric)
    else:
        raise TypeError("dtype %s not supported" % dataset_cai.dtype)
    return (distances, indices)
def _refine_host(dataset, queries, candidates, k, indices, distances,
                 metric, handle):
    """Host-memory implementation of refine().

    Mirrors _refine_device but reads array metadata through
    __array_interface__, allocates missing outputs with numpy, and calls
    the host_matrix_view overloads. Returns (distances, indices).
    """
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()
    # k defaults to the second dimension of whichever output was supplied.
    if k is None:
        if indices is not None:
            k = indices.__array_interface__["shape"][1]
        elif distances is not None:
            k = distances.__array_interface__["shape"][1]
        else:
            raise ValueError("Argument k must be specified if both indices "
                             "and distances arg is None")
    n_queries = queries.__array_interface__["shape"][0]
    # Allocate host outputs when the caller did not provide them.
    if indices is None:
        indices = np.empty((n_queries, k), dtype='int64')
    if distances is None:
        distances = np.empty((n_queries, k), dtype='float32')
    cdef DistanceType c_metric = _get_metric(metric)
    dtype = np.dtype(dataset.__array_interface__["typestr"])
    # Dispatch to the C++ overload for the dataset's element type.
    if dtype == np.float32:
        with cuda_interruptible():
            c_refine(deref(handle_),
                     get_host_matrix_view_float(dataset),
                     get_host_matrix_view_float(queries),
                     get_host_matrix_view_int64_t(candidates),
                     get_host_matrix_view_int64_t(indices),
                     get_host_matrix_view_float(distances),
                     c_metric)
    elif dtype == np.int8:
        with cuda_interruptible():
            c_refine(deref(handle_),
                     get_host_matrix_view_int8(dataset),
                     get_host_matrix_view_int8(queries),
                     get_host_matrix_view_int64_t(candidates),
                     get_host_matrix_view_int64_t(indices),
                     get_host_matrix_view_float(distances),
                     c_metric)
    elif dtype == np.uint8:
        with cuda_interruptible():
            c_refine(deref(handle_),
                     get_host_matrix_view_uint8(dataset),
                     get_host_matrix_view_uint8(queries),
                     get_host_matrix_view_int64_t(candidates),
                     get_host_matrix_view_int64_t(indices),
                     get_host_matrix_view_float(distances),
                     c_metric)
    else:
        raise TypeError("dtype %s not supported" % dtype)
    return (distances, indices)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/__init__.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pylibraft.neighbors import brute_force, cagra, ivf_flat, ivf_pq
from .refine import refine
__all__ = ["common", "refine", "brute_force", "ivf_flat", "ivf_pq", "cagra"]
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_pq/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Set the list of Cython files to build
set(cython_sources ivf_pq.pyx)
# Link against raft headers and the precompiled raft library
set(linked_libraries raft::raft raft::compiled)
# Build all of the Cython targets
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" ASSOCIATED_TARGETS raft MODULE_PREFIX neighbors_ivfpq_
)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_pq/ivf_pq.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import warnings
import numpy as np
from cython.operator cimport dereference as deref
from libc.stdint cimport int32_t, int64_t, uint32_t, uintptr_t
from libcpp cimport bool, nullptr
from libcpp.string cimport string
from pylibraft.distance.distance_type cimport DistanceType
from pylibraft.common import (
DeviceResources,
ai_wrapper,
auto_convert_output,
cai_wrapper,
device_ndarray,
)
from pylibraft.common.cai_wrapper import wrap_array
from pylibraft.common.interruptible import cuda_interruptible
from pylibraft.common.handle cimport device_resources
from pylibraft.common.handle import auto_sync_handle
from pylibraft.common.input_validation import is_c_contiguous
from rmm._lib.memory_resource cimport (
DeviceMemoryResource,
device_memory_resource,
)
cimport pylibraft.neighbors.ivf_flat.cpp.c_ivf_flat as c_ivf_flat
cimport pylibraft.neighbors.ivf_pq.cpp.c_ivf_pq as c_ivf_pq
from pylibraft.common.optional cimport make_optional, optional
from pylibraft.neighbors.common import _check_input_array, _get_metric
from pylibraft.common.cpp.mdspan cimport (
device_matrix_view,
device_vector_view,
make_device_vector_view,
row_major,
)
from pylibraft.common.mdspan cimport (
get_dmv_float,
get_dmv_int8,
get_dmv_int64,
get_dmv_uint8,
make_optional_view_int64,
)
from pylibraft.neighbors.common cimport _get_metric_string
from pylibraft.neighbors.ivf_pq.cpp.c_ivf_pq cimport (
index_params,
search_params,
)
# Map a codebook_gen enum value to the string accepted by IndexParams
# (used by Index.__repr__).
cdef _get_codebook_string(c_ivf_pq.codebook_gen codebook):
    return {c_ivf_pq.codebook_gen.PER_SUBSPACE: "subspace",
            c_ivf_pq.codebook_gen.PER_CLUSTER: "cluster"}[codebook]
cdef _map_dtype_np_to_cuda(dtype, supported_dtypes=None):
    """Map a numpy dtype to the corresponding cudaDataType_t value.

    Parameters
    ----------
    dtype : numpy dtype
        One of np.float32, np.float16 or np.uint8.
    supported_dtypes : sequence, optional
        When given, restricts the accepted dtypes to this subset.

    Raises
    ------
    TypeError
        If the dtype is outside `supported_dtypes`, or has no CUDA
        mapping. (Previously an unmapped dtype escaped as a bare
        KeyError instead of the TypeError used by the explicit check.)
    """
    if supported_dtypes is not None and dtype not in supported_dtypes:
        raise TypeError("Type %s is not supported" % str(dtype))
    mapping = {np.float32: c_ivf_pq.cudaDataType_t.CUDA_R_32F,
               np.float16: c_ivf_pq.cudaDataType_t.CUDA_R_16F,
               np.uint8: c_ivf_pq.cudaDataType_t.CUDA_R_8U}
    # Raise the same TypeError for dtypes with no CUDA equivalent so
    # callers see one consistent exception type.
    if dtype not in mapping:
        raise TypeError("Type %s is not supported" % str(dtype))
    return mapping[dtype]
# Inverse of _map_dtype_np_to_cuda: render a cudaDataType_t value as the
# string form of its numpy dtype (used by SearchParams.__repr__).
cdef _get_dtype_string(dtype):
    return str({c_ivf_pq.cudaDataType_t.CUDA_R_32F: np.float32,
                c_ivf_pq.cudaDataType_t.CUDA_R_16F: np.float16,
                c_ivf_pq.cudaDataType_t.CUDA_R_8U: np.uint8}[dtype])
cdef class IndexParams:
    """
    Parameters to build index for IVF-PQ nearest neighbor search

    Parameters
    ----------
    n_lists : int, default = 1024
        The number of clusters used in the coarse quantizer.
    metric : string denoting the metric type, default="sqeuclidean"
        Valid values for metric: ["sqeuclidean", "inner_product",
        "euclidean"], where
            - sqeuclidean is the euclidean distance without the square root
              operation, i.e.: distance(a,b) = \\sum_i (a_i - b_i)^2,
            - euclidean is the euclidean distance
            - inner product distance is defined as
              distance(a, b) = \\sum_i a_i * b_i.
    kmeans_n_iters : int, default = 20
        The number of iterations searching for kmeans centers during index
        building.
    kmeans_trainset_fraction : int, default = 0.5
        If kmeans_trainset_fraction is less than 1, then the dataset is
        subsampled, and only n_samples * kmeans_trainset_fraction rows
        are used for training.
    pq_bits : int, default = 8
        The bit length of the vector element after quantization.
    pq_dim : int, default = 0
        The dimensionality of the vector after product quantization.
        When zero, an optimal value is selected using a heuristic. Note
        pq_dim * pq_bits must be a multiple of 8. Hint: a smaller 'pq_dim'
        results in a smaller index size and better search performance, but
        lower recall. If 'pq_bits' is 8, 'pq_dim' can be set to any number,
        but multiples of 8 are desirable for good performance. If
        'pq_bits' is not 8, 'pq_dim' should be a multiple of 8. For good
        performance, it is desirable that 'pq_dim' is a multiple of 32.
        Ideally, 'pq_dim' should be also a divisor of the dataset dim.
    codebook_kind : string, default = "subspace"
        Valid values ["subspace", "cluster"]
    force_random_rotation : bool, default = False
        Apply a random rotation matrix on the input data and queries even
        if `dim % pq_dim == 0`. Note: if `dim` is not multiple of `pq_dim`,
        a random rotation is always applied to the input data and queries
        to transform the working space from `dim` to `rot_dim`, which may
        be slightly larger than the original space and is a multiple
        of `pq_dim` (`rot_dim % pq_dim == 0`). However, this transform is
        not necessary when `dim` is multiple of `pq_dim` (`dim == rot_dim`,
        hence no need in adding "extra" data columns / features). By
        default, if `dim == rot_dim`, the rotation transform is
        initialized with the identity matrix. When
        `force_random_rotation == True`, a random orthogonal transform
        matrix is generated regardless of the values of `dim` and `pq_dim`.
    add_data_on_build : bool, default = True
        After training the coarse and fine quantizers, we will populate
        the index with the dataset if add_data_on_build == True, otherwise
        the index is left empty, and the extend method can be used
        to add new vectors to the index.
    conservative_memory_allocation : bool, default = False
        By default, the algorithm allocates more space than necessary for
        individual clusters (`list_data`). This allows to amortize the cost
        of memory allocation and reduce the number of data copies during
        repeated calls to `extend` (extending the database).
        To disable this behavior and use as little GPU memory for the
        database as possible, set this flag to `True`.
    """
    def __init__(self, *,
                 n_lists=1024,
                 metric="sqeuclidean",
                 kmeans_n_iters=20,
                 kmeans_trainset_fraction=0.5,
                 pq_bits=8,
                 pq_dim=0,
                 codebook_kind="subspace",
                 force_random_rotation=False,
                 add_data_on_build=True,
                 conservative_memory_allocation=False):
        self.params.n_lists = n_lists
        self.params.metric = _get_metric(metric)
        self.params.metric_arg = 0
        self.params.kmeans_n_iters = kmeans_n_iters
        self.params.kmeans_trainset_fraction = kmeans_trainset_fraction
        self.params.pq_bits = pq_bits
        self.params.pq_dim = pq_dim
        if codebook_kind == "subspace":
            self.params.codebook_kind = c_ivf_pq.codebook_gen.PER_SUBSPACE
        elif codebook_kind == "cluster":
            self.params.codebook_kind = c_ivf_pq.codebook_gen.PER_CLUSTER
        else:
            raise ValueError("Incorrect codebook kind %s" % codebook_kind)
        self.params.force_random_rotation = force_random_rotation
        self.params.add_data_on_build = add_data_on_build
        self.params.conservative_memory_allocation = \
            conservative_memory_allocation

    @property
    def n_lists(self):
        return self.params.n_lists

    @property
    def metric(self):
        return self.params.metric

    @property
    def kmeans_n_iters(self):
        return self.params.kmeans_n_iters

    @property
    def kmeans_trainset_fraction(self):
        return self.params.kmeans_trainset_fraction

    @property
    def pq_bits(self):
        return self.params.pq_bits

    @property
    def pq_dim(self):
        return self.params.pq_dim

    @property
    def codebook_kind(self):
        return self.params.codebook_kind

    @property
    def force_random_rotation(self):
        return self.params.force_random_rotation

    @property
    def add_data_on_build(self):
        return self.params.add_data_on_build

    @property
    def conservative_memory_allocation(self):
        return self.params.conservative_memory_allocation
cdef class Index:
    """Python wrapper owning a C++ IVF-PQ index object.

    The index is only usable for search/extend once `trained` is True,
    which happens after a successful build().
    """
    # We store a pointer to the index because it does not have a trivial
    # constructor.
    cdef c_ivf_pq.index[int64_t] * index
    cdef readonly bool trained

    def __cinit__(self, handle=None):
        self.trained = False
        self.index = NULL
        if handle is None:
            handle = DeviceResources()
        cdef device_resources* handle_ = \
            <device_resources*><size_t>handle.getHandle()
        # We create a placeholder object. The actual parameter values do
        # not matter, it will be replaced with a built index object later.
        self.index = new c_ivf_pq.index[int64_t](
            deref(handle_), _get_metric("sqeuclidean"),
            c_ivf_pq.codebook_gen.PER_SUBSPACE,
            <uint32_t>1,
            <uint32_t>4,
            <uint32_t>8,
            <uint32_t>0,
            <bool>False)

    def __dealloc__(self):
        # The C++ index is heap-allocated in __cinit__ and owned here.
        if self.index is not NULL:
            del self.index

    def __repr__(self):
        m_str = "metric=" + _get_metric_string(self.index.metric())
        code_str = "codebook=" + _get_codebook_string(
            self.index.codebook_kind())
        attr_str = [attr + "=" + str(getattr(self, attr))
                    for attr in ["size", "dim", "pq_dim", "pq_bits",
                                 "n_lists", "rot_dim"]]
        attr_str = [m_str, code_str] + attr_str
        return "Index(type=IVF-PQ, " + (", ".join(attr_str)) + ")"

    @property
    def dim(self):
        return self.index[0].dim()

    @property
    def size(self):
        return self.index[0].size()

    @property
    def pq_dim(self):
        return self.index[0].pq_dim()

    @property
    def pq_len(self):
        return self.index[0].pq_len()

    @property
    def pq_bits(self):
        return self.index[0].pq_bits()

    @property
    def metric(self):
        return self.index[0].metric()

    @property
    def n_lists(self):
        return self.index[0].n_lists()

    @property
    def rot_dim(self):
        return self.index[0].rot_dim()

    @property
    def codebook_kind(self):
        return self.index[0].codebook_kind()

    @property
    def conservative_memory_allocation(self):
        return self.index[0].conservative_memory_allocation()
@auto_sync_handle
@auto_convert_output
def build(IndexParams index_params, dataset, handle=None):
    """
    Builds an IVF-PQ index that can be later used for nearest neighbor search.

    The input array can be either CUDA array interface compliant matrix or
    array interface compliant matrix in host memory.

    Parameters
    ----------
    index_params : IndexParams object
    dataset : array interface compliant matrix shape (n_samples, dim)
        Supported dtype [float, int8, uint8]
    {handle_docstring}

    Returns
    -------
    index: ivf_pq.Index

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import DeviceResources
    >>> from pylibraft.neighbors import ivf_pq
    >>> n_samples = 50000
    >>> n_features = 50
    >>> n_queries = 1000
    >>> dataset = cp.random.random_sample((n_samples, n_features),
    ...                                   dtype=cp.float32)
    >>> handle = DeviceResources()
    >>> index_params = ivf_pq.IndexParams(
    ...     n_lists=1024,
    ...     metric="sqeuclidean",
    ...     pq_dim=10)
    >>> index = ivf_pq.build(index_params, dataset, handle=handle)
    >>> # Search using the built index
    >>> queries = cp.random.random_sample((n_queries, n_features),
    ...                                   dtype=cp.float32)
    >>> k = 10
    >>> distances, neighbors = ivf_pq.search(ivf_pq.SearchParams(), index,
    ...                                      queries, k, handle=handle)
    >>> distances = cp.asarray(distances)
    >>> neighbors = cp.asarray(neighbors)
    >>> # pylibraft functions are often asynchronous so the
    >>> # handle needs to be explicitly synchronized
    >>> handle.sync()
    """
    dataset_cai = wrap_array(dataset)
    dataset_dt = dataset_cai.dtype
    _check_input_array(dataset_cai, [np.dtype('float32'), np.dtype('byte'),
                                     np.dtype('ubyte')])
    cdef int64_t n_rows = dataset_cai.shape[0]
    cdef uint32_t dim = dataset_cai.shape[1]
    if handle is None:
        handle = DeviceResources()
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()
    idx = Index()
    # Dispatch on the dataset dtype; the index is marked trained only
    # after the C++ build succeeds.
    if dataset_dt == np.float32:
        with cuda_interruptible():
            c_ivf_pq.build(deref(handle_),
                           index_params.params,
                           get_dmv_float(dataset_cai, check_shape=True),
                           idx.index)
        idx.trained = True
    elif dataset_dt == np.byte:
        with cuda_interruptible():
            c_ivf_pq.build(deref(handle_),
                           index_params.params,
                           get_dmv_int8(dataset_cai, check_shape=True),
                           idx.index)
        idx.trained = True
    elif dataset_dt == np.ubyte:
        with cuda_interruptible():
            c_ivf_pq.build(deref(handle_),
                           index_params.params,
                           get_dmv_uint8(dataset_cai, check_shape=True),
                           idx.index)
        idx.trained = True
    else:
        raise TypeError("dtype %s not supported" % dataset_dt)
    return idx
@auto_sync_handle
@auto_convert_output
def extend(Index index, new_vectors, new_indices, handle=None):
    """
    Extend an existing index with new vectors.

    The input array can be either CUDA array interface compliant matrix or
    array interface compliant matrix in host memory.

    Parameters
    ----------
    index : ivf_pq.Index
        Trained ivf_pq object.
    new_vectors : array interface compliant matrix shape (n_samples, dim)
        Supported dtype [float, int8, uint8]
    new_indices : array interface compliant vector shape (n_samples)
        Supported dtype [int64]
    {handle_docstring}

    Returns
    -------
    index: ivf_pq.Index

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import DeviceResources
    >>> from pylibraft.neighbors import ivf_pq
    >>> n_samples = 50000
    >>> n_features = 50
    >>> n_queries = 1000
    >>> dataset = cp.random.random_sample((n_samples, n_features),
    ...                                   dtype=cp.float32)
    >>> handle = DeviceResources()
    >>> index = ivf_pq.build(ivf_pq.IndexParams(), dataset, handle=handle)
    >>> n_rows = 100
    >>> more_data = cp.random.random_sample((n_rows, n_features),
    ...                                     dtype=cp.float32)
    >>> indices = index.size + cp.arange(n_rows, dtype=cp.int64)
    >>> index = ivf_pq.extend(index, more_data, indices)
    >>> # Search using the built index
    >>> queries = cp.random.random_sample((n_queries, n_features),
    ...                                   dtype=cp.float32)
    >>> k = 10
    >>> distances, neighbors = ivf_pq.search(ivf_pq.SearchParams(),
    ...                                      index, queries,
    ...                                      k, handle=handle)
    >>> # pylibraft functions are often asynchronous so the
    >>> # handle needs to be explicitly synchronized
    >>> handle.sync()
    >>> distances = cp.asarray(distances)
    >>> neighbors = cp.asarray(neighbors)
    """
    if not index.trained:
        raise ValueError("Index need to be built before calling extend.")
    if handle is None:
        handle = DeviceResources()
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()
    vecs_cai = wrap_array(new_vectors)
    vecs_dt = vecs_cai.dtype
    cdef optional[device_vector_view[int64_t, int64_t]] new_indices_opt
    cdef int64_t n_rows = vecs_cai.shape[0]
    cdef uint32_t dim = vecs_cai.shape[1]
    # New vectors must match the index dimensionality; indices must be a
    # 1D int64 vector with one entry per new vector.
    _check_input_array(vecs_cai, [np.dtype('float32'), np.dtype('byte'),
                                  np.dtype('ubyte')],
                       exp_cols=index.dim)
    idx_cai = wrap_array(new_indices)
    _check_input_array(idx_cai, [np.dtype('int64')], exp_rows=n_rows)
    if len(idx_cai.shape)!=1:
        raise ValueError("Indices array is expected to be 1D")
    # The optional is only populated for a non-empty index; an empty
    # optional lets the C++ side assign indices itself.
    if index.index.size() > 0:
        new_indices_opt = make_device_vector_view(
            <int64_t *><uintptr_t>idx_cai.data,
            <int64_t>idx_cai.shape[0])
    if vecs_dt == np.float32:
        with cuda_interruptible():
            c_ivf_pq.extend(deref(handle_),
                            get_dmv_float(vecs_cai, check_shape=True),
                            new_indices_opt,
                            index.index)
    elif vecs_dt == np.int8:
        with cuda_interruptible():
            c_ivf_pq.extend(deref(handle_),
                            get_dmv_int8(vecs_cai, check_shape=True),
                            new_indices_opt,
                            index.index)
    elif vecs_dt == np.uint8:
        with cuda_interruptible():
            c_ivf_pq.extend(deref(handle_),
                            get_dmv_uint8(vecs_cai, check_shape=True),
                            new_indices_opt,
                            index.index)
    else:
        raise TypeError("query dtype %s not supported" % vecs_dt)
    return index
cdef class SearchParams:
    """
    IVF-PQ search parameters

    Parameters
    ----------
    n_probes: int, default = 20
        The number of coarse clusters to select for the fine search.
    lut_dtype: default = np.float32
        Data type of look up table to be created dynamically at search
        time. The use of low-precision types reduces the amount of shared
        memory required at search time, so fast shared memory kernels can
        be used even for datasets with large dimensionality. Note that
        the recall is slightly degraded when low-precision type is
        selected. Possible values [np.float32, np.float16, np.uint8]
    internal_distance_dtype: default = np.float32
        Storage data type for distance/similarity computation.
        Possible values [np.float32, np.float16]
    """
    def __init__(self, *, n_probes=20,
                 lut_dtype=np.float32,
                 internal_distance_dtype=np.float32):
        self.params.n_probes = n_probes
        self.params.lut_dtype = _map_dtype_np_to_cuda(lut_dtype)
        self.params.internal_distance_dtype = \
            _map_dtype_np_to_cuda(internal_distance_dtype)
        # TODO(tfeher): enable if #926 adds this
        # self.params.shmem_carveout = self.shmem_carveout

    def __repr__(self):
        lut_str = "lut_dtype=" + _get_dtype_string(self.params.lut_dtype)
        idt_str = "internal_distance_dtype=" + \
            _get_dtype_string(self.params.internal_distance_dtype)
        attr_str = [attr + "=" + str(getattr(self, attr))
                    for attr in ["n_probes"]]
        # TODO (tfeher) add "shmem_carveout"
        attr_str = attr_str + [lut_str, idt_str]
        return "SearchParams(type=IVF-PQ, " + (", ".join(attr_str)) + ")"

    @property
    def n_probes(self):
        return self.params.n_probes

    @property
    def lut_dtype(self):
        return self.params.lut_dtype

    @property
    def internal_distance_dtype(self):
        return self.params.internal_distance_dtype
@auto_sync_handle
@auto_convert_output
def search(SearchParams search_params,
Index index,
queries,
k,
neighbors=None,
distances=None,
DeviceMemoryResource memory_resource=None,
handle=None):
"""
Find the k nearest neighbors for each query.
Parameters
----------
search_params : SearchParams
index : Index
Trained IVF-PQ index.
queries : CUDA array interface compliant matrix shape (n_samples, dim)
Supported dtype [float, int8, uint8]
k : int
The number of neighbors.
neighbors : Optional CUDA array interface compliant matrix shape
(n_queries, k), dtype int64_t. If supplied, neighbor
indices will be written here in-place. (default None)
distances : Optional CUDA array interface compliant matrix shape
(n_queries, k) If supplied, the distances to the
neighbors will be written here in-place. (default None)
memory_resource : RMM DeviceMemoryResource object, optional
This can be used to explicitly manage the temporary memory
allocation during search. Passing a pooling allocator can reduce
memory allocation overhead. If not specified, then the memory
resource from the raft handle is used.
{handle_docstring}
Examples
--------
>>> import cupy as cp
>>> from pylibraft.common import DeviceResources
>>> from pylibraft.neighbors import ivf_pq
>>> n_samples = 50000
>>> n_features = 50
>>> n_queries = 1000
>>> dataset = cp.random.random_sample((n_samples, n_features),
... dtype=cp.float32)
>>> # Build index
>>> handle = DeviceResources()
>>> index = ivf_pq.build(ivf_pq.IndexParams(), dataset, handle=handle)
>>> # Search using the built index
>>> queries = cp.random.random_sample((n_queries, n_features),
... dtype=cp.float32)
>>> k = 10
>>> search_params = ivf_pq.SearchParams(
... n_probes=20,
... lut_dtype=cp.float16,
... internal_distance_dtype=cp.float32
... )
>>> # Using a pooling allocator reduces overhead of temporary array
>>> # creation during search. This is useful if multiple searches
>>> # are performad with same query size.
>>> import rmm
>>> mr = rmm.mr.PoolMemoryResource(
... rmm.mr.CudaMemoryResource(),
... initial_pool_size=2**29,
... maximum_pool_size=2**31
... )
>>> distances, neighbors = ivf_pq.search(search_params, index, queries,
... k, memory_resource=mr,
... handle=handle)
>>> # pylibraft functions are often asynchronous so the
>>> # handle needs to be explicitly synchronized
>>> handle.sync()
>>> neighbors = cp.asarray(neighbors)
>>> distances = cp.asarray(distances)
"""
if not index.trained:
raise ValueError("Index need to be built before calling search.")
if handle is None:
handle = DeviceResources()
cdef device_resources* handle_ = \
<device_resources*><size_t>handle.getHandle()
queries_cai = cai_wrapper(queries)
queries_dt = queries_cai.dtype
cdef uint32_t n_queries = queries_cai.shape[0]
_check_input_array(queries_cai, [np.dtype('float32'), np.dtype('byte'),
np.dtype('ubyte')],
exp_cols=index.dim)
if neighbors is None:
neighbors = device_ndarray.empty((n_queries, k), dtype='int64')
neighbors_cai = cai_wrapper(neighbors)
_check_input_array(neighbors_cai, [np.dtype('int64')],
exp_rows=n_queries, exp_cols=k)
if distances is None:
distances = device_ndarray.empty((n_queries, k), dtype='float32')
distances_cai = cai_wrapper(distances)
_check_input_array(distances_cai, [np.dtype('float32')],
exp_rows=n_queries, exp_cols=k)
cdef c_ivf_pq.search_params params = search_params.params
cdef uintptr_t neighbors_ptr = neighbors_cai.data
cdef uintptr_t distances_ptr = distances_cai.data
# TODO(tfeher) pass mr_ptr arg
cdef device_memory_resource* mr_ptr = <device_memory_resource*> nullptr
if memory_resource is not None:
mr_ptr = memory_resource.get_mr()
if queries_dt == np.float32:
with cuda_interruptible():
c_ivf_pq.search(deref(handle_),
params,
deref(index.index),
get_dmv_float(queries_cai, check_shape=True),
get_dmv_int64(neighbors_cai, check_shape=True),
get_dmv_float(distances_cai, check_shape=True))
elif queries_dt == np.byte:
with cuda_interruptible():
c_ivf_pq.search(deref(handle_),
params,
deref(index.index),
get_dmv_int8(queries_cai, check_shape=True),
get_dmv_int64(neighbors_cai, check_shape=True),
get_dmv_float(distances_cai, check_shape=True))
elif queries_dt == np.ubyte:
with cuda_interruptible():
c_ivf_pq.search(deref(handle_),
params,
deref(index.index),
get_dmv_uint8(queries_cai, check_shape=True),
get_dmv_int64(neighbors_cai, check_shape=True),
get_dmv_float(distances_cai, check_shape=True))
else:
raise ValueError("query dtype %s not supported" % queries_dt)
return (distances, neighbors)
@auto_sync_handle
def save(filename, Index index, handle=None):
"""
Saves the index to a file.
Saving / loading the index is experimental. The serialization format is
subject to change.
Parameters
----------
filename : string
Name of the file.
index : Index
Trained IVF-PQ index.
{handle_docstring}
Examples
--------
>>> import cupy as cp
>>> from pylibraft.common import DeviceResources
>>> from pylibraft.neighbors import ivf_pq
>>> n_samples = 50000
>>> n_features = 50
>>> dataset = cp.random.random_sample((n_samples, n_features),
... dtype=cp.float32)
>>> # Build index
>>> handle = DeviceResources()
>>> index = ivf_pq.build(ivf_pq.IndexParams(), dataset, handle=handle)
>>> ivf_pq.save("my_index.bin", index, handle=handle)
"""
if not index.trained:
raise ValueError("Index need to be built before saving it.")
if handle is None:
handle = DeviceResources()
cdef device_resources* handle_ = \
<device_resources*><size_t>handle.getHandle()
cdef string c_filename = filename.encode('utf-8')
c_ivf_pq.serialize(deref(handle_), c_filename, deref(index.index))
@auto_sync_handle
def load(filename, handle=None):
"""
Loads index from a file.
Saving / loading the index is experimental. The serialization format is
subject to change, therefore loading an index saved with a previous
version of raft is not guaranteed to work.
Parameters
----------
filename : string
Name of the file.
{handle_docstring}
Returns
-------
index : Index
Examples
--------
>>> import cupy as cp
>>> from pylibraft.common import DeviceResources
>>> from pylibraft.neighbors import ivf_pq
>>> n_samples = 50000
>>> n_features = 50
>>> dataset = cp.random.random_sample((n_samples, n_features),
... dtype=cp.float32)
>>> # Build and save index
>>> handle = DeviceResources()
>>> index = ivf_pq.build(ivf_pq.IndexParams(), dataset, handle=handle)
>>> ivf_pq.save("my_index.bin", index, handle=handle)
>>> del index
>>> n_queries = 100
>>> queries = cp.random.random_sample((n_queries, n_features),
... dtype=cp.float32)
>>> handle = DeviceResources()
>>> index = ivf_pq.load("my_index.bin", handle=handle)
>>> distances, neighbors = ivf_pq.search(ivf_pq.SearchParams(), index,
... queries, k=10, handle=handle)
"""
if handle is None:
handle = DeviceResources()
cdef device_resources* handle_ = \
<device_resources*><size_t>handle.getHandle()
cdef string c_filename = filename.encode('utf-8')
index = Index()
c_ivf_pq.deserialize(deref(handle_), c_filename, index.index)
index.trained = True
return index
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_pq/ivf_pq.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
cimport pylibraft.neighbors.ivf_pq.cpp.c_ivf_pq as c_ivf_pq
cdef class IndexParams:
cdef c_ivf_pq.index_params params
cdef class SearchParams:
cdef c_ivf_pq.search_params params
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_pq/__init__.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .ivf_pq import (
Index,
IndexParams,
SearchParams,
build,
extend,
load,
save,
search,
)
__all__ = [
"Index",
"IndexParams",
"SearchParams",
"build",
"extend",
"load",
"save",
"search",
]
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_pq | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_pq/cpp/c_ivf_pq.pxd | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
import pylibraft.common.handle
from cython.operator cimport dereference as deref
from libc.stdint cimport int8_t, int64_t, uint8_t, uint32_t, uintptr_t
from libcpp cimport bool, nullptr
from libcpp.string cimport string
from rmm._lib.memory_resource cimport device_memory_resource
from pylibraft.common.cpp.mdspan cimport (
device_matrix_view,
device_vector_view,
row_major,
)
from pylibraft.common.handle cimport device_resources
from pylibraft.common.optional cimport optional
from pylibraft.distance.distance_type cimport DistanceType
cdef extern from "library_types.h":
ctypedef enum cudaDataType_t:
CUDA_R_32F "CUDA_R_32F" # float
CUDA_R_16F "CUDA_R_16F" # half
# uint8 - used to refer to IVF-PQ's fp8 storage type
CUDA_R_8U "CUDA_R_8U"
cdef extern from "raft/neighbors/ann_types.hpp" \
namespace "raft::neighbors::ann" nogil:
cdef cppclass ann_index "raft::neighbors::index":
pass
cdef cppclass ann_index_params "raft::spatial::knn::index_params":
DistanceType metric
float metric_arg
bool add_data_on_build
cdef cppclass ann_search_params "raft::spatial::knn::search_params":
pass
cdef extern from "raft/neighbors/ivf_pq_types.hpp" \
namespace "raft::neighbors::ivf_pq" nogil:
ctypedef enum codebook_gen:
PER_SUBSPACE "raft::neighbors::ivf_pq::codebook_gen::PER_SUBSPACE",
PER_CLUSTER "raft::neighbors::ivf_pq::codebook_gen::PER_CLUSTER"
cpdef cppclass index_params(ann_index_params):
uint32_t n_lists
uint32_t kmeans_n_iters
double kmeans_trainset_fraction
uint32_t pq_bits
uint32_t pq_dim
codebook_gen codebook_kind
bool force_random_rotation
bool conservative_memory_allocation
cdef cppclass index[IdxT](ann_index):
index(const device_resources& handle,
DistanceType metric,
codebook_gen codebook_kind,
uint32_t n_lists,
uint32_t dim,
uint32_t pq_bits,
uint32_t pq_dim,
bool conservative_memory_allocation)
IdxT size()
uint32_t dim()
uint32_t pq_dim()
uint32_t pq_len()
uint32_t pq_bits()
DistanceType metric()
uint32_t n_lists()
uint32_t rot_dim()
codebook_gen codebook_kind()
bool conservative_memory_allocation()
cpdef cppclass search_params(ann_search_params):
uint32_t n_probes
cudaDataType_t lut_dtype
cudaDataType_t internal_distance_dtype
cdef extern from "raft_runtime/neighbors/ivf_pq.hpp" \
namespace "raft::runtime::neighbors::ivf_pq" nogil:
cdef void build(
const device_resources& handle,
const index_params& params,
device_matrix_view[float, int64_t, row_major] dataset,
index[int64_t]* index) except +
cdef void build(
const device_resources& handle,
const index_params& params,
device_matrix_view[int8_t, int64_t, row_major] dataset,
index[int64_t]* index) except +
cdef void build(
const device_resources& handle,
const index_params& params,
device_matrix_view[uint8_t, int64_t, row_major] dataset,
index[int64_t]* index) except +
cdef void extend(
const device_resources& handle,
device_matrix_view[float, int64_t, row_major] new_vectors,
optional[device_vector_view[int64_t, int64_t]] new_indices,
index[int64_t]* index) except +
cdef void extend(
const device_resources& handle,
device_matrix_view[int8_t, int64_t, row_major] new_vectors,
optional[device_vector_view[int64_t, int64_t]] new_indices,
index[int64_t]* index) except +
cdef void extend(
const device_resources& handle,
device_matrix_view[uint8_t, int64_t, row_major] new_vectors,
optional[device_vector_view[int64_t, int64_t]] new_indices,
index[int64_t]* index) except +
cdef void search(
const device_resources& handle,
const search_params& params,
const index[int64_t]& index,
device_matrix_view[float, int64_t, row_major] queries,
device_matrix_view[int64_t, int64_t, row_major] neighbors,
device_matrix_view[float, int64_t, row_major] distances) except +
cdef void search(
const device_resources& handle,
const search_params& params,
const index[int64_t]& index,
device_matrix_view[int8_t, int64_t, row_major] queries,
device_matrix_view[int64_t, int64_t, row_major] neighbors,
device_matrix_view[float, int64_t, row_major] distances) except +
cdef void search(
const device_resources& handle,
const search_params& params,
const index[int64_t]& index,
device_matrix_view[uint8_t, int64_t, row_major] queries,
device_matrix_view[int64_t, int64_t, row_major] neighbors,
device_matrix_view[float, int64_t, row_major] distances) except +
cdef void serialize(const device_resources& handle,
const string& filename,
const index[int64_t]& index) except +
cdef void deserialize(const device_resources& handle,
const string& filename,
index[int64_t]* index) except +
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_pq | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_pq/cpp/__init__.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/cagra/CMakeLists.txt | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Set the list of Cython files to build
set(cython_sources cagra.pyx)
set(linked_libraries raft::raft raft::compiled)
# Build all of the Cython targets
rapids_cython_create_modules(
CXX
SOURCE_FILES "${cython_sources}"
LINKED_LIBRARIES "${linked_libraries}" ASSOCIATED_TARGETS raft MODULE_PREFIX neighbors_cagra_
)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/cagra/cagra.pyx | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import warnings
import numpy as np
from cython.operator cimport dereference as deref
from libc.stdint cimport (
int8_t,
int32_t,
int64_t,
uint8_t,
uint32_t,
uint64_t,
uintptr_t,
)
from libcpp cimport bool, nullptr
from libcpp.string cimport string
from pylibraft.distance.distance_type cimport DistanceType
from pylibraft.common import (
DeviceResources,
ai_wrapper,
auto_convert_output,
cai_wrapper,
device_ndarray,
)
from pylibraft.common.cai_wrapper import wrap_array
from pylibraft.common.interruptible import cuda_interruptible
from pylibraft.common.handle cimport device_resources
from pylibraft.common.handle import auto_sync_handle
from pylibraft.common.input_validation import is_c_contiguous
from rmm._lib.memory_resource cimport (
DeviceMemoryResource,
device_memory_resource,
)
cimport pylibraft.neighbors.cagra.cpp.c_cagra as c_cagra
from pylibraft.common.optional cimport make_optional, optional
from pylibraft.neighbors.common import _check_input_array, _get_metric
from pylibraft.common.cpp.mdspan cimport (
device_matrix_view,
device_vector_view,
make_device_vector_view,
row_major,
)
from pylibraft.common.mdspan cimport (
get_const_dmv_float,
get_const_dmv_int8,
get_const_dmv_uint8,
get_const_hmv_float,
get_const_hmv_int8,
get_const_hmv_uint8,
get_dmv_float,
get_dmv_int8,
get_dmv_int64,
get_dmv_uint8,
get_dmv_uint32,
get_hmv_float,
get_hmv_int8,
get_hmv_int64,
get_hmv_uint8,
get_hmv_uint32,
make_optional_view_int64,
)
from pylibraft.neighbors.common cimport _get_metric_string
cdef class IndexParams:
""""
Parameters to build index for CAGRA nearest neighbor search
Parameters
----------
metric : string denoting the metric type, default="sqeuclidean"
Valid values for metric: ["sqeuclidean"], where
- sqeuclidean is the euclidean distance without the square root
operation, i.e.: distance(a,b) = \\sum_i (a_i - b_i)^2
intermediate_graph_degree : int, default = 128
graph_degree : int, default = 64
build_algo: string denoting the graph building algorithm to use,
default = "ivf_pq"
Valid values for algo: ["ivf_pq", "nn_descent"], where
- ivf_pq will use the IVF-PQ algorithm for building the knn graph
- nn_descent (experimental) will use the NN-Descent algorithm for
building the knn graph. It is expected to be generally
faster than ivf_pq.
"""
cdef c_cagra.index_params params
def __init__(self, *,
metric="sqeuclidean",
intermediate_graph_degree=128,
graph_degree=64,
build_algo="ivf_pq"):
self.params.metric = _get_metric(metric)
self.params.metric_arg = 0
self.params.intermediate_graph_degree = intermediate_graph_degree
self.params.graph_degree = graph_degree
if build_algo == "ivf_pq":
self.params.build_algo = c_cagra.graph_build_algo.IVF_PQ
elif build_algo == "nn_descent":
self.params.build_algo = c_cagra.graph_build_algo.NN_DESCENT
@property
def metric(self):
return self.params.metric
@property
def intermediate_graph_degree(self):
return self.params.intermediate_graph_degree
@property
def graph_degree(self):
return self.params.graph_degree
cdef class Index:
cdef readonly bool trained
cdef str active_index_type
def __cinit__(self):
self.trained = False
self.active_index_type = None
cdef class IndexFloat(Index):
cdef c_cagra.index[float, uint32_t] * index
def __cinit__(self, handle=None):
if handle is None:
handle = DeviceResources()
cdef device_resources* handle_ = \
<device_resources*><size_t>handle.getHandle()
self.index = new c_cagra.index[float, uint32_t](
deref(handle_))
def __repr__(self):
m_str = "metric=" + _get_metric_string(self.index.metric())
attr_str = [attr + "=" + str(getattr(self, attr))
for attr in ["metric", "dim", "graph_degree"]]
attr_str = [m_str] + attr_str
return "Index(type=CAGRA, " + (", ".join(attr_str)) + ")"
@auto_sync_handle
def update_dataset(self, dataset, handle=None):
""" Replace the dataset with a new dataset.
Parameters
----------
dataset : array interface compliant matrix shape (n_samples, dim)
{handle_docstring}
"""
cdef device_resources* handle_ = \
<device_resources*><size_t>handle.getHandle()
dataset_ai = wrap_array(dataset)
dataset_dt = dataset_ai.dtype
_check_input_array(dataset_ai, [np.dtype("float32")])
if dataset_ai.from_cai:
self.index[0].update_dataset(deref(handle_),
get_const_dmv_float(dataset_ai,
check_shape=True))
else:
self.index[0].update_dataset(deref(handle_),
get_const_hmv_float(dataset_ai,
check_shape=True))
@property
def metric(self):
return self.index[0].metric()
@property
def size(self):
return self.index[0].size()
@property
def dim(self):
return self.index[0].dim()
@property
def graph_degree(self):
return self.index[0].graph_degree()
def __dealloc__(self):
if self.index is not NULL:
del self.index
cdef class IndexInt8(Index):
cdef c_cagra.index[int8_t, uint32_t] * index
def __cinit__(self, handle=None):
if handle is None:
handle = DeviceResources()
cdef device_resources* handle_ = \
<device_resources*><size_t>handle.getHandle()
self.index = new c_cagra.index[int8_t, uint32_t](
deref(handle_))
@auto_sync_handle
def update_dataset(self, dataset, handle=None):
""" Replace the dataset with a new dataset.
Parameters
----------
dataset : array interface compliant matrix shape (n_samples, dim)
{handle_docstring}
"""
cdef device_resources* handle_ = \
<device_resources*><size_t>handle.getHandle()
dataset_ai = wrap_array(dataset)
dataset_dt = dataset_ai.dtype
_check_input_array(dataset_ai, [np.dtype("byte")])
if dataset_ai.from_cai:
self.index[0].update_dataset(deref(handle_),
get_const_dmv_int8(dataset_ai,
check_shape=True))
else:
self.index[0].update_dataset(deref(handle_),
get_const_hmv_int8(dataset_ai,
check_shape=True))
def __repr__(self):
m_str = "metric=" + _get_metric_string(self.index.metric())
attr_str = [attr + "=" + str(getattr(self, attr))
for attr in ["metric", "dim", "graph_degree"]]
attr_str = [m_str] + attr_str
return "Index(type=CAGRA, " + (", ".join(attr_str)) + ")"
@property
def metric(self):
return self.index[0].metric()
@property
def size(self):
return self.index[0].size()
@property
def dim(self):
return self.index[0].dim()
@property
def graph_degree(self):
return self.index[0].graph_degree()
def __dealloc__(self):
if self.index is not NULL:
del self.index
cdef class IndexUint8(Index):
cdef c_cagra.index[uint8_t, uint32_t] * index
def __cinit__(self, handle=None):
if handle is None:
handle = DeviceResources()
cdef device_resources* handle_ = \
<device_resources*><size_t>handle.getHandle()
self.index = new c_cagra.index[uint8_t, uint32_t](
deref(handle_))
@auto_sync_handle
def update_dataset(self, dataset, handle=None):
""" Replace the dataset with a new dataset.
Parameters
----------
dataset : array interface compliant matrix shape (n_samples, dim)
{handle_docstring}
"""
cdef device_resources* handle_ = \
<device_resources*><size_t>handle.getHandle()
dataset_ai = wrap_array(dataset)
dataset_dt = dataset_ai.dtype
_check_input_array(dataset_ai, [np.dtype("ubyte")])
if dataset_ai.from_cai:
self.index[0].update_dataset(deref(handle_),
get_const_dmv_uint8(dataset_ai,
check_shape=True))
else:
self.index[0].update_dataset(deref(handle_),
get_const_hmv_uint8(dataset_ai,
check_shape=True))
def __repr__(self):
m_str = "metric=" + _get_metric_string(self.index.metric())
attr_str = [attr + "=" + str(getattr(self, attr))
for attr in ["metric", "dim", "graph_degree"]]
attr_str = [m_str] + attr_str
return "Index(type=CAGRA, " + (", ".join(attr_str)) + ")"
@property
def metric(self):
return self.index[0].metric()
@property
def size(self):
return self.index[0].size()
@property
def dim(self):
return self.index[0].dim()
@property
def graph_degree(self):
return self.index[0].graph_degree()
def __dealloc__(self):
if self.index is not NULL:
del self.index
@auto_sync_handle
@auto_convert_output
def build(IndexParams index_params, dataset, handle=None):
"""
Build the CAGRA index from the dataset for efficient search.
The build performs two different steps- first an intermediate knn-graph is
constructed, then it's optimized it to create the final graph. The
index_params object controls the node degree of these graphs.
It is required that both the dataset and the optimized graph fit the
GPU memory.
The following distance metrics are supported:
- L2
Parameters
----------
index_params : IndexParams object
dataset : CUDA array interface compliant matrix shape (n_samples, dim)
Supported dtype [float, int8, uint8]
{handle_docstring}
Returns
-------
index: cagra.Index
Examples
--------
>>> import cupy as cp
>>> from pylibraft.common import DeviceResources
>>> from pylibraft.neighbors import cagra
>>> n_samples = 50000
>>> n_features = 50
>>> n_queries = 1000
>>> k = 10
>>> dataset = cp.random.random_sample((n_samples, n_features),
... dtype=cp.float32)
>>> handle = DeviceResources()
>>> build_params = cagra.IndexParams(metric="sqeuclidean")
>>> index = cagra.build(build_params, dataset, handle=handle)
>>> distances, neighbors = cagra.search(cagra.SearchParams(),
... index, dataset,
... k, handle=handle)
>>> # pylibraft functions are often asynchronous so the
>>> # handle needs to be explicitly synchronized
>>> handle.sync()
>>> distances = cp.asarray(distances)
>>> neighbors = cp.asarray(neighbors)
"""
dataset_ai = wrap_array(dataset)
dataset_dt = dataset_ai.dtype
_check_input_array(dataset_ai, [np.dtype('float32'), np.dtype('byte'),
np.dtype('ubyte')])
if handle is None:
handle = DeviceResources()
cdef device_resources* handle_ = \
<device_resources*><size_t>handle.getHandle()
cdef IndexFloat idx_float
cdef IndexInt8 idx_int8
cdef IndexUint8 idx_uint8
if dataset_ai.from_cai:
if dataset_dt == np.float32:
idx_float = IndexFloat(handle)
idx_float.active_index_type = "float32"
with cuda_interruptible():
c_cagra.build_device(
deref(handle_),
index_params.params,
get_dmv_float(dataset_ai, check_shape=True),
deref(idx_float.index))
idx_float.trained = True
return idx_float
elif dataset_dt == np.byte:
idx_int8 = IndexInt8(handle)
idx_int8.active_index_type = "byte"
with cuda_interruptible():
c_cagra.build_device(
deref(handle_),
index_params.params,
get_dmv_int8(dataset_ai, check_shape=True),
deref(idx_int8.index))
idx_int8.trained = True
return idx_int8
elif dataset_dt == np.ubyte:
idx_uint8 = IndexUint8(handle)
idx_uint8.active_index_type = "ubyte"
with cuda_interruptible():
c_cagra.build_device(
deref(handle_),
index_params.params,
get_dmv_uint8(dataset_ai, check_shape=True),
deref(idx_uint8.index))
idx_uint8.trained = True
return idx_uint8
else:
raise TypeError("dtype %s not supported" % dataset_dt)
else:
if dataset_dt == np.float32:
idx_float = IndexFloat(handle)
idx_float.active_index_type = "float32"
with cuda_interruptible():
c_cagra.build_host(
deref(handle_),
index_params.params,
get_hmv_float(dataset_ai, check_shape=True),
deref(idx_float.index))
idx_float.trained = True
return idx_float
elif dataset_dt == np.byte:
idx_int8 = IndexInt8(handle)
idx_int8.active_index_type = "byte"
with cuda_interruptible():
c_cagra.build_host(
deref(handle_),
index_params.params,
get_hmv_int8(dataset_ai, check_shape=True),
deref(idx_int8.index))
idx_int8.trained = True
return idx_int8
elif dataset_dt == np.ubyte:
idx_uint8 = IndexUint8(handle)
idx_uint8.active_index_type = "ubyte"
with cuda_interruptible():
c_cagra.build_host(
deref(handle_),
index_params.params,
get_hmv_uint8(dataset_ai, check_shape=True),
deref(idx_uint8.index))
idx_uint8.trained = True
return idx_uint8
else:
raise TypeError("dtype %s not supported" % dataset_dt)
cdef class SearchParams:
"""
CAGRA search parameters
Parameters
----------
max_queries: int, default = 0
Maximum number of queries to search at the same time (batch size).
Auto select when 0.
itopk_size: int, default = 64
Number of intermediate search results retained during the search.
This is the main knob to adjust trade off between accuracy and
search speed. Higher values improve the search accuracy.
max_iterations: int, default = 0
Upper limit of search iterations. Auto select when 0.
algo: string denoting the search algorithm to use, default = "auto"
Valid values for algo: ["auto", "single_cta", "multi_cta"], where
- auto will automatically select the best value based on query size
- single_cta is better when query contains larger number of
vectors (e.g >10)
- multi_cta is better when query contains only a few vectors
team_size: int, default = 0
Number of threads used to calculate a single distance. 4, 8, 16,
or 32.
search_width: int, default = 1
Number of graph nodes to select as the starting point for the
search in each iteration.
min_iterations: int, default = 0
Lower limit of search iterations.
thread_block_size: int, default = 0
Thread block size. 0, 64, 128, 256, 512, 1024.
Auto selection when 0.
hashmap_mode: string denoting the type of hash map to use. It's
usually better to allow the algorithm to select this value.,
default = "auto"
Valid values for hashmap_mode: ["auto", "small", "hash"], where
- auto will automatically select the best value based on algo
- small will use the small shared memory hash table with resetting.
- hash will use a single hash table in global memory.
hashmap_min_bitlen: int, default = 0
Upper limit of hashmap fill rate. More than 0.1, less than 0.9.
hashmap_max_fill_rate: float, default = 0.5
Upper limit of hashmap fill rate. More than 0.1, less than 0.9.
num_random_samplings: int, default = 1
Number of iterations of initial random seed node selection. 1 or
more.
rand_xor_mask: int, default = 0x128394
Bit mask used for initial random seed node selection.
"""
cdef c_cagra.search_params params
def __init__(self, *,
max_queries=0,
itopk_size=64,
max_iterations=0,
algo="auto",
team_size=0,
search_width=1,
min_iterations=0,
thread_block_size=0,
hashmap_mode="auto",
hashmap_min_bitlen=0,
hashmap_max_fill_rate=0.5,
num_random_samplings=1,
rand_xor_mask=0x128394):
self.params.max_queries = max_queries
self.params.itopk_size = itopk_size
self.params.max_iterations = max_iterations
if algo == "single_cta":
self.params.algo = c_cagra.search_algo.SINGLE_CTA
elif algo == "multi_cta":
self.params.algo = c_cagra.search_algo.MULTI_CTA
elif algo == "multi_kernel":
self.params.algo = c_cagra.search_algo.MULTI_KERNEL
elif algo == "auto":
self.params.algo = c_cagra.search_algo.AUTO
else:
raise ValueError("`algo` value not supported.")
self.params.team_size = team_size
self.params.search_width = search_width
self.params.min_iterations = min_iterations
self.params.thread_block_size = thread_block_size
if hashmap_mode == "hash":
self.params.hashmap_mode = c_cagra.hash_mode.HASH
elif hashmap_mode == "small":
self.params.hashmap_mode = c_cagra.hash_mode.SMALL
elif hashmap_mode == "auto":
self.params.hashmap_mode = c_cagra.hash_mode.AUTO
else:
raise ValueError("`hashmap_mode` value not supported.")
self.params.hashmap_min_bitlen = hashmap_min_bitlen
self.params.hashmap_max_fill_rate = hashmap_max_fill_rate
self.params.num_random_samplings = num_random_samplings
self.params.rand_xor_mask = rand_xor_mask
def __repr__(self):
attr_str = [attr + "=" + str(getattr(self, attr))
for attr in [
"max_queries", "itopk_size", "max_iterations", "algo",
"team_size", "search_width", "min_iterations",
"thread_block_size", "hashmap_mode",
"hashmap_min_bitlen", "hashmap_max_fill_rate",
"num_random_samplings", "rand_xor_mask"]]
return "SearchParams(type=CAGRA, " + (", ".join(attr_str)) + ")"
    # ------------------------------------------------------------------
    # Read-only accessors mirroring the underlying C++ search_params.
    # Note: `algo` and `hashmap_mode` return the raw enum values set at
    # construction, not the string names accepted by __init__.
    # ------------------------------------------------------------------
    @property
    def max_queries(self):
        return self.params.max_queries

    @property
    def itopk_size(self):
        return self.params.itopk_size

    @property
    def max_iterations(self):
        return self.params.max_iterations

    @property
    def algo(self):
        return self.params.algo

    @property
    def team_size(self):
        return self.params.team_size

    @property
    def search_width(self):
        return self.params.search_width

    @property
    def min_iterations(self):
        return self.params.min_iterations

    @property
    def thread_block_size(self):
        return self.params.thread_block_size

    @property
    def hashmap_mode(self):
        return self.params.hashmap_mode

    @property
    def hashmap_min_bitlen(self):
        return self.params.hashmap_min_bitlen

    @property
    def hashmap_max_fill_rate(self):
        return self.params.hashmap_max_fill_rate

    @property
    def num_random_samplings(self):
        return self.params.num_random_samplings

    @property
    def rand_xor_mask(self):
        return self.params.rand_xor_mask
@auto_sync_handle
@auto_convert_output
def search(SearchParams search_params,
           Index index,
           queries,
           k,
           neighbors=None,
           distances=None,
           handle=None):
    """
    Find the k nearest neighbors for each query.

    Parameters
    ----------
    search_params : SearchParams
    index : Index
        Trained CAGRA index.
    queries : CUDA array interface compliant matrix shape (n_queries, dim)
        Supported dtype [float, int8, uint8]
    k : int
        The number of neighbors.
    neighbors : Optional CUDA array interface compliant matrix shape
                (n_queries, k), dtype uint32. If supplied, neighbor
                indices will be written here in-place. (default None)
    distances : Optional CUDA array interface compliant matrix shape
                (n_queries, k) If supplied, the distances to the
                neighbors will be written here in-place. (default None)
    {handle_docstring}

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import DeviceResources
    >>> from pylibraft.neighbors import cagra
    >>> n_samples = 50000
    >>> n_features = 50
    >>> n_queries = 1000
    >>> dataset = cp.random.random_sample((n_samples, n_features),
    ...                                   dtype=cp.float32)
    >>> # Build index
    >>> handle = DeviceResources()
    >>> index = cagra.build(cagra.IndexParams(), dataset, handle=handle)
    >>> # Search using the built index
    >>> queries = cp.random.random_sample((n_queries, n_features),
    ...                                   dtype=cp.float32)
    >>> k = 10
    >>> search_params = cagra.SearchParams(
    ...     max_queries=100,
    ...     itopk_size=64
    ... )
    >>> # Using a pooling allocator reduces overhead of temporary array
    >>> # creation during search. This is useful if multiple searches
    >>> # are performed with same query size.
    >>> distances, neighbors = cagra.search(search_params, index, queries,
    ...                                     k, handle=handle)
    >>> # pylibraft functions are often asynchronous so the
    >>> # handle needs to be explicitly synchronized
    >>> handle.sync()
    >>> neighbors = cp.asarray(neighbors)
    >>> distances = cp.asarray(distances)
    """
    if not index.trained:
        raise ValueError("Index need to be built before calling search.")

    if handle is None:
        handle = DeviceResources()
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()

    queries_cai = cai_wrapper(queries)
    queries_dt = queries_cai.dtype
    cdef uint32_t n_queries = queries_cai.shape[0]

    # Queries must have one of the supported dtypes and exactly as many
    # columns as the indexed dataset's dimensionality.
    _check_input_array(queries_cai, [np.dtype('float32'), np.dtype('byte'),
                                     np.dtype('ubyte')],
                       exp_cols=index.dim)

    # Allocate outputs on demand. Neighbor ids are uint32 and distances
    # float32 regardless of the query dtype (see the typed calls below).
    if neighbors is None:
        neighbors = device_ndarray.empty((n_queries, k), dtype='uint32')

    neighbors_cai = cai_wrapper(neighbors)
    _check_input_array(neighbors_cai, [np.dtype('uint32')],
                       exp_rows=n_queries, exp_cols=k)

    if distances is None:
        distances = device_ndarray.empty((n_queries, k), dtype='float32')

    distances_cai = cai_wrapper(distances)
    _check_input_array(distances_cai, [np.dtype('float32')],
                       exp_rows=n_queries, exp_cols=k)

    cdef c_cagra.search_params params = search_params.params
    cdef IndexFloat idx_float
    cdef IndexInt8 idx_int8
    cdef IndexUint8 idx_uint8

    # Dispatch to the C++ specialization matching the query dtype; the
    # cast of `index` to the typed wrapper raises if the index was built
    # with a different dtype.
    if queries_dt == np.float32:
        idx_float = index
        with cuda_interruptible():
            c_cagra.search(deref(handle_),
                           params,
                           deref(idx_float.index),
                           get_dmv_float(queries_cai, check_shape=True),
                           get_dmv_uint32(neighbors_cai, check_shape=True),
                           get_dmv_float(distances_cai, check_shape=True))
    elif queries_dt == np.byte:
        idx_int8 = index
        with cuda_interruptible():
            c_cagra.search(deref(handle_),
                           params,
                           deref(idx_int8.index),
                           get_dmv_int8(queries_cai, check_shape=True),
                           get_dmv_uint32(neighbors_cai, check_shape=True),
                           get_dmv_float(distances_cai, check_shape=True))
    elif queries_dt == np.ubyte:
        idx_uint8 = index
        with cuda_interruptible():
            c_cagra.search(deref(handle_),
                           params,
                           deref(idx_uint8.index),
                           get_dmv_uint8(queries_cai, check_shape=True),
                           get_dmv_uint32(neighbors_cai, check_shape=True),
                           get_dmv_float(distances_cai, check_shape=True))
    else:
        raise ValueError("query dtype %s not supported" % queries_dt)

    return (distances, neighbors)
@auto_sync_handle
def save(filename, Index index, bool include_dataset=True, handle=None):
    """
    Saves the index to a file.

    Saving / loading the index is experimental. The serialization format is
    subject to change.

    Parameters
    ----------
    filename : string
        Name of the file.
    index : Index
        Trained CAGRA index.
    include_dataset : bool
        Whether or not to write out the dataset along with the index. Including
        the dataset in the serialized index will use extra disk space, and
        might not be desired if you already have a copy of the dataset on
        disk. If this option is set to false, you will have to call
        `index.update_dataset(dataset)` after loading the index.
    {handle_docstring}

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import DeviceResources
    >>> from pylibraft.neighbors import cagra
    >>> n_samples = 50000
    >>> n_features = 50
    >>> dataset = cp.random.random_sample((n_samples, n_features),
    ...                                   dtype=cp.float32)
    >>> # Build index
    >>> handle = DeviceResources()
    >>> index = cagra.build(cagra.IndexParams(), dataset, handle=handle)
    >>> # Serialize and deserialize the cagra index built
    >>> cagra.save("my_index.bin", index, handle=handle)
    >>> index_loaded = cagra.load("my_index.bin", handle=handle)
    """
    if not index.trained:
        raise ValueError("Index need to be built before saving it.")

    if handle is None:
        handle = DeviceResources()
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()

    cdef string c_filename = filename.encode('utf-8')

    cdef IndexFloat idx_float
    cdef IndexInt8 idx_int8
    cdef IndexUint8 idx_uint8

    # Dispatch on the dtype recorded when the index was built or loaded,
    # so the correct C++ serializer specialization is invoked.
    if index.active_index_type == "float32":
        idx_float = index
        c_cagra.serialize_file(
            deref(handle_), c_filename, deref(idx_float.index),
            include_dataset)
    elif index.active_index_type == "byte":
        idx_int8 = index
        c_cagra.serialize_file(
            deref(handle_), c_filename, deref(idx_int8.index), include_dataset)
    elif index.active_index_type == "ubyte":
        idx_uint8 = index
        c_cagra.serialize_file(
            deref(handle_), c_filename, deref(idx_uint8.index),
            include_dataset)
    else:
        raise ValueError(
            "Index dtype %s not supported" % index.active_index_type)
@auto_sync_handle
def load(filename, handle=None):
    """
    Loads index from file.

    Saving / loading the index is experimental. The serialization format is
    subject to change, therefore loading an index saved with a previous
    version of raft is not guaranteed to work.

    Parameters
    ----------
    filename : string
        Name of the file.
    {handle_docstring}

    Returns
    -------
    index : Index
    """
    if handle is None:
        handle = DeviceResources()
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()
    cdef string c_filename = filename.encode('utf-8')
    cdef IndexFloat idx_float
    cdef IndexInt8 idx_int8
    cdef IndexUint8 idx_uint8

    # Peek at the first three bytes of the file: they are fed to
    # np.dtype() to decide which typed index wrapper to instantiate.
    # NOTE(review): this couples the reader to the on-disk layout of the
    # C++ serializer -- confirm against it if the format ever changes.
    with open(filename, "rb") as f:
        type_str = f.read(3).decode("utf8")

    dataset_dt = np.dtype(type_str)

    if dataset_dt == np.float32:
        idx_float = IndexFloat(handle)
        c_cagra.deserialize_file(
            deref(handle_), c_filename, idx_float.index)
        idx_float.trained = True
        idx_float.active_index_type = 'float32'
        return idx_float
    elif dataset_dt == np.byte:
        idx_int8 = IndexInt8(handle)
        c_cagra.deserialize_file(
            deref(handle_), c_filename, idx_int8.index)
        idx_int8.trained = True
        idx_int8.active_index_type = 'byte'
        return idx_int8
    elif dataset_dt == np.ubyte:
        idx_uint8 = IndexUint8(handle)
        c_cagra.deserialize_file(
            deref(handle_), c_filename, idx_uint8.index)
        idx_uint8.trained = True
        idx_uint8.active_index_type = 'ubyte'
        return idx_uint8
    else:
        raise ValueError("Dataset dtype %s not supported" % dataset_dt)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/cagra/__init__.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cagra import Index, IndexParams, SearchParams, build, load, save, search
# Public API of the CAGRA approximate nearest neighbor subpackage.
__all__ = [
    "Index",
    "IndexParams",
    "SearchParams",
    "build",
    "load",
    "save",
    "search",
]
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/cagra | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/cagra/cpp/__init__.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/cagra | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/cagra/cpp/c_cagra.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
import pylibraft.common.handle
from cython.operator cimport dereference as deref
from libc.stdint cimport int8_t, int64_t, uint8_t, uint32_t, uint64_t
from libcpp cimport bool, nullptr
from libcpp.string cimport string
from rmm._lib.memory_resource cimport device_memory_resource
from pylibraft.common.cpp.mdspan cimport (
device_matrix_view,
device_vector_view,
host_matrix_view,
row_major,
)
from pylibraft.common.handle cimport device_resources
from pylibraft.common.mdspan cimport const_float, const_int8_t, const_uint8_t
from pylibraft.common.optional cimport optional
from pylibraft.distance.distance_type cimport DistanceType
from pylibraft.neighbors.ivf_pq.cpp.c_ivf_pq cimport (
ann_index,
ann_index_params,
ann_search_params,
index_params as ivfpq_ip,
search_params as ivfpq_sp,
)
cdef extern from "raft/neighbors/cagra_types.hpp" \
namespace "raft::neighbors::cagra" nogil:

    # Strategy used to construct the intermediate k-NN graph at build time.
    ctypedef enum graph_build_algo:
        IVF_PQ "raft::neighbors::cagra::graph_build_algo::IVF_PQ",
        NN_DESCENT "raft::neighbors::cagra::graph_build_algo::NN_DESCENT"

    # Build-time parameters for CAGRA graph construction.
    cpdef cppclass index_params(ann_index_params):
        size_t intermediate_graph_degree
        size_t graph_degree
        graph_build_algo build_algo

    # Search kernel variant; AUTO lets the library pick one.
    ctypedef enum search_algo:
        SINGLE_CTA "raft::neighbors::cagra::search_algo::SINGLE_CTA",
        MULTI_CTA "raft::neighbors::cagra::search_algo::MULTI_CTA",
        MULTI_KERNEL "raft::neighbors::cagra::search_algo::MULTI_KERNEL",
        AUTO "raft::neighbors::cagra::search_algo::AUTO"

    # Placement strategy for the hash table used during graph traversal.
    ctypedef enum hash_mode:
        HASH "raft::neighbors::cagra::hash_mode::HASH",
        SMALL "raft::neighbors::cagra::hash_mode::SMALL",
        AUTO "raft::neighbors::cagra::hash_mode::AUTO"

    # Runtime search parameters; mirrored 1:1 by the Python SearchParams.
    cpdef cppclass search_params(ann_search_params):
        size_t max_queries
        size_t itopk_size
        size_t max_iterations
        search_algo algo
        size_t team_size
        size_t search_width
        size_t min_iterations
        size_t thread_block_size
        hash_mode hashmap_mode
        size_t hashmap_min_bitlen
        float hashmap_max_fill_rate
        uint32_t num_random_samplings
        uint64_t rand_xor_mask

    # C++ CAGRA index over element type T with neighbor index type IdxT.
    cdef cppclass index[T, IdxT](ann_index):
        index(const device_resources&)

        DistanceType metric()
        IdxT size()
        uint32_t dim()
        uint32_t graph_degree()
        device_matrix_view[T, IdxT, row_major] dataset()
        device_matrix_view[T, IdxT, row_major] graph()

        # hack: can't use the T template param here because of issues handling
        # const w/ cython. introduce a new template param to get around this
        void update_dataset[ValueT](const device_resources & handle,
                                    host_matrix_view[ValueT,
                                                     int64_t,
                                                     row_major] dataset)
        void update_dataset[ValueT](const device_resources & handle,
                                    device_matrix_view[ValueT,
                                                       int64_t,
                                                       row_major] dataset)
cdef extern from "raft_runtime/neighbors/cagra.hpp" \
namespace "raft::runtime::neighbors::cagra" nogil:

    # ---- build: one overload per element type, device-resident dataset.
    cdef void build_device(
        const device_resources& handle,
        const index_params& params,
        device_matrix_view[float, int64_t, row_major] dataset,
        index[float, uint32_t]& index) except +

    cdef void build_device(
        const device_resources& handle,
        const index_params& params,
        device_matrix_view[int8_t, int64_t, row_major] dataset,
        index[int8_t, uint32_t]& index) except +

    cdef void build_device(
        const device_resources& handle,
        const index_params& params,
        device_matrix_view[uint8_t, int64_t, row_major] dataset,
        index[uint8_t, uint32_t]& index) except +

    # ---- build overloads accepting a host-resident dataset.
    cdef void build_host(
        const device_resources& handle,
        const index_params& params,
        host_matrix_view[float, int64_t, row_major] dataset,
        index[float, uint32_t]& index) except +

    cdef void build_host(
        const device_resources& handle,
        const index_params& params,
        host_matrix_view[int8_t, int64_t, row_major] dataset,
        index[int8_t, uint32_t]& index) except +

    cdef void build_host(
        const device_resources& handle,
        const index_params& params,
        host_matrix_view[uint8_t, int64_t, row_major] dataset,
        index[uint8_t, uint32_t]& index) except +

    # ---- search: neighbor ids are always uint32, distances float32,
    #      regardless of the dataset element type.
    cdef void search(
        const device_resources& handle,
        const search_params& params,
        const index[float, uint32_t]& index,
        device_matrix_view[float, int64_t, row_major] queries,
        device_matrix_view[uint32_t, int64_t, row_major] neighbors,
        device_matrix_view[float, int64_t, row_major] distances) except +

    cdef void search(
        const device_resources& handle,
        const search_params& params,
        const index[int8_t, uint32_t]& index,
        device_matrix_view[int8_t, int64_t, row_major] queries,
        device_matrix_view[uint32_t, int64_t, row_major] neighbors,
        device_matrix_view[float, int64_t, row_major] distances) except +

    cdef void search(
        const device_resources& handle,
        const search_params& params,
        const index[uint8_t, uint32_t]& index,
        device_matrix_view[uint8_t, int64_t, row_major] queries,
        device_matrix_view[uint32_t, int64_t, row_major] neighbors,
        device_matrix_view[float, int64_t, row_major] distances) except +

    # ---- (de)serialization to/from an in-memory string buffer.
    cdef void serialize(const device_resources& handle,
                        string& str,
                        const index[float, uint32_t]& index,
                        bool include_dataset) except +

    cdef void deserialize(const device_resources& handle,
                          const string& str,
                          index[float, uint32_t]* index) except +

    cdef void serialize(const device_resources& handle,
                        string& str,
                        const index[uint8_t, uint32_t]& index,
                        bool include_dataset) except +

    cdef void deserialize(const device_resources& handle,
                          const string& str,
                          index[uint8_t, uint32_t]* index) except +

    cdef void serialize(const device_resources& handle,
                        string& str,
                        const index[int8_t, uint32_t]& index,
                        bool include_dataset) except +

    cdef void deserialize(const device_resources& handle,
                          const string& str,
                          index[int8_t, uint32_t]* index) except +

    # ---- (de)serialization to/from a file on disk.
    cdef void serialize_file(const device_resources& handle,
                             const string& filename,
                             const index[float, uint32_t]& index,
                             bool include_dataset) except +

    cdef void deserialize_file(const device_resources& handle,
                               const string& filename,
                               index[float, uint32_t]* index) except +

    cdef void serialize_file(const device_resources& handle,
                             const string& filename,
                             const index[uint8_t, uint32_t]& index,
                             bool include_dataset) except +

    cdef void deserialize_file(const device_resources& handle,
                               const string& filename,
                               index[uint8_t, uint32_t]* index) except +

    cdef void serialize_file(const device_resources& handle,
                             const string& filename,
                             const index[int8_t, uint32_t]& index,
                             bool include_dataset) except +

    cdef void deserialize_file(const device_resources& handle,
                               const string& filename,
                               index[int8_t, uint32_t]* index) except +
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/cpp/brute_force.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
import pylibraft.common.handle
from cython.operator cimport dereference as deref
from libc.stdint cimport int8_t, int64_t, uint8_t, uint64_t, uintptr_t
from libcpp cimport bool, nullptr
from libcpp.string cimport string
from libcpp.vector cimport vector
from rmm._lib.memory_resource cimport device_memory_resource
from pylibraft.common.cpp.mdspan cimport (
device_matrix_view,
host_matrix_view,
make_device_matrix_view,
make_host_matrix_view,
row_major,
)
from pylibraft.common.cpp.optional cimport optional
from pylibraft.common.handle cimport device_resources
from pylibraft.distance.distance_type cimport DistanceType
cdef extern from "raft_runtime/neighbors/brute_force.hpp" \
namespace "raft::runtime::neighbors::brute_force" nogil:

    # Exhaustive k-NN over float32 data with int64 neighbor indices.
    # `indices` and `distances` are (n_queries, k) output matrices.
    # `metric_arg` is the optional metric parameter (presumably e.g. the
    # p of Minkowski -- confirm in the C++ header); `global_id_offset`
    # appears to shift the reported neighbor ids.
    cdef void knn(const device_resources & handle,
                  device_matrix_view[float, int64_t, row_major] index,
                  device_matrix_view[float, int64_t, row_major] search,
                  device_matrix_view[int64_t, int64_t, row_major] indices,
                  device_matrix_view[float, int64_t, row_major] distances,
                  DistanceType metric,
                  optional[float] metric_arg,
                  optional[int64_t] global_id_offset) except +
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/cpp/__init__.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_flat/CMakeLists.txt | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Set the list of Cython files to build
set(cython_sources ivf_flat.pyx)
set(linked_libraries raft::raft raft::compiled)

# Build all of the Cython targets
# MODULE_PREFIX keeps the compiled extension target names unique across the
# pylibraft.neighbors subpackages.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" ASSOCIATED_TARGETS raft MODULE_PREFIX neighbors_ivfflat_
)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_flat/__init__.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .ivf_flat import (
Index,
IndexParams,
SearchParams,
build,
extend,
load,
save,
search,
)
# Public API of the IVF-Flat approximate nearest neighbor subpackage.
__all__ = [
    "Index",
    "IndexParams",
    "SearchParams",
    "build",
    "extend",
    "search",
    "save",
    "load",
]
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_flat/ivf_flat.pyx | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import warnings
import numpy as np
from cython.operator cimport dereference as deref
from libc.stdint cimport int8_t, int64_t, uint8_t, uint32_t, uintptr_t
from libcpp cimport bool, nullptr
from libcpp.string cimport string
from pylibraft.distance.distance_type cimport DistanceType
from pylibraft.common import (
DeviceResources,
ai_wrapper,
auto_convert_output,
device_ndarray,
)
from pylibraft.common.cai_wrapper import cai_wrapper
from pylibraft.common.cpp.mdspan cimport (
device_matrix_view,
device_vector_view,
make_device_vector_view,
row_major,
)
from pylibraft.common.interruptible import cuda_interruptible
from pylibraft.common.handle cimport device_resources
from pylibraft.common.handle import auto_sync_handle
from pylibraft.common.input_validation import is_c_contiguous
from rmm._lib.memory_resource cimport (
DeviceMemoryResource,
device_memory_resource,
)
cimport pylibraft.neighbors.ivf_flat.cpp.c_ivf_flat as c_ivf_flat
from pylibraft.common.cpp.optional cimport optional
from pylibraft.neighbors.common import _check_input_array, _get_metric
from pylibraft.common.mdspan cimport (
get_dmv_float,
get_dmv_int8,
get_dmv_int64,
get_dmv_uint8,
)
from pylibraft.neighbors.common cimport _get_metric_string
from pylibraft.neighbors.ivf_flat.cpp.c_ivf_flat cimport (
index_params,
search_params,
)
cdef class IndexParams:
    """
    Parameters to build index for IVF-FLAT nearest neighbor search

    Parameters
    ----------
    n_lists : int, default = 1024
        The number of clusters used in the coarse quantizer.
    metric : string denoting the metric type, default="sqeuclidean"
        Valid values for metric: ["sqeuclidean", "inner_product",
        "euclidean"], where
            - sqeuclidean is the euclidean distance without the square root
              operation, i.e.: distance(a,b) = \\sum_i (a_i - b_i)^2,
            - euclidean is the euclidean distance
            - inner product distance is defined as
              distance(a, b) = \\sum_i a_i * b_i.
    kmeans_n_iters : int, default = 20
        The number of iterations searching for kmeans centers during index
        building.
    kmeans_trainset_fraction : float, default = 0.5
        If kmeans_trainset_fraction is less than 1, then the dataset is
        subsampled, and only n_samples * kmeans_trainset_fraction rows
        are used for training.
    add_data_on_build : bool, default = True
        After training the coarse and fine quantizers, we will populate
        the index with the dataset if add_data_on_build == True, otherwise
        the index is left empty, and the extend method can be used
        to add new vectors to the index.
    adaptive_centers : bool, default = False
        By default (adaptive_centers = False), the cluster centers are
        trained in `ivf_flat::build`, and never modified in
        `ivf_flat::extend`. The alternative behavior (adaptive_centers
        = true) is to update the cluster centers for new data when it is
        added. In this case, `index.centers()` are always exactly the
        centroids of the data in the corresponding clusters. The drawback
        of this behavior is that the centroids depend on the order of
        adding new data (through the classification of the added data);
        that is, `index.centers()` "drift" together with the changing
        distribution of the newly added data.
    """
    cdef c_ivf_flat.index_params params

    def __init__(self, *,
                 n_lists=1024,
                 metric="sqeuclidean",
                 kmeans_n_iters=20,
                 kmeans_trainset_fraction=0.5,
                 add_data_on_build=True,
                 bool adaptive_centers=False):
        self.params.n_lists = n_lists
        self.params.metric = _get_metric(metric)
        # metric_arg is not exposed through this API; presumably unused by
        # the supported metrics -- confirm against the C++ index_params.
        self.params.metric_arg = 0
        self.params.kmeans_n_iters = kmeans_n_iters
        self.params.kmeans_trainset_fraction = kmeans_trainset_fraction
        self.params.add_data_on_build = add_data_on_build
        self.params.adaptive_centers = adaptive_centers

    # Read-only accessors mirroring the underlying C++ index_params.
    @property
    def n_lists(self):
        return self.params.n_lists

    @property
    def metric(self):
        return self.params.metric

    @property
    def kmeans_n_iters(self):
        return self.params.kmeans_n_iters

    @property
    def kmeans_trainset_fraction(self):
        return self.params.kmeans_trainset_fraction

    @property
    def add_data_on_build(self):
        return self.params.add_data_on_build

    @property
    def adaptive_centers(self):
        return self.params.adaptive_centers
cdef class Index:
    # Base class shared by the typed IVF-FLAT index wrappers below.
    # `trained` stays False until build/load installs a real C++ index;
    # `active_index_type` records the dataset dtype string used to pick
    # the right typed specialization later.
    cdef readonly bool trained
    cdef str active_index_type

    def __cinit__(self):
        self.trained = False
        self.active_index_type = None
cdef class IndexFloat(Index):
    # Wrapper owning a C++ ivf_flat::index<float, int64_t>.
    cdef c_ivf_flat.index[float, int64_t] * index

    def __cinit__(self, handle=None):
        if handle is None:
            handle = DeviceResources()
        cdef device_resources* handle_ = \
            <device_resources*><size_t>handle.getHandle()

        # this is to keep track of which index type is being used
        # We create a placeholder object. The actual parameter values do
        # not matter, it will be replaced with a built index object later.
        self.index = new c_ivf_flat.index[float, int64_t](
            deref(handle_), _get_metric("sqeuclidean"),
            <uint32_t>1,
            <bool>False,
            <bool>False,
            <uint32_t>4)

    def __repr__(self):
        m_str = "metric=" + _get_metric_string(self.index.metric())
        attr_str = [
            attr + "=" + str(getattr(self, attr))
            for attr in ["size", "dim", "n_lists", "adaptive_centers"]
        ]
        attr_str = [m_str] + attr_str
        return "Index(type=IVF-FLAT, " + (", ".join(attr_str)) + ")"

    # Read-only views into the underlying C++ index.
    @property
    def dim(self):
        return self.index[0].dim()

    @property
    def size(self):
        return self.index[0].size()

    @property
    def metric(self):
        return self.index[0].metric()

    @property
    def n_lists(self):
        return self.index[0].n_lists()

    @property
    def adaptive_centers(self):
        return self.index[0].adaptive_centers()
cdef class IndexInt8(Index):
    # Wrapper owning a C++ ivf_flat::index<int8_t, int64_t>.
    cdef c_ivf_flat.index[int8_t, int64_t] * index

    def __cinit__(self, handle=None):
        if handle is None:
            handle = DeviceResources()
        cdef device_resources* handle_ = \
            <device_resources*><size_t>handle.getHandle()

        # this is to keep track of which index type is being used
        # We create a placeholder object. The actual parameter values do
        # not matter, it will be replaced with a built index object later.
        self.index = new c_ivf_flat.index[int8_t, int64_t](
            deref(handle_), _get_metric("sqeuclidean"),
            <uint32_t>1,
            <bool>False,
            <bool>False,
            <uint32_t>4)

    def __repr__(self):
        m_str = "metric=" + _get_metric_string(self.index.metric())
        attr_str = [
            attr + "=" + str(getattr(self, attr))
            for attr in ["size", "dim", "n_lists", "adaptive_centers"]
        ]
        attr_str = [m_str] + attr_str
        return "Index(type=IVF-FLAT, " + (", ".join(attr_str)) + ")"

    # Read-only views into the underlying C++ index.
    @property
    def dim(self):
        return self.index[0].dim()

    @property
    def size(self):
        return self.index[0].size()

    @property
    def metric(self):
        return self.index[0].metric()

    @property
    def n_lists(self):
        return self.index[0].n_lists()

    @property
    def adaptive_centers(self):
        return self.index[0].adaptive_centers()
cdef class IndexUint8(Index):
    # Wrapper owning a C++ ivf_flat::index<uint8_t, int64_t>.
    cdef c_ivf_flat.index[uint8_t, int64_t] * index

    def __cinit__(self, handle=None):
        if handle is None:
            handle = DeviceResources()
        cdef device_resources* handle_ = \
            <device_resources*><size_t>handle.getHandle()

        # this is to keep track of which index type is being used
        # We create a placeholder object. The actual parameter values do
        # not matter, it will be replaced with a built index object later.
        self.index = new c_ivf_flat.index[uint8_t, int64_t](
            deref(handle_), _get_metric("sqeuclidean"),
            <uint32_t>1,
            <bool>False,
            <bool>False,
            <uint32_t>4)

    def __repr__(self):
        m_str = "metric=" + _get_metric_string(self.index.metric())
        attr_str = [
            attr + "=" + str(getattr(self, attr))
            for attr in ["size", "dim", "n_lists", "adaptive_centers"]
        ]
        attr_str = [m_str] + attr_str
        return "Index(type=IVF-FLAT, " + (", ".join(attr_str)) + ")"

    # Read-only views into the underlying C++ index.
    @property
    def dim(self):
        return self.index[0].dim()

    @property
    def size(self):
        return self.index[0].size()

    @property
    def metric(self):
        return self.index[0].metric()

    @property
    def n_lists(self):
        return self.index[0].n_lists()

    @property
    def adaptive_centers(self):
        return self.index[0].adaptive_centers()
@auto_sync_handle
@auto_convert_output
def build(IndexParams index_params, dataset, handle=None):
    """
    Builds an IVF-FLAT index that can be used for nearest neighbor search.

    Parameters
    ----------
    index_params : IndexParams object
    dataset : CUDA array interface compliant matrix shape (n_samples, dim)
        Supported dtype [float, int8, uint8]
    {handle_docstring}

    Returns
    -------
    index: ivf_flat.Index

    Raises
    ------
    TypeError
        If the dataset dtype is not one of float32, int8 or uint8.

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import DeviceResources
    >>> from pylibraft.neighbors import ivf_flat
    >>> n_samples = 50000
    >>> n_features = 50
    >>> n_queries = 1000
    >>> dataset = cp.random.random_sample((n_samples, n_features),
    ...                                   dtype=cp.float32)
    >>> handle = DeviceResources()
    >>> index_params = ivf_flat.IndexParams(
    ...     n_lists=1024,
    ...     metric="sqeuclidean")
    >>> index = ivf_flat.build(index_params, dataset, handle=handle)
    >>> # Search using the built index
    >>> queries = cp.random.random_sample((n_queries, n_features),
    ...                                   dtype=cp.float32)
    >>> k = 10
    >>> distances, neighbors = ivf_flat.search(ivf_flat.SearchParams(),
    ...                                        index, queries, k,
    ...                                        handle=handle)
    >>> distances = cp.asarray(distances)
    >>> neighbors = cp.asarray(neighbors)
    >>> # pylibraft functions are often asynchronous so the
    >>> # handle needs to be explicitly synchronized
    >>> handle.sync()
    """
    dataset_cai = cai_wrapper(dataset)
    dataset_dt = dataset_cai.dtype
    # Only float32, int8 ('byte') and uint8 ('ubyte') have C++ instantiations.
    _check_input_array(dataset_cai, [np.dtype('float32'), np.dtype('byte'),
                                     np.dtype('ubyte')])

    # NOTE(review): n_rows/dim are declared but not referenced again in this
    # function body — possibly leftovers from an earlier call signature.
    cdef int64_t n_rows = dataset_cai.shape[0]
    cdef uint32_t dim = dataset_cai.shape[1]

    if handle is None:
        handle = DeviceResources()
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()

    cdef IndexFloat idx_float
    cdef IndexInt8 idx_int8
    cdef IndexUint8 idx_uint8

    # Dispatch on dtype: build into the matching typed placeholder index,
    # mark it trained, and record which element type it holds so later
    # calls (extend/search/save) can validate their inputs against it.
    if dataset_dt == np.float32:
        idx_float = IndexFloat(handle)
        idx_float.active_index_type = "float32"
        with cuda_interruptible():
            c_ivf_flat.build(deref(handle_),
                             index_params.params,
                             get_dmv_float(dataset_cai, check_shape=True),
                             deref(idx_float.index))
        idx_float.trained = True
        return idx_float
    elif dataset_dt == np.byte:
        idx_int8 = IndexInt8(handle)
        idx_int8.active_index_type = "byte"
        with cuda_interruptible():
            c_ivf_flat.build(deref(handle_),
                             index_params.params,
                             get_dmv_int8(dataset_cai, check_shape=True),
                             deref(idx_int8.index))
        idx_int8.trained = True
        return idx_int8
    elif dataset_dt == np.ubyte:
        idx_uint8 = IndexUint8(handle)
        idx_uint8.active_index_type = "ubyte"
        with cuda_interruptible():
            c_ivf_flat.build(deref(handle_),
                             index_params.params,
                             get_dmv_uint8(dataset_cai, check_shape=True),
                             deref(idx_uint8.index))
        idx_uint8.trained = True
        return idx_uint8
    else:
        raise TypeError("dtype %s not supported" % dataset_dt)
@auto_sync_handle
@auto_convert_output
def extend(Index index, new_vectors, new_indices, handle=None):
    """
    Extend an existing index with new vectors.

    Parameters
    ----------
    index : ivf_flat.Index
        Trained ivf_flat object.
    new_vectors : CUDA array interface compliant matrix shape (n_samples, dim)
        Supported dtype [float, int8, uint8]
    new_indices : CUDA array interface compliant vector shape (n_samples)
        Supported dtype [int64]
    {handle_docstring}

    Returns
    -------
    index: ivf_flat.Index

    Raises
    ------
    ValueError
        If the index has not been built, or the indices array is not 1D.
    TypeError
        If the vector dtype is not one of float32, int8 or uint8.

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import DeviceResources
    >>> from pylibraft.neighbors import ivf_flat
    >>> n_samples = 50000
    >>> n_features = 50
    >>> n_queries = 1000
    >>> dataset = cp.random.random_sample((n_samples, n_features),
    ...                                   dtype=cp.float32)
    >>> handle = DeviceResources()
    >>> index = ivf_flat.build(ivf_flat.IndexParams(), dataset,
    ...                        handle=handle)
    >>> n_rows = 100
    >>> more_data = cp.random.random_sample((n_rows, n_features),
    ...                                     dtype=cp.float32)
    >>> indices = index.size + cp.arange(n_rows, dtype=cp.int64)
    >>> index = ivf_flat.extend(index, more_data, indices)
    >>> # Search using the built index
    >>> queries = cp.random.random_sample((n_queries, n_features),
    ...                                   dtype=cp.float32)
    >>> k = 10
    >>> distances, neighbors = ivf_flat.search(ivf_flat.SearchParams(),
    ...                                        index, queries,
    ...                                        k, handle=handle)
    >>> # pylibraft functions are often asynchronous so the
    >>> # handle needs to be explicitly synchronized
    >>> handle.sync()
    >>> distances = cp.asarray(distances)
    >>> neighbors = cp.asarray(neighbors)
    """
    if not index.trained:
        raise ValueError("Index need to be built before calling extend.")

    if handle is None:
        handle = DeviceResources()
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()

    vecs_cai = cai_wrapper(new_vectors)
    vecs_dt = vecs_cai.dtype
    cdef int64_t n_rows = vecs_cai.shape[0]
    cdef uint32_t dim = vecs_cai.shape[1]

    # New vectors must match the element type and dimensionality that the
    # index was built with; indices must be one int64 label per new row.
    _check_input_array(vecs_cai, [np.dtype(index.active_index_type)],
                       exp_cols=index.dim)

    idx_cai = cai_wrapper(new_indices)
    _check_input_array(idx_cai, [np.dtype('int64')], exp_rows=n_rows)
    if len(idx_cai.shape)!=1:
        raise ValueError("Indices array is expected to be 1D")

    cdef optional[device_vector_view[int64_t, int64_t]] new_indices_opt

    cdef IndexFloat idx_float
    cdef IndexInt8 idx_int8
    cdef IndexUint8 idx_uint8

    # Dispatch on the vector dtype. The user-supplied labels are only
    # forwarded when the index already holds vectors; otherwise an empty
    # optional is passed — NOTE(review): presumably the C++ side assigns
    # labels itself for an initially-empty index; confirm in the RAFT docs.
    if vecs_dt == np.float32:
        idx_float = index
        if idx_float.index.size() > 0:
            new_indices_opt = make_device_vector_view(
                <int64_t *><uintptr_t>idx_cai.data,
                <int64_t>idx_cai.shape[0])
        with cuda_interruptible():
            c_ivf_flat.extend(deref(handle_),
                              get_dmv_float(vecs_cai, check_shape=True),
                              new_indices_opt,
                              idx_float.index)
    elif vecs_dt == np.int8:
        idx_int8 = index
        if idx_int8.index[0].size() > 0:
            new_indices_opt = make_device_vector_view(
                <int64_t *><uintptr_t>idx_cai.data,
                <int64_t>idx_cai.shape[0])
        with cuda_interruptible():
            c_ivf_flat.extend(deref(handle_),
                              get_dmv_int8(vecs_cai, check_shape=True),
                              new_indices_opt,
                              idx_int8.index)
    elif vecs_dt == np.uint8:
        idx_uint8 = index
        if idx_uint8.index[0].size() > 0:
            new_indices_opt = make_device_vector_view(
                <int64_t *><uintptr_t>idx_cai.data,
                <int64_t>idx_cai.shape[0])
        with cuda_interruptible():
            c_ivf_flat.extend(deref(handle_),
                              get_dmv_uint8(vecs_cai, check_shape=True),
                              new_indices_opt,
                              idx_uint8.index)
    else:
        raise TypeError("query dtype %s not supported" % vecs_dt)

    return index
cdef class SearchParams:
    """
    IVF-FLAT search parameters

    Parameters
    ----------
    n_probes: int, default = 20
        The number of coarse clusters to select for the fine search.
    """
    # Underlying C++ parameter struct passed to c_ivf_flat.search.
    cdef c_ivf_flat.search_params params

    def __init__(self, *, n_probes=20):
        self.params.n_probes = n_probes

    def __repr__(self):
        """Return a one-line summary of the search parameters."""
        attr_str = [attr + "=" + str(getattr(self, attr))
                    for attr in ["n_probes"]]
        return "SearchParams(type=IVF-FLAT, " + (", ".join(attr_str)) + ")"

    @property
    def n_probes(self):
        """Number of coarse clusters probed per query."""
        return self.params.n_probes
@auto_sync_handle
@auto_convert_output
def search(SearchParams search_params,
           Index index,
           queries,
           k,
           neighbors=None,
           distances=None,
           handle=None):
    """
    Find the k nearest neighbors for each query.

    Parameters
    ----------
    search_params : SearchParams
    index : Index
        Trained IVF-FLAT index.
    queries : CUDA array interface compliant matrix shape (n_samples, dim)
        Supported dtype [float, int8, uint8]
    k : int
        The number of neighbors.
    neighbors : Optional CUDA array interface compliant matrix shape
                (n_queries, k), dtype int64_t. If supplied, neighbor
                indices will be written here in-place. (default None)
    distances : Optional CUDA array interface compliant matrix shape
                (n_queries, k) If supplied, the distances to the
                neighbors will be written here in-place. (default None)
    {handle_docstring}

    Returns
    -------
    (distances, neighbors) : the two output matrices (newly allocated on
        device unless the in-place buffers were supplied).

    Raises
    ------
    ValueError
        If the index has not been built, or the query dtype is unsupported.

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import DeviceResources
    >>> from pylibraft.neighbors import ivf_flat
    >>> n_samples = 50000
    >>> n_features = 50
    >>> n_queries = 1000
    >>> dataset = cp.random.random_sample((n_samples, n_features),
    ...                                   dtype=cp.float32)
    >>> # Build index
    >>> handle = DeviceResources()
    >>> index = ivf_flat.build(ivf_flat.IndexParams(), dataset,
    ...                        handle=handle)
    >>> # Search using the built index
    >>> queries = cp.random.random_sample((n_queries, n_features),
    ...                                   dtype=cp.float32)
    >>> k = 10
    >>> search_params = ivf_flat.SearchParams(
    ...     n_probes=20
    ... )
    >>> distances, neighbors = ivf_flat.search(search_params, index,
    ...                                        queries, k, handle=handle)
    >>> # pylibraft functions are often asynchronous so the
    >>> # handle needs to be explicitly synchronized
    >>> handle.sync()
    >>> neighbors = cp.asarray(neighbors)
    >>> distances = cp.asarray(distances)
    """
    if not index.trained:
        raise ValueError("Index need to be built before calling search.")

    if handle is None:
        handle = DeviceResources()
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()

    queries_cai = cai_wrapper(queries)
    queries_dt = queries_cai.dtype
    cdef uint32_t n_queries = queries_cai.shape[0]

    # Queries must use the same element type and dim as the index.
    _check_input_array(queries_cai, [np.dtype(index.active_index_type)],
                       exp_cols=index.dim)

    # Allocate (or validate) the two output buffers: int64 neighbor ids and
    # float32 distances, both shaped (n_queries, k).
    if neighbors is None:
        neighbors = device_ndarray.empty((n_queries, k), dtype='int64')

    neighbors_cai = cai_wrapper(neighbors)
    _check_input_array(neighbors_cai, [np.dtype('int64')],
                       exp_rows=n_queries, exp_cols=k)

    if distances is None:
        distances = device_ndarray.empty((n_queries, k), dtype='float32')

    distances_cai = cai_wrapper(distances)
    _check_input_array(distances_cai, [np.dtype('float32')],
                       exp_rows=n_queries, exp_cols=k)

    cdef c_ivf_flat.search_params params = search_params.params
    cdef IndexFloat idx_float
    cdef IndexInt8 idx_int8
    cdef IndexUint8 idx_uint8

    # Dispatch to the C++ overload matching the query element type.
    if queries_dt == np.float32:
        idx_float = index
        with cuda_interruptible():
            c_ivf_flat.search(deref(handle_),
                              params,
                              deref(idx_float.index),
                              get_dmv_float(queries_cai, check_shape=True),
                              get_dmv_int64(neighbors_cai, check_shape=True),
                              get_dmv_float(distances_cai, check_shape=True))
    elif queries_dt == np.byte:
        idx_int8 = index
        with cuda_interruptible():
            c_ivf_flat.search(deref(handle_),
                              params,
                              deref(idx_int8.index),
                              get_dmv_int8(queries_cai, check_shape=True),
                              get_dmv_int64(neighbors_cai, check_shape=True),
                              get_dmv_float(distances_cai, check_shape=True))
    elif queries_dt == np.ubyte:
        idx_uint8 = index
        with cuda_interruptible():
            c_ivf_flat.search(deref(handle_),
                              params,
                              deref(idx_uint8.index),
                              get_dmv_uint8(queries_cai, check_shape=True),
                              get_dmv_int64(neighbors_cai, check_shape=True),
                              get_dmv_float(distances_cai, check_shape=True))
    else:
        # NOTE(review): build/extend raise TypeError for this case; this
        # function raises ValueError — inconsistent, but callers may rely
        # on it, so it is left unchanged here.
        raise ValueError("query dtype %s not supported" % queries_dt)

    return (distances, neighbors)
@auto_sync_handle
def save(filename, Index index, handle=None):
    """
    Saves the index to a file.

    Saving / loading the index is experimental. The serialization format is
    subject to change.

    Parameters
    ----------
    filename : string
        Name of the file.
    index : Index
        Trained IVF-Flat index.
    {handle_docstring}

    Raises
    ------
    ValueError
        If the index has not been built, or its element type is unknown.

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import DeviceResources
    >>> from pylibraft.neighbors import ivf_flat
    >>> n_samples = 50000
    >>> n_features = 50
    >>> dataset = cp.random.random_sample((n_samples, n_features),
    ...                                   dtype=cp.float32)
    >>> # Build index
    >>> handle = DeviceResources()
    >>> index = ivf_flat.build(ivf_flat.IndexParams(), dataset,
    ...                        handle=handle)
    >>> ivf_flat.save("my_index.bin", index, handle=handle)
    """
    if not index.trained:
        raise ValueError("Index need to be built before saving it.")

    if handle is None:
        handle = DeviceResources()
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()

    cdef string c_filename = filename.encode('utf-8')

    cdef IndexFloat idx_float
    cdef IndexInt8 idx_int8
    cdef IndexUint8 idx_uint8

    # Serialize through the C++ overload matching the stored element type,
    # which was recorded on the wrapper at build/load time.
    if index.active_index_type == "float32":
        idx_float = index
        c_ivf_flat.serialize_file(
            deref(handle_), c_filename, deref(idx_float.index))
    elif index.active_index_type == "byte":
        idx_int8 = index
        c_ivf_flat.serialize_file(
            deref(handle_), c_filename, deref(idx_int8.index))
    elif index.active_index_type == "ubyte":
        idx_uint8 = index
        c_ivf_flat.serialize_file(
            deref(handle_), c_filename, deref(idx_uint8.index))
    else:
        raise ValueError(
            "Index dtype %s not supported" % index.active_index_type)
@auto_sync_handle
def load(filename, handle=None):
    """
    Loads index from a file.

    Saving / loading the index is experimental. The serialization format is
    subject to change, therefore loading an index saved with a previous
    version of raft is not guaranteed to work.

    Parameters
    ----------
    filename : string
        Name of the file.
    {handle_docstring}

    Returns
    -------
    index : Index

    Raises
    ------
    ValueError
        If the dtype token at the start of the file is unsupported.

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.common import DeviceResources
    >>> from pylibraft.neighbors import ivf_flat
    >>> n_samples = 50000
    >>> n_features = 50
    >>> dataset = cp.random.random_sample((n_samples, n_features),
    ...                                   dtype=cp.float32)
    >>> # Build and save index
    >>> handle = DeviceResources()
    >>> index = ivf_flat.build(ivf_flat.IndexParams(), dataset,
    ...                        handle=handle)
    >>> ivf_flat.save("my_index.bin", index, handle=handle)
    >>> del index
    >>> n_queries = 100
    >>> queries = cp.random.random_sample((n_queries, n_features),
    ...                                   dtype=cp.float32)
    >>> handle = DeviceResources()
    >>> index = ivf_flat.load("my_index.bin", handle=handle)
    >>> distances, neighbors = ivf_flat.search(ivf_flat.SearchParams(),
    ...                                        index, queries, k=10,
    ...                                        handle=handle)
    """
    if handle is None:
        handle = DeviceResources()
    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()

    cdef string c_filename = filename.encode('utf-8')

    cdef IndexFloat idx_float
    cdef IndexInt8 idx_int8
    cdef IndexUint8 idx_uint8

    # Peek at the first 3 characters of the file: they are interpreted as a
    # numpy dtype string identifying the serialized element type, which
    # selects the typed deserialization overload below.
    with open(filename, 'rb') as f:
        type_str = f.read(3).decode('utf-8')

    dataset_dt = np.dtype(type_str)

    if dataset_dt == np.float32:
        idx_float = IndexFloat(handle)
        c_ivf_flat.deserialize_file(
            deref(handle_), c_filename, idx_float.index)
        idx_float.trained = True
        idx_float.active_index_type = 'float32'
        return idx_float
    elif dataset_dt == np.byte:
        idx_int8 = IndexInt8(handle)
        c_ivf_flat.deserialize_file(
            deref(handle_), c_filename, idx_int8.index)
        idx_int8.trained = True
        idx_int8.active_index_type = 'byte'
        return idx_int8
    elif dataset_dt == np.ubyte:
        idx_uint8 = IndexUint8(handle)
        c_ivf_flat.deserialize_file(
            deref(handle_), c_filename, idx_uint8.index)
        idx_uint8.trained = True
        idx_uint8.active_index_type = 'ubyte'
        return idx_uint8
    else:
        raise ValueError("Index dtype %s not supported" % dataset_dt)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_flat | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_flat/cpp/c_ivf_flat.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
import pylibraft.common.handle
from cython.operator cimport dereference as deref
from libc.stdint cimport int8_t, int64_t, uint8_t, uint32_t, uintptr_t
from libcpp cimport bool, nullptr
from libcpp.string cimport string
from rmm._lib.memory_resource cimport device_memory_resource
from pylibraft.common.cpp.mdspan cimport (
device_matrix_view,
device_vector_view,
host_matrix_view,
make_device_matrix_view,
make_host_matrix_view,
row_major,
)
from pylibraft.common.cpp.optional cimport optional
from pylibraft.common.handle cimport device_resources
from pylibraft.distance.distance_type cimport DistanceType
from pylibraft.neighbors.ivf_pq.cpp.c_ivf_pq cimport (
ann_index,
ann_index_params,
ann_search_params,
)
# Declarations of the IVF-FLAT types defined in the RAFT C++ headers.
cdef extern from "raft/neighbors/ivf_flat_types.hpp" \
        namespace "raft::neighbors::ivf_flat" nogil:

    # Build-time hyper-parameters for an IVF-FLAT index.
    cpdef cppclass index_params(ann_index_params):
        uint32_t n_lists
        uint32_t kmeans_n_iters
        double kmeans_trainset_fraction
        bool adaptive_centers
        bool conservative_memory_allocation

    # The C++ index, templated on the element type T and label type IdxT.
    cdef cppclass index[T, IdxT](ann_index):
        index(const device_resources& handle,
              DistanceType metric,
              uint32_t n_lists,
              bool adaptive_centers,
              bool conservative_memory_allocation,
              uint32_t dim)
        IdxT size()
        uint32_t dim()
        DistanceType metric()
        uint32_t n_lists()
        bool adaptive_centers()

    # Query-time parameters.
    cpdef cppclass search_params(ann_search_params):
        uint32_t n_probes
# Declarations of the RAFT runtime entry points. Each operation is declared
# once per supported element type (float32, int8, uint8); Cython resolves
# the overload from the argument types at the call site.
cdef extern from "raft_runtime/neighbors/ivf_flat.hpp" \
        namespace "raft::runtime::neighbors::ivf_flat" nogil:

    # --- build: populate an index from a device dataset ---
    cdef void build(const device_resources&,
                    const index_params& params,
                    device_matrix_view[float, int64_t, row_major] dataset,
                    index[float, int64_t]& index) except +

    cdef void build(const device_resources& handle,
                    const index_params& params,
                    device_matrix_view[int8_t, int64_t, row_major] dataset,
                    index[int8_t, int64_t]& index) except +

    cdef void build(const device_resources& handle,
                    const index_params& params,
                    device_matrix_view[uint8_t, int64_t, row_major] dataset,
                    index[uint8_t, int64_t]& index) except +

    # --- extend: add vectors (with optional explicit labels) in place ---
    cdef void extend(
        const device_resources& handle,
        device_matrix_view[float, int64_t, row_major] new_vectors,
        optional[device_vector_view[int64_t, int64_t]] new_indices,
        index[float, int64_t]* index) except +

    cdef void extend(
        const device_resources& handle,
        device_matrix_view[int8_t, int64_t, row_major] new_vectors,
        optional[device_vector_view[int64_t, int64_t]] new_indices,
        index[int8_t, int64_t]* index) except +

    cdef void extend(
        const device_resources& handle,
        device_matrix_view[uint8_t, int64_t, row_major] new_vectors,
        optional[device_vector_view[int64_t, int64_t]] new_indices,
        index[uint8_t, int64_t]* index) except +

    # --- search: k-NN lookup writing neighbor ids and distances ---
    cdef void search(
        const device_resources& handle,
        const search_params& params,
        const index[float, int64_t]& index,
        device_matrix_view[float, int64_t, row_major] queries,
        device_matrix_view[int64_t, int64_t, row_major] neighbors,
        device_matrix_view[float, int64_t, row_major] distances) except +

    cdef void search(
        const device_resources& handle,
        const search_params& params,
        const index[int8_t, int64_t]& index,
        device_matrix_view[int8_t, int64_t, row_major] queries,
        device_matrix_view[int64_t, int64_t, row_major] neighbors,
        device_matrix_view[float, int64_t, row_major] distances) except +

    cdef void search(
        const device_resources& handle,
        const search_params& params,
        const index[uint8_t, int64_t]& index,
        device_matrix_view[uint8_t, int64_t, row_major] queries,
        device_matrix_view[int64_t, int64_t, row_major] neighbors,
        device_matrix_view[float, int64_t, row_major] distances) except +

    # --- (de)serialization to an in-memory string ---
    cdef void serialize(const device_resources& handle,
                        string& str,
                        const index[float, int64_t]& index) except +

    cdef void deserialize(const device_resources& handle,
                          const string& str,
                          index[float, int64_t]* index) except +

    cdef void serialize(const device_resources& handle,
                        string& str,
                        const index[uint8_t, int64_t]& index) except +

    cdef void deserialize(const device_resources& handle,
                          const string& str,
                          index[uint8_t, int64_t]* index) except +

    cdef void serialize(const device_resources& handle,
                        string& str,
                        const index[int8_t, int64_t]& index) except +

    cdef void deserialize(const device_resources& handle,
                          const string& str,
                          index[int8_t, int64_t]* index) except +

    # --- (de)serialization to a file on disk ---
    cdef void serialize_file(const device_resources& handle,
                             const string& filename,
                             const index[float, int64_t]& index) except +

    cdef void deserialize_file(const device_resources& handle,
                               const string& filename,
                               index[float, int64_t]* index) except +

    cdef void serialize_file(const device_resources& handle,
                             const string& filename,
                             const index[uint8_t, int64_t]& index) except +

    cdef void deserialize_file(const device_resources& handle,
                               const string& filename,
                               index[uint8_t, int64_t]* index) except +

    cdef void serialize_file(const device_resources& handle,
                             const string& filename,
                             const index[int8_t, int64_t]& index) except +

    cdef void deserialize_file(const device_resources& handle,
                               const string& filename,
                               index[int8_t, int64_t]* index) except +
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_flat | rapidsai_public_repos/raft/python/pylibraft/pylibraft/neighbors/ivf_flat/cpp/__init__.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/matrix/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Set the list of Cython files to build
# Set the list of Cython files to build
set(cython_sources select_k.pyx)
# Libraries every generated extension module links against.
set(linked_libraries raft::raft raft::compiled)

# Build all of the Cython targets
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" ASSOCIATED_TARGETS raft MODULE_PREFIX matrix_
)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/matrix/select_k.pyx | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cython.operator cimport dereference as deref
from libc.stdint cimport int64_t
from libcpp cimport bool
import numpy as np
from pylibraft.common import auto_convert_output, cai_wrapper, device_ndarray
from pylibraft.common.handle import auto_sync_handle
from pylibraft.common.input_validation import is_c_contiguous
from pylibraft.common.cpp.mdspan cimport (
device_matrix_view,
host_matrix_view,
make_device_matrix_view,
make_host_matrix_view,
row_major,
)
from pylibraft.common.cpp.optional cimport optional
from pylibraft.common.handle cimport device_resources
from pylibraft.common.mdspan cimport get_dmv_float, get_dmv_int64
from pylibraft.matrix.cpp.select_k cimport select_k as c_select_k
@auto_sync_handle
@auto_convert_output
def select_k(dataset, k=None, distances=None, indices=None, select_min=True,
             handle=None):
    """
    Selects the top k items from each row in a matrix

    Parameters
    ----------
    dataset : array interface compliant matrix, row-major layout,
        shape (n_rows, dim). Supported dtype [float]
    k : int
        Number of items to return for each row. Optional if indices or
        distances arrays are given (in which case their second dimension
        is k).
    distances : Optional array interface compliant matrix shape
                (n_rows, k), dtype float. If supplied,
                distances will be written here in-place. (default None)
    indices : Optional array interface compliant matrix shape
                (n_rows, k), dtype int64_t. If supplied, neighbor
                indices will be written here in-place. (default None)
    select_min: : bool
        Whether to select the minimum or maximum K items
    {handle_docstring}

    Returns
    -------
    distances: array interface compliant object containing resulting distances
        shape (n_rows, k)
    indices: array interface compliant object containing resulting indices
        shape (n_rows, k)

    Raises
    ------
    ValueError
        If k is omitted and neither indices nor distances is given.
    TypeError
        If the dataset dtype is not float32.

    Examples
    --------
    >>> import cupy as cp
    >>> from pylibraft.matrix import select_k
    >>> n_features = 50
    >>> n_rows = 1000
    >>> queries = cp.random.random_sample((n_rows, n_features),
    ...                                   dtype=cp.float32)
    >>> k = 40
    >>> distances, ids = select_k(queries, k)
    >>> distances = cp.asarray(distances)
    >>> ids = cp.asarray(ids)
    """
    dataset_cai = cai_wrapper(dataset)

    # Infer k from whichever output buffer the caller supplied.
    if k is None:
        if indices is not None:
            k = cai_wrapper(indices).shape[1]
        elif distances is not None:
            k = cai_wrapper(distances).shape[1]
        else:
            raise ValueError("Argument k must be specified if both indices "
                             "and distances arg is None")

    # Read the row count from the CAI wrapper rather than the raw object:
    # `dataset` is only required to implement __cuda_array_interface__ and
    # may not expose a `.shape` attribute itself.
    n_rows = dataset_cai.shape[0]

    # Allocate the output buffers unless the caller provided them in-place.
    if indices is None:
        indices = device_ndarray.empty((n_rows, k), dtype='int64')

    if distances is None:
        distances = device_ndarray.empty((n_rows, k), dtype='float32')

    distances_cai = cai_wrapper(distances)
    indices_cai = cai_wrapper(indices)

    cdef device_resources* handle_ = \
        <device_resources*><size_t>handle.getHandle()

    # No pre-existing input indices are supplied; the optional stays empty
    # so the C++ side uses the column position as the index.
    cdef optional[device_matrix_view[int64_t, int64_t, row_major]] in_idx

    if dataset_cai.dtype == np.float32:
        c_select_k(deref(handle_),
                   get_dmv_float(dataset_cai, check_shape=True),
                   in_idx,
                   get_dmv_float(distances_cai, check_shape=True),
                   get_dmv_int64(indices_cai, check_shape=True),
                   <bool>select_min)
    else:
        raise TypeError("dtype %s not supported" % dataset_cai.dtype)

    return distances, indices
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/matrix/__init__.pxd | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/matrix/__init__.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .select_k import select_k
__all__ = ["select_k"]
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/matrix | rapidsai_public_repos/raft/python/pylibraft/pylibraft/matrix/cpp/select_k.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from libc.stdint cimport int64_t
from libcpp cimport bool
from pylibraft.common.cpp.mdspan cimport device_matrix_view, row_major
from pylibraft.common.cpp.optional cimport optional
from pylibraft.common.handle cimport device_resources
# Declaration of the RAFT runtime top-k selection kernel: per row of the
# float32 input matrix, writes the k smallest (or largest, per the final
# bool flag) values and their int64 indices to the two output views.
# The optional matrix supplies pre-existing input indices.
cdef extern from "raft_runtime/matrix/select_k.hpp" \
        namespace "raft::runtime::matrix" nogil:

    cdef void select_k(const device_resources & handle,
                       device_matrix_view[float, int64_t, row_major],
                       optional[device_matrix_view[int64_t,
                                                   int64_t,
                                                   row_major]],
                       device_matrix_view[float, int64_t, row_major],
                       device_matrix_view[int64_t, int64_t, row_major],
                       bool) except +
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/matrix | rapidsai_public_repos/raft/python/pylibraft/pylibraft/matrix/cpp/__init__.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_device_ndarray.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.common import device_ndarray
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_basic_attributes(order, dtype):
    """A device_ndarray built from a host array must mirror its shape,
    dtype and contiguity, and round-trip the data unchanged."""
    host = np.random.random((500, 2)).astype(dtype)
    host = np.ascontiguousarray(host) if order == "C" else np.asfortranarray(host)

    dev = device_ndarray(host)
    roundtrip = dev.copy_to_host()

    assert host.shape == dev.shape
    assert host.dtype == dev.dtype
    # Contiguity flags must match on both the device array and the copy.
    for flags in (dev, roundtrip.data):
        assert host.data.f_contiguous == flags.f_contiguous
        assert host.data.c_contiguous == flags.c_contiguous
    np.testing.assert_array_equal(host.tolist(), roundtrip.tolist())
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_empty(order, dtype):
    """device_ndarray.empty must honor the requested shape, dtype and
    memory order, both on device and after copying back to host."""
    reference = np.random.random((500, 2)).astype(dtype)
    reference = (np.ascontiguousarray(reference) if order == "C"
                 else np.asfortranarray(reference))

    dev = device_ndarray.empty(reference.shape, dtype=dtype, order=order)
    host = dev.copy_to_host()

    assert reference.shape == dev.shape
    assert reference.dtype == dev.dtype
    # Contiguity flags must match on both the device array and the copy.
    for flags in (dev, host.data):
        assert reference.data.f_contiguous == flags.f_contiguous
        assert reference.data.c_contiguous == flags.c_contiguous
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_select_k.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.common import device_ndarray
from pylibraft.matrix import select_k
@pytest.mark.parametrize("n_rows", [32, 100])
@pytest.mark.parametrize("n_cols", [40, 100])
@pytest.mark.parametrize("k", [1, 5, 16, 35])
@pytest.mark.parametrize("inplace", [True, False])
def test_select_k(n_rows, n_cols, k, inplace):
    """select_k must return the k smallest values per row, sorted ascending.

    Fix: the ``inplace`` parameter previously had no effect - preallocated
    output buffers were passed in both cases, so the allocation path of
    select_k was never exercised (and ``ret_indices`` was unused).
    """
    dataset = np.random.random_sample((n_rows, n_cols)).astype("float32")
    dataset_device = device_ndarray(dataset)

    if inplace:
        indices_device = device_ndarray(np.zeros((n_rows, k), dtype="int64"))
        distances_device = device_ndarray(
            np.zeros((n_rows, k), dtype="float32")
        )
    else:
        # Exercise the allocation path: select_k creates the outputs.
        indices_device = None
        distances_device = None

    ret_distances, ret_indices = select_k(
        dataset_device,
        k=k,
        distances=distances_device,
        indices=indices_device,
    )

    distances_device = distances_device if inplace else ret_distances
    actual_distances = distances_device.copy_to_host()

    # CPU reference: the k smallest values of each row, in sorted order.
    argsort = np.argsort(dataset, axis=1)
    for i in range(dataset.shape[0]):
        expected = dataset[i, argsort[i]][:k]
        np.testing.assert_allclose(
            expected, actual_distances[i], atol=1e-4, rtol=1e-4
        )
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_mdspan_serializer.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.common.mdspan import run_roundtrip_test_for_mdspan
# TODO(hcho3): Set up hypothesis
@pytest.mark.parametrize("dtype", ["float32", "float64", "int32", "uint32"])
def test_mdspan_serializer(dtype):
    """Serialize a small mdspan and check it deserializes to the same data."""
    array = np.random.random_sample((2, 3)).astype(dtype)
    run_roundtrip_test_for_mdspan(array)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_random.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.common import DeviceResources, device_ndarray
from pylibraft.random import rmat
def generate_theta(r_scale, c_scale):
    """Generate a normalized RMAT theta array (host and device copies).

    Every consecutive quadruple (a, b, c, d) is scaled so it sums to one,
    as the RMAT generator requires.
    """
    max_scale = max(r_scale, c_scale)
    theta = np.random.random_sample(max_scale * 4)
    # Normalize each (a, b, c, d) group in place via a reshaped view.
    quads = theta.reshape(max_scale, 4)
    quads /= quads.sum(axis=1, keepdims=True)
    theta_device = device_ndarray(theta)
    return theta, theta_device
@pytest.mark.parametrize("n_edges", [10000, 20000])
@pytest.mark.parametrize("r_scale", [16, 18])
@pytest.mark.parametrize("c_scale", [16, 18])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_rmat(n_edges, r_scale, c_scale, dtype):
    """Check RMAT output bounds and determinism for a fixed seed."""
    theta, theta_device = generate_theta(r_scale, c_scale)
    output_device = device_ndarray(np.empty((n_edges, 2), dtype=dtype))
    handle = DeviceResources()

    rmat(output_device, theta_device, r_scale, c_scale, 12345, handle=handle)
    handle.sync()
    first = output_device.copy_to_host()

    # a more rigorous tests have been done at the c++ level; here we only
    # verify every edge endpoint falls inside the requested scales.
    assert np.all((first[:, 0] >= 0) & (first[:, 0] < 2**r_scale))
    assert np.all((first[:, 1] >= 0) & (first[:, 1] < 2**c_scale))

    # Re-running with the same seed must reproduce the same edge list.
    rmat(output_device, theta_device, r_scale, c_scale, 12345, handle=handle)
    handle.sync()
    second = output_device.copy_to_host()
    assert np.array_equal(first, second)
def test_rmat_exception():
    """rmat must raise a descriptive error when 'theta' or 'out' is None.

    Fix: the original asserted on ``exception.message``, but pytest's
    ``ExceptionInfo`` has no ``message`` attribute, so the asserts raised
    AttributeError instead of checking the error text. Use ``match=``.
    """
    n_edges = 20000
    r_scale = c_scale = 16
    dtype = np.int32

    out_buff = np.empty((n_edges, 2), dtype=dtype)
    output_device = device_ndarray(out_buff)
    with pytest.raises(Exception, match="'theta' cannot be None!"):
        rmat(output_device, None, r_scale, c_scale, 12345)

    theta, theta_device = generate_theta(r_scale, c_scale)
    with pytest.raises(Exception, match="'out' cannot be None!"):
        rmat(None, theta_device, r_scale, c_scale, 12345)
def test_rmat_valueerror():
    """rmat must reject output buffers with an unsupported dtype.

    Fix: ``ExceptionInfo`` has no ``message`` attribute; check the error
    text with ``pytest.raises(..., match=...)`` instead.
    """
    n_edges = 20000
    r_scale = c_scale = 16

    out_buff = np.empty((n_edges, 2), dtype=np.int16)
    output_device = device_ndarray(out_buff)
    theta, theta_device = generate_theta(r_scale, c_scale)
    with pytest.raises(ValueError, match="not supported"):
        rmat(output_device, theta_device, r_scale, c_scale, 12345)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_refine.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
from test_ivf_pq import calc_recall, check_distances, generate_data
from pylibraft.common import device_ndarray
from pylibraft.neighbors import refine
def run_refine(
    n_rows=500,
    n_cols=50,
    n_queries=100,
    metric="sqeuclidean",
    k0=40,
    k=10,
    inplace=False,
    dtype=np.float32,
    memory_type="device",
):
    """Run `refine` on exact candidate neighbors and validate vs sklearn.

    Builds a random dataset/query set, computes ``k0`` exact candidates
    with sklearn brute force, then asks `refine` to shrink them to the
    best ``k``. ``memory_type`` selects device vs. host inputs; ``inplace``
    selects whether preallocated output buffers are used.

    Parameters
    ----------
    n_rows, n_cols : int
        Dataset shape.
    n_queries : int
        Number of query vectors.
    metric : str
        "sqeuclidean" or "inner_product" (the latter L2-normalizes input).
    k0 : int
        Number of candidate neighbors handed to refine.
    k : int
        Number of neighbors refine is asked to return (k <= k0).
    inplace : bool
        If True, write results into caller-provided output buffers.
    dtype : numpy dtype
        Element type of dataset/queries.
    memory_type : str
        "device" or "host" - where refine's inputs live.
    """
    dataset = generate_data((n_rows, n_cols), dtype)
    queries = generate_data((n_queries, n_cols), dtype)
    if metric == "inner_product":
        # Inner-product runs need unit-norm rows; only float32 can
        # represent the normalized values.
        if dtype != np.float32:
            pytest.skip("Normalized input cannot be represented in int8")
            return
        dataset = normalize(dataset, norm="l2", axis=1)
        queries = normalize(queries, norm="l2", axis=1)
    dataset_device = device_ndarray(dataset)
    queries_device = device_ndarray(queries)
    # Calculate reference values with sklearn
    skl_metric = {"sqeuclidean": "euclidean", "inner_product": "cosine"}[
        metric
    ]
    nn_skl = NearestNeighbors(
        n_neighbors=k0, algorithm="brute", metric=skl_metric
    )
    nn_skl.fit(dataset)
    skl_dist, candidates = nn_skl.kneighbors(queries)
    candidates = candidates.astype(np.int64)
    candidates_device = device_ndarray(candidates)
    out_idx = np.zeros((n_queries, k), dtype=np.int64)
    out_dist = np.zeros((n_queries, k), dtype=np.float32)
    # Device output buffers are only needed for the in-place variant.
    out_idx_device = device_ndarray(out_idx) if inplace else None
    out_dist_device = device_ndarray(out_dist) if inplace else None
    if memory_type == "device":
        if inplace:
            refine(
                dataset_device,
                queries_device,
                candidates_device,
                indices=out_idx_device,
                distances=out_dist_device,
                metric=metric,
            )
        else:
            out_dist_device, out_idx_device = refine(
                dataset_device,
                queries_device,
                candidates_device,
                k=k,
                metric=metric,
            )
        out_idx = out_idx_device.copy_to_host()
        out_dist = out_dist_device.copy_to_host()
    elif memory_type == "host":
        if inplace:
            refine(
                dataset,
                queries,
                candidates,
                indices=out_idx,
                distances=out_dist,
                metric=metric,
            )
        else:
            out_dist, out_idx = refine(
                dataset, queries, candidates, k=k, metric=metric
            )
    # Candidates come back from sklearn sorted by distance, so their first
    # k columns are the exact k nearest neighbors.
    skl_idx = candidates[:, :k]
    recall = calc_recall(out_idx, skl_idx)
    if recall <= 0.999:
        # We did not find the same neighbor indices.
        # We could have found other neighbor with same distance.
        if metric == "sqeuclidean":
            # sklearn reported euclidean; square to compare with refine.
            skl_dist = np.power(skl_dist[:, :k], 2)
        elif metric == "inner_product":
            # sklearn reported cosine distance; convert to inner product.
            skl_dist = 1 - skl_dist[:, :k]
        else:
            raise ValueError("Invalid metric")
        mask = out_idx != skl_idx
        assert np.all(out_dist[mask] <= skl_dist[mask] + 1.0e-6)
    check_distances(dataset, queries, metric, out_idx, out_dist, 0.001)
@pytest.mark.parametrize("n_queries", [100, 1024, 37])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("metric", ["sqeuclidean", "inner_product"])
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.uint8])
@pytest.mark.parametrize("memory_type", ["device", "host"])
def test_refine_dtypes(n_queries, dtype, inplace, metric, memory_type):
    """Run refine across dtypes, metrics and memory locations."""
    run_refine(
        n_rows=2000,
        n_cols=50,
        n_queries=n_queries,
        k0=40,
        k=10,
        dtype=dtype,
        metric=metric,
        inplace=inplace,
        memory_type=memory_type,
    )
@pytest.mark.parametrize(
    "params",
    [
        pytest.param(
            {
                "n_rows": 0,
                "n_cols": 10,
                "n_queries": 10,
                "k0": 10,
                "k": 1,
            },
            marks=pytest.mark.xfail(reason="empty dataset"),
        ),
        {"n_rows": 1, "n_cols": 10, "n_queries": 10, "k": 1, "k0": 1},
        {"n_rows": 10, "n_cols": 1, "n_queries": 10, "k": 10, "k0": 10},
        {"n_rows": 999, "n_cols": 42, "n_queries": 453, "k0": 137, "k": 53},
    ],
)
@pytest.mark.parametrize("memory_type", ["device", "host"])
def test_refine_row_col(params, memory_type):
    """Exercise refine on edge-case dataset/query/k shapes."""
    shape_kwargs = {
        key: params[key] for key in ("n_rows", "n_queries", "n_cols", "k0", "k")
    }
    run_refine(memory_type=memory_type, **shape_kwargs)
@pytest.mark.parametrize("memory_type", ["device", "host"])
def test_input_dtype(memory_type):
    """float64 inputs are unsupported by refine and must raise."""
    with pytest.raises(Exception):
        run_refine(memory_type=memory_type, dtype=np.float64)
@pytest.mark.parametrize(
    "params",
    [
        {"idx_shape": None, "dist_shape": None, "k": None},
        {"idx_shape": [100, 9], "dist_shape": None, "k": 10},
        {"idx_shape": [101, 10], "dist_shape": None, "k": None},
        {"idx_shape": None, "dist_shape": [100, 11], "k": 10},
        {"idx_shape": None, "dist_shape": [99, 10], "k": None},
    ],
)
@pytest.mark.parametrize("memory_type", ["device", "host"])
def test_input_assertions(params, memory_type):
    """Inconsistent k / output-buffer shapes must make refine raise.

    Fix: the host path previously referenced ``out_idx``/``out_dist`` even
    when no shape was requested, so ``pytest.raises`` caught a NameError
    rather than the intended argument-validation error. Both variables are
    now explicitly None in that case.
    """
    n_cols = 5
    n_queries = 100
    k0 = 40
    dtype = np.float32
    dataset = generate_data((500, n_cols), dtype)
    dataset_device = device_ndarray(dataset)

    queries = generate_data((n_queries, n_cols), dtype)
    queries_device = device_ndarray(queries)

    candidates = np.random.randint(
        0, 500, size=(n_queries, k0), dtype=np.int64
    )
    candidates_device = device_ndarray(candidates)

    if params["idx_shape"] is not None:
        out_idx = np.zeros(params["idx_shape"], dtype=np.int64)
        out_idx_device = device_ndarray(out_idx)
    else:
        out_idx = None
        out_idx_device = None
    if params["dist_shape"] is not None:
        out_dist = np.zeros(params["dist_shape"], dtype=np.float32)
        out_dist_device = device_ndarray(out_dist)
    else:
        out_dist = None
        out_dist_device = None

    if memory_type == "device":
        with pytest.raises(Exception):
            distances, indices = refine(
                dataset_device,
                queries_device,
                candidates_device,
                k=params["k"],
                indices=out_idx_device,
                distances=out_dist_device,
            )
    else:
        with pytest.raises(Exception):
            distances, indices = refine(
                dataset,
                queries,
                candidates,
                k=params["k"],
                indices=out_idx,
                distances=out_dist,
            )
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_cagra.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
from pylibraft.common import device_ndarray
from pylibraft.neighbors import cagra
# todo (dantegd): consolidate helper utils of ann methods
def generate_data(shape, dtype):
    """Create random test data of the given shape and dtype.

    Signed/unsigned byte dtypes get uniform integers over their range;
    any other dtype gets uniform floats in [0, 1).
    """
    if dtype == np.byte:
        return np.random.randint(-127, 128, size=shape, dtype=np.byte)
    if dtype == np.ubyte:
        return np.random.randint(0, 255, size=shape, dtype=np.ubyte)
    return np.random.random_sample(shape).astype(dtype)
def calc_recall(ann_idx, true_nn_idx):
    """Fraction of ANN neighbor ids that appear in the exact neighbor rows."""
    assert ann_idx.shape == true_nn_idx.shape
    found = sum(
        np.intersect1d(ann_idx[row], true_nn_idx[row]).size
        for row in range(ann_idx.shape[0])
    )
    return found / ann_idx.size
def run_cagra_build_search_test(
    n_rows=10000,
    n_cols=10,
    n_queries=100,
    k=10,
    dtype=np.float32,
    metric="euclidean",
    intermediate_graph_degree=128,
    graph_degree=64,
    build_algo="ivf_pq",
    array_type="device",
    compare=True,
    inplace=True,
    add_data_on_build=True,
    search_params=None,
):
    """Build a CAGRA index, run a search, and optionally check recall.

    ``array_type`` selects host vs. device inputs, ``inplace`` selects
    preallocated output buffers, ``add_data_on_build=False`` exercises the
    extend() path, and ``compare=False`` skips the sklearn recall check.

    Fix: ``search_params`` previously used a mutable ``{}`` default, which
    is shared across calls; it now defaults to None.
    """
    if search_params is None:
        search_params = {}
    dataset = generate_data((n_rows, n_cols), dtype)
    if metric == "inner_product":
        # Inner-product runs use unit-norm rows.
        dataset = normalize(dataset, norm="l2", axis=1)
    dataset_device = device_ndarray(dataset)

    build_params = cagra.IndexParams(
        metric=metric,
        intermediate_graph_degree=intermediate_graph_degree,
        graph_degree=graph_degree,
        build_algo=build_algo,
    )

    if array_type == "device":
        index = cagra.build(build_params, dataset_device)
    else:
        index = cagra.build(build_params, dataset)

    assert index.trained

    if not add_data_on_build:
        # Populate the index through two extend() calls instead.
        dataset_1 = dataset[: n_rows // 2, :]
        dataset_2 = dataset[n_rows // 2 :, :]
        indices_1 = np.arange(n_rows // 2, dtype=np.uint32)
        indices_2 = np.arange(n_rows // 2, n_rows, dtype=np.uint32)
        if array_type == "device":
            dataset_1_device = device_ndarray(dataset_1)
            dataset_2_device = device_ndarray(dataset_2)
            indices_1_device = device_ndarray(indices_1)
            indices_2_device = device_ndarray(indices_2)
            index = cagra.extend(index, dataset_1_device, indices_1_device)
            index = cagra.extend(index, dataset_2_device, indices_2_device)
        else:
            index = cagra.extend(index, dataset_1, indices_1)
            index = cagra.extend(index, dataset_2, indices_2)

    queries = generate_data((n_queries, n_cols), dtype)
    out_idx = np.zeros((n_queries, k), dtype=np.uint32)
    out_dist = np.zeros((n_queries, k), dtype=np.float32)

    queries_device = device_ndarray(queries)
    out_idx_device = device_ndarray(out_idx) if inplace else None
    out_dist_device = device_ndarray(out_dist) if inplace else None

    search_params = cagra.SearchParams(**search_params)

    ret_output = cagra.search(
        search_params,
        index,
        queries_device,
        k,
        neighbors=out_idx_device,
        distances=out_dist_device,
    )

    if not inplace:
        out_dist_device, out_idx_device = ret_output

    if not compare:
        return

    out_idx = out_idx_device.copy_to_host()
    out_dist = out_dist_device.copy_to_host()

    # Calculate reference values with sklearn
    skl_metric = {
        "sqeuclidean": "sqeuclidean",
        "inner_product": "cosine",
        "euclidean": "euclidean",
    }[metric]
    nn_skl = NearestNeighbors(
        n_neighbors=k, algorithm="brute", metric=skl_metric
    )
    nn_skl.fit(dataset)
    skl_idx = nn_skl.kneighbors(queries, return_distance=False)

    recall = calc_recall(out_idx, skl_idx)
    assert recall > 0.7
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.uint8])
@pytest.mark.parametrize("array_type", ["device", "host"])
@pytest.mark.parametrize("build_algo", ["ivf_pq", "nn_descent"])
def test_cagra_dataset_dtype_host_device(
    dtype, array_type, inplace, build_algo
):
    """Build/search across dtypes, input locations and build algorithms."""
    # Note that inner_product tests use normalized input which we cannot
    # represent in int8, therefore we test only sqeuclidean metric here.
    run_cagra_build_search_test(
        dtype=dtype,
        array_type=array_type,
        inplace=inplace,
        build_algo=build_algo,
    )
@pytest.mark.parametrize(
    "params",
    [
        {
            "intermediate_graph_degree": 64,
            "graph_degree": 32,
            "add_data_on_build": True,
            "k": 1,
            "metric": "euclidean",
            "build_algo": "ivf_pq",
        },
        {
            "intermediate_graph_degree": 32,
            "graph_degree": 16,
            "add_data_on_build": False,
            "k": 5,
            "metric": "sqeuclidean",
            "build_algo": "ivf_pq",
        },
        {
            "intermediate_graph_degree": 128,
            "graph_degree": 32,
            "add_data_on_build": True,
            "k": 10,
            "metric": "inner_product",
            "build_algo": "nn_descent",
        },
    ],
)
def test_cagra_index_params(params):
    """Build and search with assorted index parameters (no recall check)."""
    # Note that inner_product tests use normalized input which we cannot
    # represent in int8, therefore we test only sqeuclidean metric here.
    run_cagra_build_search_test(
        k=params["k"],
        metric=params["metric"],
        intermediate_graph_degree=params["intermediate_graph_degree"],
        graph_degree=params["graph_degree"],
        build_algo=params["build_algo"],
        compare=False,
    )
@pytest.mark.parametrize(
    "params",
    [
        {
            "max_queries": 100,
            "itopk_size": 32,
            "max_iterations": 100,
            "algo": "single_cta",
            "team_size": 0,
            "search_width": 1,
            "min_iterations": 1,
            "thread_block_size": 64,
            "hashmap_mode": "hash",
            "hashmap_min_bitlen": 0.2,
            "hashmap_max_fill_rate": 0.5,
            "num_random_samplings": 1,
        },
        {
            "max_queries": 10,
            "itopk_size": 128,
            "max_iterations": 0,
            "algo": "multi_cta",
            "team_size": 8,
            "search_width": 2,
            "min_iterations": 10,
            "thread_block_size": 0,
            "hashmap_mode": "auto",
            "hashmap_min_bitlen": 0.9,
            "hashmap_max_fill_rate": 0.5,
            "num_random_samplings": 10,
        },
        {
            "max_queries": 0,
            "itopk_size": 64,
            "max_iterations": 0,
            "algo": "multi_kernel",
            "team_size": 16,
            "search_width": 1,
            "min_iterations": 0,
            "thread_block_size": 0,
            "hashmap_mode": "auto",
            "hashmap_min_bitlen": 0,
            "hashmap_max_fill_rate": 0.5,
            "num_random_samplings": 1,
        },
        {
            "max_queries": 0,
            "itopk_size": 64,
            "max_iterations": 0,
            "algo": "auto",
            "team_size": 32,
            "search_width": 4,
            "min_iterations": 0,
            "thread_block_size": 0,
            "hashmap_mode": "auto",
            "hashmap_min_bitlen": 0,
            "hashmap_max_fill_rate": 0.5,
            "num_random_samplings": 1,
        },
    ],
)
def test_cagra_search_params(params):
    """Smoke-test cagra.SearchParams across each search algorithm variant.

    Each parametrized dict is forwarded verbatim to cagra.SearchParams via
    run_cagra_build_search_test's ``search_params`` argument; the default
    recall check still runs.
    """
    # Note that inner_product tests use normalized input which we cannot
    # represent in int8, therefore we test only sqeuclidean metric here.
    run_cagra_build_search_test(search_params=params)
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.ubyte])
@pytest.mark.parametrize("include_dataset", [True, False])
def test_save_load(dtype, include_dataset):
    """Saving and re-loading a CAGRA index must preserve search results.

    Fix: the index file is now written to a unique temporary directory
    instead of the current working directory, so the test no longer leaves
    an artifact behind and parallel runs cannot clobber each other.
    """
    import os
    import tempfile

    n_rows = 10000
    n_cols = 50
    n_queries = 1000

    dataset = generate_data((n_rows, n_cols), dtype)
    dataset_device = device_ndarray(dataset)

    build_params = cagra.IndexParams()
    index = cagra.build(build_params, dataset_device)
    assert index.trained

    with tempfile.TemporaryDirectory() as tmpdir:
        filename = os.path.join(tmpdir, "my_index.bin")
        cagra.save(filename, index, include_dataset=include_dataset)
        loaded_index = cagra.load(filename)

    # if we didn't save the dataset with the index, we need to update the
    # index with an already loaded copy
    if not include_dataset:
        loaded_index.update_dataset(dataset)

    queries = generate_data((n_queries, n_cols), dtype)
    queries_device = device_ndarray(queries)
    search_params = cagra.SearchParams()
    k = 10

    distance_dev, neighbors_dev = cagra.search(
        search_params, index, queries_device, k
    )
    neighbors = neighbors_dev.copy_to_host()
    dist = distance_dev.copy_to_host()
    del index

    distance_dev, neighbors_dev = cagra.search(
        search_params, loaded_index, queries_device, k
    )
    neighbors2 = neighbors_dev.copy_to_host()
    dist2 = distance_dev.copy_to_host()

    assert np.all(neighbors == neighbors2)
    assert np.allclose(dist, dist2, rtol=1e-6)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_doctests.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import doctest
import inspect
import io
import pytest
import pylibraft.cluster
import pylibraft.distance
import pylibraft.matrix
import pylibraft.neighbors
import pylibraft.random
# Code adapted from https://github.com/rapidsai/cudf/blob/branch-23.02/python/cudf/cudf/tests/test_doctests.py # noqa
def _name_in_all(parent, name):
return name in getattr(parent, "__all__", [])
def _is_public_name(parent, name):
return not name.startswith("_")
def _find_doctests_in_obj(obj, finder=None, criteria=None):
    """Find all doctests in an object.
    Parameters
    ----------
    obj : module or class
        The object to search for docstring examples.
    finder : doctest.DocTestFinder, optional
        The DocTestFinder object to use. If not provided, a DocTestFinder is
        constructed.
    criteria : callable, optional
        Callable indicating whether to recurse over members of the provided
        object. If not provided, names not defined in the object's ``__all__``
        property are ignored.
    Yields
    ------
    doctest.DocTest
        The next doctest found in the object.
    """
    if finder is None:
        finder = doctest.DocTestFinder()
    if criteria is None:
        criteria = _name_in_all
    # First yield doctests attached directly to the object itself; only
    # docstrings that actually contain examples are interesting.
    for docstring in finder.find(obj):
        if docstring.examples:
            yield docstring
    for name, member in inspect.getmembers(obj):
        # Only recurse over members matching the criteria
        if not criteria(obj, name):
            continue
        # Recurse over the public API of modules (objects defined in the
        # module's __all__)
        if inspect.ismodule(member):
            yield from _find_doctests_in_obj(
                member, finder, criteria=_name_in_all
            )
        # Recurse over the public API of classes (attributes not prefixed with
        # an underscore)
        if inspect.isclass(member):
            yield from _find_doctests_in_obj(
                member, finder, criteria=_is_public_name
            )
        # doctest finder seems to dislike cython functions, since
        # `inspect.isfunction` doesn't return true for them. hack around this
        if callable(member) and not inspect.isfunction(member):
            for docstring in finder.find(member):
                if docstring.examples:
                    yield docstring
# since the root pylibraft module doesn't import submodules (or define an
# __all__) we are explicitly adding all the submodules we want to run
# doctests for here
_DOCTEST_TARGETS = (
    pylibraft.cluster,
    pylibraft.common,
    pylibraft.distance,
    pylibraft.matrix.select_k,
    pylibraft.neighbors,
    pylibraft.neighbors.brute_force,
    pylibraft.neighbors.cagra,
    pylibraft.neighbors.ivf_flat,
    pylibraft.neighbors.ivf_pq,
    pylibraft.neighbors.refine,
    pylibraft.random,
)
DOC_STRINGS = [
    docstring
    for module in _DOCTEST_TARGETS
    for docstring in _find_doctests_in_obj(module)
]
@pytest.mark.parametrize(
    "docstring",
    DOC_STRINGS,
    ids=lambda docstring: docstring.name,
)
def test_docstring(docstring):
    """Execute a docstring's examples and fail if any output mismatches."""
    # We ignore differences in whitespace in the doctest output, and enable
    # the use of an ellipsis "..." to match any string in the doctest
    # output. An ellipsis is useful for, e.g., memory addresses or
    # imprecise floating point values.
    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    runner = doctest.DocTestRunner(optionflags=flags)

    # Capture stdout and include failing outputs in the traceback.
    captured = io.StringIO()
    with contextlib.redirect_stdout(captured):
        runner.run(docstring)

    results = runner.summarize()
    assert not results.failed, (
        f"{results.failed} of {results.attempted} doctests failed for "
        f"{docstring.name}:\n{captured.getvalue()}"
    )
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_kmeans.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.cluster.kmeans import (
KMeansParams,
cluster_cost,
compute_new_centroids,
fit,
init_plus_plus,
)
from pylibraft.common import DeviceResources, device_ndarray
from pylibraft.distance import pairwise_distance
@pytest.mark.parametrize("n_rows", [100])
@pytest.mark.parametrize("n_cols", [5, 25])
@pytest.mark.parametrize("n_clusters", [5, 15])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_kmeans_fit(n_rows, n_cols, n_clusters, dtype):
    """Fitting k-means must strictly reduce inertia vs. initial centroids."""
    # generate some random input points / centroids
    points_host = np.random.random_sample((n_rows, n_cols)).astype(dtype)
    centroids = device_ndarray(points_host[:n_clusters])
    points = device_ndarray(points_host)

    # compute the inertia, before fitting centroids
    original_inertia = cluster_cost(points, centroids)

    params = KMeansParams(n_clusters=n_clusters, seed=42)

    # fit the centroids, make sure inertia has gone down
    # TODO: once we have make_blobs exposed to python
    # (https://github.com/rapidsai/raft/issues/1059)
    # we should use that to test out the kmeans fit, like the C++
    # tests do right now
    centroids, inertia, n_iter = fit(params, points, centroids)
    assert inertia < original_inertia
    assert n_iter >= 1
    assert np.allclose(cluster_cost(points, centroids), inertia, rtol=1e-6)
@pytest.mark.parametrize("n_rows", [100])
@pytest.mark.parametrize("n_cols", [5, 25])
@pytest.mark.parametrize("n_clusters", [5, 15])
@pytest.mark.parametrize("metric", ["euclidean", "sqeuclidean"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("additional_args", [True, False])
def test_compute_new_centroids(
    n_rows, n_cols, metric, n_clusters, dtype, additional_args
):
    """compute_new_centroids must match a naive weighted-mean computation.

    Labels are derived from a pairwise-distance argmin; the expected
    centers are the sample-weighted means of each label group. When
    ``additional_args`` is False the optional sample_weights /
    weight_per_cluster arguments are passed as None.
    """
    # A single RAFT handle can optionally be reused across
    # pylibraft functions.
    handle = DeviceResources()
    X = np.random.random_sample((n_rows, n_cols)).astype(dtype)
    X_device = device_ndarray(X)
    # Seed the centroids with the first n_clusters input rows.
    centroids = X[:n_clusters]
    centroids_device = device_ndarray(centroids)
    weight_per_cluster = np.zeros((n_clusters,), dtype=dtype)
    weight_per_cluster_device = (
        device_ndarray(weight_per_cluster) if additional_args else None
    )
    new_centroids = np.zeros((n_clusters, n_cols), dtype=dtype)
    new_centroids_device = device_ndarray(new_centroids)
    # Uniform weights summing to 1 across all samples.
    sample_weights = np.ones((n_rows,)).astype(dtype) / n_rows
    sample_weights_device = (
        device_ndarray(sample_weights) if additional_args else None
    )
    # Compute new centroids naively
    dists = np.zeros((n_rows, n_clusters), dtype=dtype)
    dists_device = device_ndarray(dists)
    pairwise_distance(X_device, centroids_device, dists_device, metric=metric)
    # Sync before reading the distances back on the host.
    handle.sync()
    labels = np.argmin(dists_device.copy_to_host(), axis=1).astype(np.int32)
    labels_device = device_ndarray(labels)
    # Reference result: weighted mean of the samples in each cluster.
    expected_centers = np.empty((n_clusters, n_cols), dtype=dtype)
    expected_wX = X * sample_weights.reshape((-1, 1))
    for i in range(n_clusters):
        j = expected_wX[labels == i]
        j = j.sum(axis=0)
        g = sample_weights[labels == i].sum()
        expected_centers[i, :] = j / g
    compute_new_centroids(
        X_device,
        centroids_device,
        labels_device,
        new_centroids_device,
        sample_weights=sample_weights_device,
        weight_per_cluster=weight_per_cluster_device,
        handle=handle,
    )
    # pylibraft functions are often asynchronous so the
    # handle needs to be explicitly synchronized
    handle.sync()
    actual_centers = new_centroids_device.copy_to_host()
    assert np.allclose(expected_centers, actual_centers, rtol=1e-6)
@pytest.mark.parametrize("n_rows", [100])
@pytest.mark.parametrize("n_cols", [5, 25])
@pytest.mark.parametrize("n_clusters", [4, 15])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_cluster_cost(n_rows, n_cols, n_clusters, dtype):
    """cluster_cost must equal the brute-force sum of squared distances."""
    points = np.random.random_sample((n_rows, n_cols)).astype(dtype)
    points_dev = device_ndarray(points)

    centers_dev = device_ndarray(points[:n_clusters])
    inertia = cluster_cost(points_dev, centers_dev)

    # Reference: squared distance from each sample to its nearest center.
    dists = pairwise_distance(
        points_dev, centers_dev, metric="sqeuclidean"
    ).copy_to_host()
    nearest = np.argmin(dists, axis=1)
    nearest_dists = np.take_along_axis(dists, nearest[:, None], axis=1)

    # need reduced tolerance for float32
    tol = 1e-3 if dtype == np.float32 else 1e-6
    assert np.allclose(inertia, sum(nearest_dists), rtol=tol, atol=tol)
@pytest.mark.parametrize("n_rows", [100])
@pytest.mark.parametrize("n_cols", [5, 25])
@pytest.mark.parametrize("n_clusters", [4, 15])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_init_plus_plus(n_rows, n_cols, n_clusters, dtype):
    """kmeans++ init must pick every centroid from the input points."""
    points = np.random.random_sample((n_rows, n_cols)).astype(dtype)
    points_dev = device_ndarray(points)

    result = init_plus_plus(points_dev, n_clusters, seed=1)
    result_host = result.copy_to_host()

    assert result_host.shape == (n_clusters, points.shape[1])

    # Centroids are selected from the existing points
    for row in result_host:
        assert (row == points).all(axis=1).any()
@pytest.mark.parametrize("n_rows", [100])
@pytest.mark.parametrize("n_cols", [5, 25])
@pytest.mark.parametrize("n_clusters", [4, 15])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_init_plus_plus_preallocated_output(n_rows, n_cols, n_clusters, dtype):
    """kmeans++ init writing into a caller-provided output buffer."""
    points = np.random.random_sample((n_rows, n_cols)).astype(dtype)
    points_dev = device_ndarray(points)
    out = device_ndarray.empty((n_clusters, n_cols), dtype=dtype)

    result = init_plus_plus(points_dev, centroids=out, seed=1)
    result_host = result.copy_to_host()

    # The shape should not have changed
    assert result_host.shape == out.shape
    # Centroids are selected from the existing points
    for row in result_host:
        assert (row == points).all(axis=1).any()
def test_init_plus_plus_exclusive_arguments():
    """A mismatched n_clusters vs. centroids shape must raise RuntimeError."""
    # Check an exception is raised when n_clusters and centroids shape
    # are inconsistent.
    points = device_ndarray(
        np.random.random_sample((10, 5)).astype(np.float64)
    )
    n_clusters = 3
    bad_centroids = device_ndarray(
        np.random.random_sample((n_clusters + 1, 5)).astype(np.float64)
    )
    with pytest.raises(
        RuntimeError, match="Parameters 'n_clusters' and 'centroids'"
    ):
        init_plus_plus(points, n_clusters, centroids=bad_centroids)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_config.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
try:
    import cupy
except ImportError:
    # Fix: skipping at import time requires allow_module_level=True;
    # without it pytest raises a usage error instead of skipping the module.
    pytest.skip(reason="cupy not installed.", allow_module_level=True)
import pylibraft.config
from pylibraft.common import auto_convert_output, device_ndarray
@auto_convert_output
def gen_cai(m, n, t=None):
    """Return device-array output wrapped per *t*: bare, tuple, or list.

    Any other *t* value yields None (implicit fall-through), matching the
    original behavior.
    """
    def _empty():
        return device_ndarray.empty((m, n))

    if t is None:
        return _empty()
    if t == tuple:
        return _empty(), _empty()
    if t == list:
        return [_empty(), _empty()]
@pytest.mark.parametrize(
    "out_type",
    [
        ["cupy", cupy.ndarray],
        ["raft", pylibraft.common.device_ndarray],
        [lambda arr: arr.copy_to_host(), np.ndarray],
    ],
)
@pytest.mark.parametrize("gen_t", [None, tuple, list])
def test_auto_convert_output(out_type, gen_t):
    """Outputs must be converted to the globally configured type.

    Fix: the global output setting is now restored in a ``finally`` block,
    so a failing assertion no longer leaks non-default config into other
    tests.
    """
    setting, expected_type = out_type
    pylibraft.config.set_output_as(setting)
    try:
        produced = gen_cai(1, 5, gen_t)
        items = (
            produced if isinstance(produced, (list, tuple)) else [produced]
        )
        for item in items:
            assert isinstance(item, expected_type)
    finally:
        # Make sure we set the config back to default
        pylibraft.config.set_output_as("raft")
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_ivf_pq.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
from pylibraft.common import device_ndarray
from pylibraft.neighbors import ivf_pq
def generate_data(shape, dtype):
    """Return random test data of the given shape and dtype.

    Signed/unsigned byte dtypes get integer samples; every other dtype
    gets uniform floats in [0, 1) cast to that dtype.
    """
    if dtype == np.byte:
        return np.random.randint(-127, 128, size=shape, dtype=np.byte)
    if dtype == np.ubyte:
        return np.random.randint(0, 255, size=shape, dtype=np.ubyte)
    return np.random.random_sample(shape).astype(dtype)
def calc_recall(ann_idx, true_nn_idx):
    """Fraction of approximate neighbor ids found in the exact ids, per row."""
    assert ann_idx.shape == true_nn_idx.shape
    hits = sum(
        np.intersect1d(approx, exact).size
        for approx, exact in zip(ann_idx, true_nn_idx)
    )
    return hits / ann_idx.size
def check_distances(dataset, queries, metric, out_idx, out_dist, eps=None):
    """
    Recompute the distance between each query and its reported neighbors
    on the host and require the mean relative deviation from ``out_dist``
    to stay below ``eps``.
    """
    if eps is None:
        # Quantization leads to errors in the distance calculation.
        # The aim of this test is not to test precision, but to catch obvious
        # errors.
        eps = 0.1
    dist = np.empty(out_dist.shape, out_dist.dtype)
    for row in range(queries.shape[0]):
        query = queries[np.newaxis, row, :]
        neighbors = dataset[out_idx[row, :], :]
        if metric in ("sqeuclidean", "euclidean"):
            dist[row, :] = pairwise_distances(query, neighbors, metric)
        elif metric == "inner_product":
            dist[row, :] = np.matmul(query, neighbors.T)
        else:
            raise ValueError("Invalid metric")

    # Guard against division by (near-)zero distances in the relative error.
    denom = abs(dist)
    denom[dist < 1e-3] = 1e-3
    rel_err = abs(out_dist - dist) / denom
    assert np.mean(rel_err) < eps
def run_ivf_pq_build_search_test(
    n_rows,
    n_cols,
    n_queries,
    k,
    n_lists,
    metric,
    dtype,
    pq_bits=8,
    pq_dim=0,
    codebook_kind="subspace",
    add_data_on_build=True,
    n_probes=100,
    lut_dtype=np.float32,
    internal_distance_dtype=np.float32,
    force_random_rotation=False,
    kmeans_trainset_fraction=1,
    kmeans_n_iters=20,
    compare=True,
    inplace=True,
    array_type="device",
):
    """Build an IVF-PQ index, run a k-NN search, and validate the results.

    Most parameters mirror ivf_pq.IndexParams / ivf_pq.SearchParams.
    ``compare`` skips recall/distance validation, ``inplace`` pre-allocates
    the output arrays, and ``array_type`` selects host vs. device inputs.

    Fix: the default for ``add_data_on_build`` used to be the string
    "True"; it is now a real bool (both are truthy, so callers that relied
    on the default see identical behavior).
    """
    dataset = generate_data((n_rows, n_cols), dtype)
    if metric == "inner_product":
        # Row-normalized input makes inner-product comparable to the
        # cosine reference metric used below.
        dataset = normalize(dataset, norm="l2", axis=1)
    dataset_device = device_ndarray(dataset)
    build_params = ivf_pq.IndexParams(
        n_lists=n_lists,
        metric=metric,
        kmeans_n_iters=kmeans_n_iters,
        kmeans_trainset_fraction=kmeans_trainset_fraction,
        pq_bits=pq_bits,
        pq_dim=pq_dim,
        codebook_kind=codebook_kind,
        force_random_rotation=force_random_rotation,
        add_data_on_build=add_data_on_build,
    )
    if array_type == "device":
        index = ivf_pq.build(build_params, dataset_device)
    else:
        index = ivf_pq.build(build_params, dataset)
    assert index.trained
    if pq_dim != 0:
        # pq_dim == 0 lets the library pick a value, so only check when set.
        assert index.pq_dim == build_params.pq_dim
    assert index.pq_bits == build_params.pq_bits
    assert index.metric == build_params.metric
    assert index.n_lists == build_params.n_lists
    if not add_data_on_build:
        # Exercise extend(): add the dataset in two halves with explicit ids.
        dataset_1 = dataset[: n_rows // 2, :]
        dataset_2 = dataset[n_rows // 2 :, :]
        indices_1 = np.arange(n_rows // 2, dtype=np.int64)
        indices_2 = np.arange(n_rows // 2, n_rows, dtype=np.int64)
        if array_type == "device":
            dataset_1_device = device_ndarray(dataset_1)
            dataset_2_device = device_ndarray(dataset_2)
            indices_1_device = device_ndarray(indices_1)
            indices_2_device = device_ndarray(indices_2)
            index = ivf_pq.extend(index, dataset_1_device, indices_1_device)
            index = ivf_pq.extend(index, dataset_2_device, indices_2_device)
        else:
            index = ivf_pq.extend(index, dataset_1, indices_1)
            index = ivf_pq.extend(index, dataset_2, indices_2)
    assert index.size >= n_rows
    queries = generate_data((n_queries, n_cols), dtype)
    out_idx = np.zeros((n_queries, k), dtype=np.int64)
    out_dist = np.zeros((n_queries, k), dtype=np.float32)
    queries_device = device_ndarray(queries)
    # When not inplace, search allocates and returns the output arrays.
    out_idx_device = device_ndarray(out_idx) if inplace else None
    out_dist_device = device_ndarray(out_dist) if inplace else None
    search_params = ivf_pq.SearchParams(
        n_probes=n_probes,
        lut_dtype=lut_dtype,
        internal_distance_dtype=internal_distance_dtype,
    )
    ret_output = ivf_pq.search(
        search_params,
        index,
        queries_device,
        k,
        neighbors=out_idx_device,
        distances=out_dist_device,
    )
    if not inplace:
        out_dist_device, out_idx_device = ret_output
    if not compare:
        return
    out_idx = out_idx_device.copy_to_host()
    out_dist = out_dist_device.copy_to_host()
    # Calculate reference values with sklearn
    skl_metric = {
        "sqeuclidean": "sqeuclidean",
        "inner_product": "cosine",
        "euclidean": "euclidean",
    }[metric]
    nn_skl = NearestNeighbors(
        n_neighbors=k, algorithm="brute", metric=skl_metric
    )
    nn_skl.fit(dataset)
    skl_idx = nn_skl.kneighbors(queries, return_distance=False)
    recall = calc_recall(out_idx, skl_idx)
    assert recall > 0.7
    check_distances(dataset, queries, metric, out_idx, out_dist)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("n_rows", [10000])
@pytest.mark.parametrize("n_cols", [10])
@pytest.mark.parametrize("n_queries", [100])
@pytest.mark.parametrize("n_lists", [100])
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.uint8])
@pytest.mark.parametrize("array_type", ["host", "device"])
def test_ivf_pq_dtypes(
    n_rows, n_cols, n_queries, n_lists, dtype, inplace, array_type
):
    # Note that inner_product tests use normalized input which we cannot
    # represent in int8, therefore we test only sqeuclidean metric here.
    kwargs = dict(
        n_rows=n_rows,
        n_cols=n_cols,
        n_queries=n_queries,
        k=10,
        n_lists=n_lists,
        metric="sqeuclidean",
        dtype=dtype,
        inplace=inplace,
        array_type=array_type,
    )
    run_ivf_pq_build_search_test(**kwargs)
@pytest.mark.parametrize(
    "params",
    [
        pytest.param(
            {
                "n_rows": 0,
                "n_cols": 10,
                "n_queries": 10,
                "k": 1,
                "n_lists": 10,
            },
            marks=pytest.mark.xfail(reason="empty dataset"),
        ),
        {"n_rows": 1, "n_cols": 10, "n_queries": 10, "k": 1, "n_lists": 1},
        {"n_rows": 10, "n_cols": 1, "n_queries": 10, "k": 10, "n_lists": 10},
        # {"n_rows": 999, "n_cols": 42, "n_queries": 453, "k": 137,
        # "n_lists": 53},
    ],
)
def test_ivf_pq_n(params):
    # We do not test recall, just confirm that we can handle edge cases for
    # certain parameters
    edge_case = {
        name: params[name]
        for name in ("n_rows", "n_cols", "n_queries", "k", "n_lists")
    }
    run_ivf_pq_build_search_test(
        metric="sqeuclidean",
        dtype=np.float32,
        compare=False,
        **edge_case,
    )
@pytest.mark.parametrize(
    "metric", ["sqeuclidean", "inner_product", "euclidean"]
)
@pytest.mark.parametrize("dtype", [np.float32])
@pytest.mark.parametrize("codebook_kind", ["subspace", "cluster"])
@pytest.mark.parametrize("rotation", [True, False])
def test_ivf_pq_build_params(metric, dtype, codebook_kind, rotation):
    """Exercise IVF-PQ build options across metrics and codebook layouts."""
    problem_size = dict(
        n_rows=10000,
        n_cols=10,
        n_queries=1000,
        k=10,
        n_lists=100,
    )
    run_ivf_pq_build_search_test(
        metric=metric,
        dtype=dtype,
        pq_bits=8,
        pq_dim=0,
        codebook_kind=codebook_kind,
        add_data_on_build=True,
        n_probes=100,
        force_random_rotation=rotation,
        **problem_size,
    )
@pytest.mark.parametrize(
    "params",
    [
        {"pq_dims": 10, "pq_bits": 8, "n_lists": 100},
        {"pq_dims": 16, "pq_bits": 7, "n_lists": 100},
        {"pq_dims": 0, "pq_bits": 8, "n_lists": 90},
        {
            "pq_dims": 0,
            "pq_bits": 8,
            "n_lists": 100,
            "trainset_fraction": 0.9,
            "n_iters": 30,
        },
    ],
)
def test_ivf_pq_params(params):
    """Sweep PQ dimensionality/bit-width and k-means training options."""
    pq_bits = params["pq_bits"]
    pq_dim = params["pq_dims"]
    trainset_fraction = params.get("trainset_fraction", 1.0)
    n_iters = params.get("n_iters", 20)
    run_ivf_pq_build_search_test(
        n_rows=10000,
        n_cols=16,
        n_queries=1000,
        k=10,
        n_lists=params["n_lists"],
        metric="sqeuclidean",
        dtype=np.float32,
        pq_bits=pq_bits,
        pq_dim=pq_dim,
        kmeans_trainset_fraction=trainset_fraction,
        kmeans_n_iters=n_iters,
    )
@pytest.mark.parametrize(
    "params",
    [
        {
            "k": 10,
            "n_probes": 100,
            "lut": np.float16,
            "idd": np.float32,
        },
        {
            "k": 10,
            "n_probes": 99,
            "lut": np.uint8,
            "idd": np.float32,
        },
        {
            "k": 10,
            "n_probes": 100,
            "lut": np.float16,
            "idd": np.float16,
        },
        {
            "k": 129,
            "n_probes": 100,
            "lut": np.float32,
            "idd": np.float32,
        },
    ],
)
def test_ivf_pq_search_params(params):
    """Sweep k, n_probes, and the LUT / internal-distance dtypes."""
    k = params["k"]
    n_probes = params["n_probes"]
    lut_dtype = params["lut"]
    internal_dtype = params["idd"]
    run_ivf_pq_build_search_test(
        n_rows=10000,
        n_cols=16,
        n_queries=1000,
        k=k,
        n_lists=100,
        n_probes=n_probes,
        metric="sqeuclidean",
        dtype=np.float32,
        lut_dtype=lut_dtype,
        internal_distance_dtype=internal_dtype,
    )
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.uint8])
@pytest.mark.parametrize("array_type", ["host", "device"])
def test_extend(dtype, array_type):
    """An index populated only through extend() must remain searchable."""
    base = dict(
        n_rows=10000,
        n_cols=10,
        n_queries=100,
        k=10,
        n_lists=100,
        metric="sqeuclidean",
    )
    run_ivf_pq_build_search_test(
        dtype=dtype,
        add_data_on_build=False,
        array_type=array_type,
        **base,
    )
def test_build_assertions():
    """Verify the error paths of IVF-PQ build/search/extend."""
    # float64 datasets are unsupported and must be rejected at build time.
    with pytest.raises(TypeError):
        run_ivf_pq_build_search_test(
            n_rows=1000,
            n_cols=10,
            n_queries=100,
            k=10,
            n_lists=100,
            metric="sqeuclidean",
            dtype=np.float64,
        )
    n_rows = 1000
    n_cols = 100
    n_queries = 212
    k = 10
    dataset = generate_data((n_rows, n_cols), np.float32)
    dataset_device = device_ndarray(dataset)
    index_params = ivf_pq.IndexParams(
        n_lists=50,
        metric="sqeuclidean",
        kmeans_n_iters=20,
        kmeans_trainset_fraction=1,
        add_data_on_build=False,
    )
    # An Index() constructed directly is untrained.
    index = ivf_pq.Index()
    queries = generate_data((n_queries, n_cols), np.float32)
    out_idx = np.zeros((n_queries, k), dtype=np.int64)
    out_dist = np.zeros((n_queries, k), dtype=np.float32)
    queries_device = device_ndarray(queries)
    out_idx_device = device_ndarray(out_idx)
    out_dist_device = device_ndarray(out_dist)
    search_params = ivf_pq.SearchParams(n_probes=50)
    with pytest.raises(ValueError):
        # Index must be built before search
        ivf_pq.search(
            search_params,
            index,
            queries_device,
            k,
            out_idx_device,
            out_dist_device,
        )
    index = ivf_pq.build(index_params, dataset_device)
    assert index.trained
    # n_rows + 1 indices: one too many for either extend() call below.
    indices = np.arange(n_rows + 1, dtype=np.int64)
    indices_device = device_ndarray(indices)
    with pytest.raises(ValueError):
        # Dataset dimension mismatch
        ivf_pq.extend(index, queries_device, indices_device)
    with pytest.raises(ValueError):
        # indices dimension mismatch
        ivf_pq.extend(index, dataset_device, indices_device)
@pytest.mark.parametrize(
    "params",
    [
        {"q_dt": np.float64},
        {"q_order": "F"},
        {"q_cols": 101},
        {"idx_dt": np.uint32},
        {"idx_order": "F"},
        {"idx_rows": 42},
        {"idx_cols": 137},
        {"dist_dt": np.float64},
        {"dist_order": "F"},
        {"dist_rows": 42},
        {"dist_cols": 137},
    ],
)
def test_search_inputs(params):
    """Test with invalid input dtype, order, or dimension.

    Each params dict overrides exactly one property of the queries,
    neighbors, or distances array; ivf_pq.search must reject it.
    """
    n_rows = 1000
    n_cols = 100
    n_queries = 256
    k = 10
    dtype = np.float32
    # Queries: possibly wrong dtype, memory order, or column count.
    q_dt = params.get("q_dt", np.float32)
    q_order = params.get("q_order", "C")
    queries = generate_data(
        (n_queries, params.get("q_cols", n_cols)), q_dt
    ).astype(q_dt, order=q_order)
    queries_device = device_ndarray(queries)
    # Output neighbor indices: possibly wrong dtype, order, or shape.
    idx_dt = params.get("idx_dt", np.int64)
    idx_order = params.get("idx_order", "C")
    out_idx = np.zeros(
        (params.get("idx_rows", n_queries), params.get("idx_cols", k)),
        dtype=idx_dt,
        order=idx_order,
    )
    out_idx_device = device_ndarray(out_idx)
    # Output distances: possibly wrong dtype, order, or shape.
    dist_dt = params.get("dist_dt", np.float32)
    dist_order = params.get("dist_order", "C")
    out_dist = np.zeros(
        (params.get("dist_rows", n_queries), params.get("dist_cols", k)),
        dtype=dist_dt,
        order=dist_order,
    )
    out_dist_device = device_ndarray(out_dist)
    index_params = ivf_pq.IndexParams(
        n_lists=50, metric="sqeuclidean", add_data_on_build=True
    )
    dataset = generate_data((n_rows, n_cols), dtype)
    dataset_device = device_ndarray(dataset)
    index = ivf_pq.build(index_params, dataset_device)
    assert index.trained
    # Exactly one input is malformed in every parametrization, so search
    # must raise.
    with pytest.raises(Exception):
        search_params = ivf_pq.SearchParams(n_probes=50)
        ivf_pq.search(
            search_params,
            index,
            queries_device,
            k,
            out_idx_device,
            out_dist_device,
        )
def test_save_load():
    """Round-trip an IVF-PQ index through save/load and verify that the
    loaded index matches the original in metadata and search results."""
    n_rows = 10000
    n_cols = 50
    n_queries = 1000
    dtype = np.float32
    dataset = generate_data((n_rows, n_cols), dtype)
    dataset_device = device_ndarray(dataset)
    build_params = ivf_pq.IndexParams(n_lists=100, metric="sqeuclidean")
    index = ivf_pq.build(build_params, dataset_device)
    assert index.trained
    filename = "my_index.bin"
    ivf_pq.save(filename, index)
    loaded_index = ivf_pq.load(filename)
    # Index metadata must survive serialization.
    assert index.pq_dim == loaded_index.pq_dim
    assert index.pq_bits == loaded_index.pq_bits
    assert index.metric == loaded_index.metric
    assert index.n_lists == loaded_index.n_lists
    assert index.size == loaded_index.size
    queries = generate_data((n_queries, n_cols), dtype)
    queries_device = device_ndarray(queries)
    search_params = ivf_pq.SearchParams(n_probes=100)
    k = 10
    distance_dev, neighbors_dev = ivf_pq.search(
        search_params, index, queries_device, k
    )
    neighbors = neighbors_dev.copy_to_host()
    dist = distance_dev.copy_to_host()
    # Drop the original index so the loaded one must be self-contained.
    del index
    distance_dev, neighbors_dev = ivf_pq.search(
        search_params, loaded_index, queries_device, k
    )
    neighbors2 = neighbors_dev.copy_to_host()
    dist2 = distance_dev.copy_to_host()
    # Searching the loaded index must reproduce the original results.
    assert np.all(neighbors == neighbors2)
    assert np.allclose(dist, dist2, rtol=1e-6)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_cai_wrapper.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.common import cai_wrapper, device_ndarray
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("shape", [(10, 5)])
def test_basic_accessors(order, dtype, shape):
    """cai_wrapper must report dtype, shape, and contiguity correctly."""
    host = np.random.random(shape).astype(dtype)
    if order == "C":
        host = np.ascontiguousarray(host)
    else:
        host = np.asfortranarray(host)
    wrapped = cai_wrapper(device_ndarray(host))
    assert wrapped.dtype == dtype
    assert wrapped.shape == shape
    assert wrapped.c_contiguous == (order == "C")
    assert wrapped.f_contiguous == (order == "F")
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_distance.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from scipy.spatial.distance import cdist
from pylibraft.common import DeviceResources, Stream, device_ndarray
from pylibraft.distance import pairwise_distance
@pytest.mark.parametrize("n_rows", [50, 100])
@pytest.mark.parametrize("n_cols", [10, 50])
@pytest.mark.parametrize(
    "metric",
    [
        "euclidean",
        "cityblock",
        "chebyshev",
        "canberra",
        "correlation",
        "hamming",
        "jensenshannon",
        "russellrao",
        "cosine",
        "sqeuclidean",
        "inner_product",
    ],
)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_distance(n_rows, n_cols, inplace, metric, order, dtype):
    """Compare pairwise_distance against scipy cdist (or matmul for
    inner_product) for every supported metric, order, and dtype."""
    input1 = np.random.random_sample((n_rows, n_cols))
    input1 = np.asarray(input1, order=order).astype(dtype)
    # RussellRao expects boolean arrays
    if metric == "russellrao":
        input1[input1 < 0.5] = 0
        input1[input1 >= 0.5] = 1
    # JensenShannon expects probability arrays
    elif metric == "jensenshannon":
        norm = np.sum(input1, axis=1)
        input1 = (input1.T / norm).T
    output = np.zeros((n_rows, n_rows), dtype=dtype)
    # scipy has no inner_product metric; compute the reference directly.
    if metric == "inner_product":
        expected = np.matmul(input1, input1.T)
    else:
        expected = cdist(input1, input1, metric)
    input1_device = device_ndarray(input1)
    # None output triggers allocation inside pairwise_distance.
    output_device = device_ndarray(output) if inplace else None
    # Run on a fresh, non-default stream to exercise stream handling.
    s2 = Stream()
    handle = DeviceResources(stream=s2)
    ret_output = pairwise_distance(
        input1_device, input1_device, output_device, metric, handle=handle
    )
    handle.sync()
    output_device = ret_output if not inplace else output_device
    actual = output_device.copy_to_host()
    assert np.allclose(expected, actual, atol=1e-3, rtol=1e-3)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_z_interruptible.py | # Copyright (c) 2022, NVIDIA CORPORATION.
import os
import signal
import time
import pytest
from pylibraft.common.interruptible import cuda_interruptible, cuda_yield
def send_ctrl_c():
    """Deliver SIGINT to the current process, emulating a Ctrl-C press."""
    # signal.raise_signal(signal.SIGINT) available only since python 3.8
    os.kill(os.getpid(), signal.SIGINT)
def test_should_cancel_via_interruptible():
    """SIGINT inside cuda_interruptible must cancel promptly at cuda_yield."""
    start_time = time.monotonic()
    with pytest.raises(RuntimeError, match="this thread was cancelled"):
        with cuda_interruptible():
            send_ctrl_c()
            cuda_yield()
            # Never reached: cuda_yield raises after the pending signal.
            time.sleep(1.0)
    end_time = time.monotonic()
    # The sleep must have been skipped; allow generous scheduling slack.
    assert (
        end_time < start_time + 0.5
    ), "The process seems to have waited, while it shouldn't have."
def test_should_cancel_via_python():
    """Without cuda_interruptible, SIGINT surfaces as KeyboardInterrupt."""
    start_time = time.monotonic()
    with pytest.raises(KeyboardInterrupt):
        send_ctrl_c()
        cuda_yield()
        # Never reached: the pending SIGINT interrupts before the sleep.
        time.sleep(1.0)
    end_time = time.monotonic()
    assert (
        end_time < start_time + 0.5
    ), "The process seems to have waited, while it shouldn't have."
def test_should_wait_no_interrupt():
    """With no signal sent, cuda_interruptible must not cancel anything."""
    start_time = time.monotonic()
    with cuda_interruptible():
        cuda_yield()
        time.sleep(1.0)
    end_time = time.monotonic()
    # The full sleep must have run.
    assert (
        end_time > start_time + 0.5
    ), "The process seems to be cancelled, while it shouldn't be."
def test_should_wait_no_yield():
    """A SIGINT without a cuda_yield call must not cancel the block."""
    start_time = time.monotonic()
    with cuda_interruptible():
        send_ctrl_c()
        # No cuda_yield: the pending signal is not acted upon here.
        time.sleep(1.0)
    end_time = time.monotonic()
    assert (
        end_time > start_time + 0.5
    ), "The process seems to be cancelled, while it shouldn't be."
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_handle.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from pylibraft.common import DeviceResources, Stream, device_ndarray
from pylibraft.distance import pairwise_distance
cupy = pytest.importorskip("cupy")
@pytest.mark.parametrize("stream", [cupy.cuda.Stream().ptr, Stream()])
def test_handle_external_stream(stream):
    """DeviceResources must accept a raw cupy stream pointer or a Stream."""
    input1 = np.random.random_sample((50, 3))
    input1 = np.asarray(input1, order="F").astype("float")
    output = np.zeros((50, 50), dtype="float")
    input1_device = device_ndarray(input1)
    output_device = device_ndarray(output)
    # We are just testing that this doesn't segfault
    handle = DeviceResources(stream)
    pairwise_distance(
        input1_device, input1_device, output_device, "euclidean", handle=handle
    )
    handle.sync()
    # A float is not a valid stream handle and must be rejected.
    with pytest.raises(ValueError):
        handle = DeviceResources(stream=1.0)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_brute_force.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from scipy.spatial.distance import cdist
from pylibraft.common import DeviceResources, Stream, device_ndarray
from pylibraft.neighbors.brute_force import knn
@pytest.mark.parametrize("n_index_rows", [32, 100])
@pytest.mark.parametrize("n_query_rows", [32, 100])
@pytest.mark.parametrize("n_cols", [40, 100])
@pytest.mark.parametrize("k", [1, 5, 32])
@pytest.mark.parametrize(
    "metric",
    [
        "euclidean",
        "cityblock",
        "chebyshev",
        "canberra",
        "correlation",
        "russellrao",
        "cosine",
        "sqeuclidean",
        # "inner_product",
    ],
)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("dtype", [np.float32])
def test_knn(n_index_rows, n_query_rows, n_cols, k, inplace, metric, dtype):
    """Brute-force knn distances must match the k smallest scipy cdist
    distances per query row."""
    index = np.random.random_sample((n_index_rows, n_cols)).astype(dtype)
    queries = np.random.random_sample((n_query_rows, n_cols)).astype(dtype)
    # RussellRao expects boolean arrays
    if metric == "russellrao":
        index[index < 0.5] = 0.0
        index[index >= 0.5] = 1.0
        queries[queries < 0.5] = 0.0
        queries[queries >= 0.5] = 1.0
    indices = np.zeros((n_query_rows, k), dtype="int64")
    distances = np.zeros((n_query_rows, k), dtype=dtype)
    index_device = device_ndarray(index)
    queries_device = device_ndarray(queries)
    indices_device = device_ndarray(indices)
    distances_device = device_ndarray(distances)
    # Run on a dedicated stream to exercise handle/stream plumbing.
    s2 = Stream()
    handle = DeviceResources(stream=s2)
    ret_distances, ret_indices = knn(
        index_device,
        queries_device,
        k,
        indices=indices_device,
        distances=distances_device,
        metric=metric,
        handle=handle,
    )
    handle.sync()
    # Host-side reference distances.
    pw_dists = cdist(queries, index, metric=metric)
    distances_device = ret_distances if not inplace else distances_device
    actual_distances = distances_device.copy_to_host()
    # Snap tiny values to zero to sidestep float noise near 0.
    actual_distances[actual_distances <= 1e-5] = 0.0
    argsort = np.argsort(pw_dists, axis=1)
    for i in range(pw_dists.shape[0]):
        expected_indices = argsort[i]
        gpu_dists = actual_distances[i]
        cpu_ordered = pw_dists[i, expected_indices]
        # Compare distances only (not ids) so ties do not cause flakes.
        np.testing.assert_allclose(
            cpu_ordered[:k], gpu_dists, atol=1e-3, rtol=1e-3
        )
def test_knn_check_col_major_inputs():
    # make sure that we get an exception if passed col-major inputs,
    # instead of returning incorrect results
    cp = pytest.importorskip("cupy")
    n_index_rows, n_query_rows, n_cols = 128, 16, 32
    index = cp.random.random_sample((n_index_rows, n_cols), dtype="float32")
    queries = cp.random.random_sample((n_query_rows, n_cols), dtype="float32")
    bad_pairs = (
        (cp.asarray(index, order="F"), queries),
        (index, cp.asarray(queries, order="F")),
    )
    for bad_index, bad_queries in bad_pairs:
        with pytest.raises(ValueError):
            knn(bad_index, bad_queries, k=4)
    # shouldn't throw an exception with c-contiguous inputs
    knn(index, queries, k=4)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_ivf_flat.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
from pylibraft.common import device_ndarray
from pylibraft.neighbors import ivf_flat
def generate_data(shape, dtype):
    """Random test data: integer samples for byte dtypes, uniform floats
    in [0, 1) for everything else."""
    if dtype == np.byte:
        low, high = -127, 128
        return np.random.randint(low, high, size=shape, dtype=np.byte)
    if dtype == np.ubyte:
        low, high = 0, 255
        return np.random.randint(low, high, size=shape, dtype=np.ubyte)
    return np.random.random_sample(shape).astype(dtype)
def calc_recall(ann_idx, true_nn_idx):
    """Recall of approximate neighbor ids against the exact neighbor ids."""
    assert ann_idx.shape == true_nn_idx.shape
    matched = 0
    for row, truth in zip(ann_idx, true_nn_idx):
        matched += np.intersect1d(row, truth).size
    return matched / ann_idx.size
def check_distances(dataset, queries, metric, out_idx, out_dist, eps=None):
    """
    Recompute query-to-neighbor distances on the host and require the mean
    relative deviation from ``out_dist`` to stay below ``eps``.
    """
    if eps is None:
        # Quantization leads to errors in the distance calculation.
        # The aim of this test is not to test precision, but to catch obvious
        # errors.
        eps = 0.1
    dist = np.empty(out_dist.shape, out_dist.dtype)
    n_queries = queries.shape[0]
    for row in range(n_queries):
        query = queries[np.newaxis, row, :]
        neighbors = dataset[out_idx[row, :], :]
        if metric == "sqeuclidean":
            dist[row, :] = pairwise_distances(query, neighbors, "sqeuclidean")
        elif metric == "euclidean":
            dist[row, :] = pairwise_distances(query, neighbors, "euclidean")
        elif metric == "inner_product":
            dist[row, :] = np.matmul(query, neighbors.T)
        else:
            raise ValueError("Invalid metric")

    # Guard against division by tiny distances in the relative error.
    floor = abs(dist)
    floor[dist < 1e-3] = 1e-3
    assert np.mean(abs(out_dist - dist) / floor) < eps
def run_ivf_flat_build_search_test(
    n_rows,
    n_cols,
    n_queries,
    k,
    n_lists,
    metric,
    dtype,
    add_data_on_build=True,
    n_probes=100,
    kmeans_trainset_fraction=1,
    kmeans_n_iters=20,
    compare=True,
    inplace=True,
    array_type="device",
):
    """Build an IVF-Flat index, run a k-NN search, and validate the results.

    Most parameters mirror ivf_flat.IndexParams / ivf_flat.SearchParams.
    ``compare`` skips recall/distance validation, ``inplace`` pre-allocates
    the output arrays, and ``array_type`` selects host vs. device inputs.
    """
    dataset = generate_data((n_rows, n_cols), dtype)
    if metric == "inner_product":
        # Row-normalized input makes inner-product comparable to the
        # cosine reference metric used below.
        dataset = normalize(dataset, norm="l2", axis=1)
    dataset_device = device_ndarray(dataset)
    build_params = ivf_flat.IndexParams(
        n_lists=n_lists,
        metric=metric,
        kmeans_n_iters=kmeans_n_iters,
        kmeans_trainset_fraction=kmeans_trainset_fraction,
        add_data_on_build=add_data_on_build,
    )
    if array_type == "device":
        index = ivf_flat.build(build_params, dataset_device)
    else:
        index = ivf_flat.build(build_params, dataset)
    assert index.trained
    assert index.metric == build_params.metric
    assert index.n_lists == build_params.n_lists
    if not add_data_on_build:
        # Exercise extend(): add the dataset in two halves with explicit ids.
        dataset_1 = dataset[: n_rows // 2, :]
        dataset_2 = dataset[n_rows // 2 :, :]
        indices_1 = np.arange(n_rows // 2, dtype=np.int64)
        indices_2 = np.arange(n_rows // 2, n_rows, dtype=np.int64)
        if array_type == "device":
            dataset_1_device = device_ndarray(dataset_1)
            dataset_2_device = device_ndarray(dataset_2)
            indices_1_device = device_ndarray(indices_1)
            indices_2_device = device_ndarray(indices_2)
            index = ivf_flat.extend(index, dataset_1_device, indices_1_device)
            index = ivf_flat.extend(index, dataset_2_device, indices_2_device)
        else:
            index = ivf_flat.extend(index, dataset_1, indices_1)
            index = ivf_flat.extend(index, dataset_2, indices_2)
    assert index.size >= n_rows
    queries = generate_data((n_queries, n_cols), dtype)
    out_idx = np.zeros((n_queries, k), dtype=np.int64)
    out_dist = np.zeros((n_queries, k), dtype=np.float32)
    queries_device = device_ndarray(queries)
    # When not inplace, search allocates and returns the output arrays.
    out_idx_device = device_ndarray(out_idx) if inplace else None
    out_dist_device = device_ndarray(out_dist) if inplace else None
    search_params = ivf_flat.SearchParams(n_probes=n_probes)
    ret_output = ivf_flat.search(
        search_params,
        index,
        queries_device,
        k,
        neighbors=out_idx_device,
        distances=out_dist_device,
    )
    if not inplace:
        out_dist_device, out_idx_device = ret_output
    if not compare:
        return
    out_idx = out_idx_device.copy_to_host()
    out_dist = out_dist_device.copy_to_host()
    # Calculate reference values with sklearn
    skl_metric = {
        "sqeuclidean": "sqeuclidean",
        "inner_product": "cosine",
        "euclidean": "euclidean",
    }[metric]
    nn_skl = NearestNeighbors(
        n_neighbors=k, algorithm="brute", metric=skl_metric
    )
    nn_skl.fit(dataset)
    skl_idx = nn_skl.kneighbors(queries, return_distance=False)
    recall = calc_recall(out_idx, skl_idx)
    assert recall > 0.7
    check_distances(dataset, queries, metric, out_idx, out_dist)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("n_rows", [10000])
@pytest.mark.parametrize("n_cols", [10])
@pytest.mark.parametrize("n_queries", [100])
@pytest.mark.parametrize("n_lists", [100])
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.uint8])
@pytest.mark.parametrize("array_type", ["device"])
def test_ivf_pq_dtypes(
    n_rows, n_cols, n_queries, n_lists, dtype, inplace, array_type
):
    # NOTE(review): despite the "ivf_pq" name this exercises ivf_flat;
    # renaming would change the collected test id, so it is left as-is.
    # Note that inner_product tests use normalized input which we cannot
    # represent in int8, therefore we test only sqeuclidean metric here.
    kwargs = dict(
        n_rows=n_rows,
        n_cols=n_cols,
        n_queries=n_queries,
        k=10,
        n_lists=n_lists,
        metric="sqeuclidean",
        dtype=dtype,
        inplace=inplace,
        array_type=array_type,
    )
    run_ivf_flat_build_search_test(**kwargs)
@pytest.mark.parametrize(
    "params",
    [
        pytest.param(
            {
                "n_rows": 0,
                "n_cols": 10,
                "n_queries": 10,
                "k": 1,
                "n_lists": 10,
            },
            marks=pytest.mark.xfail(reason="empty dataset"),
        ),
        {"n_rows": 1, "n_cols": 10, "n_queries": 10, "k": 1, "n_lists": 1},
        {"n_rows": 10, "n_cols": 1, "n_queries": 10, "k": 10, "n_lists": 10},
        # {"n_rows": 999, "n_cols": 42, "n_queries": 453, "k": 137,
        # "n_lists": 53},
    ],
)
def test_ivf_flat_n(params):
    # We do not test recall, just confirm that we can handle edge cases for
    # certain parameters
    edge_case = {
        name: params[name]
        for name in ("n_rows", "n_cols", "n_queries", "k", "n_lists")
    }
    run_ivf_flat_build_search_test(
        metric="sqeuclidean",
        dtype=np.float32,
        compare=False,
        **edge_case,
    )
@pytest.mark.parametrize(
    "metric", ["sqeuclidean", "inner_product", "euclidean"]
)
@pytest.mark.parametrize("dtype", [np.float32])
def test_ivf_flat_build_params(metric, dtype):
    """Build+search smoke test across all supported metrics."""
    problem_size = dict(
        n_rows=10000,
        n_cols=10,
        n_queries=1000,
        k=10,
        n_lists=100,
    )
    run_ivf_flat_build_search_test(
        metric=metric,
        dtype=dtype,
        add_data_on_build=True,
        n_probes=100,
        **problem_size,
    )
@pytest.mark.parametrize(
    "params",
    [
        {
            "n_lists": 100,
            "trainset_fraction": 0.9,
            "n_iters": 30,
        },
    ],
)
def test_ivf_flat_params(params):
    """Exercise k-means training options taken from the params dict."""
    n_lists = params["n_lists"]
    trainset_fraction = params.get("trainset_fraction", 1.0)
    n_iters = params.get("n_iters", 20)
    run_ivf_flat_build_search_test(
        n_rows=10000,
        n_cols=16,
        n_queries=1000,
        k=10,
        n_lists=n_lists,
        metric="sqeuclidean",
        dtype=np.float32,
        kmeans_trainset_fraction=trainset_fraction,
        kmeans_n_iters=n_iters,
    )
@pytest.mark.parametrize(
    "params",
    [
        {
            "k": 10,
            "n_probes": 100,
        },
        {
            "k": 10,
            "n_probes": 99,
        },
        {
            "k": 10,
            "n_probes": 100,
        },
        {
            "k": 129,
            "n_probes": 100,
        },
    ],
)
def test_ivf_pq_search_params(params):
    # NOTE(review): despite the "ivf_pq" name this exercises ivf_flat;
    # renaming would change the collected test id, so it is left as-is.
    k = params["k"]
    n_probes = params["n_probes"]
    run_ivf_flat_build_search_test(
        n_rows=10000,
        n_cols=16,
        n_queries=1000,
        k=k,
        n_lists=100,
        n_probes=n_probes,
        metric="sqeuclidean",
        dtype=np.float32,
    )
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.uint8])
@pytest.mark.parametrize("array_type", ["device"])
def test_extend(dtype, array_type):
    """An index populated only through extend() must remain searchable."""
    base = dict(
        n_rows=10000,
        n_cols=10,
        n_queries=100,
        k=10,
        n_lists=100,
        metric="sqeuclidean",
    )
    run_ivf_flat_build_search_test(
        dtype=dtype,
        add_data_on_build=False,
        array_type=array_type,
        **base,
    )
def test_build_assertions():
    """Verify the error paths of IVF-Flat build/search/extend."""
    # float64 datasets are unsupported and must be rejected at build time.
    with pytest.raises(TypeError):
        run_ivf_flat_build_search_test(
            n_rows=1000,
            n_cols=10,
            n_queries=100,
            k=10,
            n_lists=100,
            metric="sqeuclidean",
            dtype=np.float64,
        )
    n_rows = 1000
    n_cols = 100
    n_queries = 212
    k = 10
    dataset = generate_data((n_rows, n_cols), np.float32)
    dataset_device = device_ndarray(dataset)
    index_params = ivf_flat.IndexParams(
        n_lists=50,
        metric="sqeuclidean",
        kmeans_n_iters=20,
        kmeans_trainset_fraction=1,
        add_data_on_build=False,
    )
    # An Index() constructed directly is untrained.
    index = ivf_flat.Index()
    queries = generate_data((n_queries, n_cols), np.float32)
    out_idx = np.zeros((n_queries, k), dtype=np.int64)
    out_dist = np.zeros((n_queries, k), dtype=np.float32)
    queries_device = device_ndarray(queries)
    out_idx_device = device_ndarray(out_idx)
    out_dist_device = device_ndarray(out_dist)
    search_params = ivf_flat.SearchParams(n_probes=50)
    with pytest.raises(ValueError):
        # Index must be built before search
        ivf_flat.search(
            search_params,
            index,
            queries_device,
            k,
            out_idx_device,
            out_dist_device,
        )
    index = ivf_flat.build(index_params, dataset_device)
    assert index.trained
    # n_rows + 1 indices: one too many for either extend() call below.
    indices = np.arange(n_rows + 1, dtype=np.int64)
    indices_device = device_ndarray(indices)
    with pytest.raises(ValueError):
        # Dataset dimension mismatch
        ivf_flat.extend(index, queries_device, indices_device)
    with pytest.raises(ValueError):
        # indices dimension mismatch
        ivf_flat.extend(index, dataset_device, indices_device)
@pytest.mark.parametrize(
    "params",
    [
        {"q_dt": np.float64},
        {"q_order": "F"},
        {"q_cols": 101},
        {"idx_dt": np.uint32},
        {"idx_order": "F"},
        {"idx_rows": 42},
        {"idx_cols": 137},
        {"dist_dt": np.float64},
        {"dist_order": "F"},
        {"dist_rows": 42},
        {"dist_cols": 137},
    ],
)
def test_search_inputs(params):
    """Test with invalid input dtype, order, or dimension.

    Each parametrization perturbs exactly one property (dtype, memory
    order, or a dimension) of the queries / output-indices /
    output-distances arrays; ivf_flat.search must then raise.
    """
    n_rows = 1000
    n_cols = 100
    n_queries = 256
    k = 10
    dtype = np.float32
    q_dt = params.get("q_dt", np.float32)
    q_order = params.get("q_order", "C")
    queries = generate_data(
        (n_queries, params.get("q_cols", n_cols)), q_dt
    ).astype(q_dt, order=q_order)
    queries_device = device_ndarray(queries)
    idx_dt = params.get("idx_dt", np.int64)
    idx_order = params.get("idx_order", "C")
    out_idx = np.zeros(
        (params.get("idx_rows", n_queries), params.get("idx_cols", k)),
        dtype=idx_dt,
        order=idx_order,
    )
    out_idx_device = device_ndarray(out_idx)
    dist_dt = params.get("dist_dt", np.float32)
    dist_order = params.get("dist_order", "C")
    out_dist = np.zeros(
        (params.get("dist_rows", n_queries), params.get("dist_cols", k)),
        dtype=dist_dt,
        order=dist_order,
    )
    out_dist_device = device_ndarray(out_dist)
    index_params = ivf_flat.IndexParams(
        n_lists=50, metric="sqeuclidean", add_data_on_build=True
    )
    dataset = generate_data((n_rows, n_cols), dtype)
    dataset_device = device_ndarray(dataset)
    # Build with valid inputs; only the search arguments are perturbed.
    index = ivf_flat.build(index_params, dataset_device)
    assert index.trained
    with pytest.raises(Exception):
        search_params = ivf_flat.SearchParams(n_probes=50)
        ivf_flat.search(
            search_params,
            index,
            queries_device,
            k,
            out_idx_device,
            out_dist_device,
        )
@pytest.mark.parametrize("dtype", [np.float32, np.int8, np.ubyte])
def test_save_load(dtype):
    """Round-trip an IVF-Flat index through save/load and verify that the
    loaded index has identical metadata and produces identical search
    results.
    """
    # Local imports: only this test needs filesystem helpers.
    import os
    import tempfile

    n_rows = 10000
    n_cols = 50
    n_queries = 1000

    dataset = generate_data((n_rows, n_cols), dtype)
    dataset_device = device_ndarray(dataset)

    build_params = ivf_flat.IndexParams(n_lists=100, metric="sqeuclidean")
    index = ivf_flat.build(build_params, dataset_device)
    assert index.trained

    # Serialize into a temporary directory so repeated runs don't leave
    # "my_index.bin" behind in the current working directory (the previous
    # version wrote to CWD and never cleaned up).
    with tempfile.TemporaryDirectory() as tmpdir:
        filename = os.path.join(tmpdir, "my_index.bin")
        ivf_flat.save(filename, index)
        loaded_index = ivf_flat.load(filename)

    # Index metadata must survive the round trip.
    assert index.metric == loaded_index.metric
    assert index.n_lists == loaded_index.n_lists
    assert index.dim == loaded_index.dim
    assert index.adaptive_centers == loaded_index.adaptive_centers

    queries = generate_data((n_queries, n_cols), dtype)
    queries_device = device_ndarray(queries)
    search_params = ivf_flat.SearchParams(n_probes=100)
    k = 10

    # Search with the original index, then drop it to prove the loaded
    # index is fully self-contained.
    distance_dev, neighbors_dev = ivf_flat.search(
        search_params, index, queries_device, k
    )
    neighbors = neighbors_dev.copy_to_host()
    dist = distance_dev.copy_to_host()
    del index

    distance_dev, neighbors_dev = ivf_flat.search(
        search_params, loaded_index, queries_device, k
    )
    neighbors2 = neighbors_dev.copy_to_host()
    dist2 = distance_dev.copy_to_host()
    # Neighbors must match exactly; distances up to float round-off.
    assert np.all(neighbors == neighbors2)
    assert np.allclose(dist, dist2, rtol=1e-6)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/test/test_fused_l2_argmin.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from scipy.spatial.distance import cdist
from pylibraft.common import DeviceResources, device_ndarray
from pylibraft.distance import fused_l2_nn_argmin
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("n_rows", [10, 100])
@pytest.mark.parametrize("n_clusters", [5, 10])
@pytest.mark.parametrize("n_cols", [3, 5])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_fused_l2_nn_minarg(n_rows, n_cols, n_clusters, dtype, inplace):
    """Compare fused_l2_nn_argmin against a scipy cdist + argmin reference."""
    input1 = np.random.random_sample((n_rows, n_cols))
    input1 = np.asarray(input1, order="C").astype(dtype)
    input2 = np.random.random_sample((n_clusters, n_cols))
    input2 = np.asarray(input2, order="C").astype(dtype)
    output = np.zeros((n_rows), dtype="int32")
    # Reference: index of the nearest row of input2 for each row of input1
    # under Euclidean distance.
    expected = cdist(input1, input2, metric="euclidean")
    expected = expected.argmin(axis=1)
    input1_device = device_ndarray(input1)
    input2_device = device_ndarray(input2)
    # inplace=True: results must land in the preallocated buffer;
    # otherwise the function allocates and returns a fresh device array.
    output_device = device_ndarray(output) if inplace else None
    handle = DeviceResources()
    ret_output = fused_l2_nn_argmin(
        input1_device, input2_device, output_device, True, handle=handle
    )
    handle.sync()
    output_device = ret_output if not inplace else output_device
    actual = output_device.copy_to_host()
    assert np.allclose(expected, actual, rtol=1e-4)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/mdspan.pyx | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import io
import numpy as np
from cpython.object cimport PyObject
from cython.operator cimport dereference as deref
from libc.stddef cimport size_t
from libc.stdint cimport int8_t, int32_t, int64_t, uint8_t, uint32_t, uintptr_t
from pylibraft.common.cpp.mdspan cimport (
col_major,
device_matrix_view,
host_matrix_view,
host_mdspan,
make_device_matrix_view,
make_host_matrix_view,
matrix_extent,
ostream,
ostringstream,
row_major,
serialize_mdspan,
)
from pylibraft.common.handle cimport device_resources
from pylibraft.common.optional cimport make_optional, optional
from pylibraft.common import DeviceResources
cdef extern from "Python.h":
Py_buffer* PyMemoryView_GET_BUFFER(PyObject* mview)
def run_roundtrip_test_for_mdspan(X, fortran_order=False):
    """Serialize a 2D NumPy array through raft's serialize_mdspan and check
    that the produced bytes deserialize (via numpy.load) back to an array
    with identical shape and contents.

    Supported dtypes: float32, float64, int32, uint32, in either C
    (row-major) or Fortran (column-major) order; anything else raises
    NotImplementedError.
    """
    if not isinstance(X, np.ndarray) or len(X.shape) != 2:
        # Bug fix: the two adjacent literals previously concatenated to
        # "...array with2 dimensions" (missing space).
        raise ValueError("Please call this function with a NumPy array "
                         "with 2 dimensions")
    handle = DeviceResources()
    cdef device_resources * handle_ = \
        <device_resources *> <size_t> handle.getHandle()
    cdef ostringstream oss
    if X.dtype == np.float32:
        if fortran_order:
            serialize_mdspan[float, matrix_extent[size_t], col_major](
                deref(handle_),
                <ostream&>oss,
                <const host_mdspan[float, matrix_extent[size_t],
                                   col_major] &>
                make_host_matrix_view[float, size_t, col_major](
                    <float *><uintptr_t>PyMemoryView_GET_BUFFER(
                        <PyObject *> X.data).buf,
                    X.shape[0], X.shape[1]))
        else:
            serialize_mdspan[float, matrix_extent[size_t], row_major](
                deref(handle_),
                <ostream&>oss,
                <const host_mdspan[float, matrix_extent[size_t],
                                   row_major]&>
                make_host_matrix_view[float, size_t, row_major](
                    <float *><uintptr_t>PyMemoryView_GET_BUFFER(
                        <PyObject *> X.data).buf,
                    X.shape[0], X.shape[1]))
    elif X.dtype == np.float64:
        if fortran_order:
            serialize_mdspan[double, matrix_extent[size_t], col_major](
                deref(handle_),
                <ostream&>oss,
                <const host_mdspan[double, matrix_extent[size_t],
                                   col_major]&>
                make_host_matrix_view[double, size_t, col_major](
                    <double *><uintptr_t>PyMemoryView_GET_BUFFER(
                        <PyObject *> X.data).buf,
                    X.shape[0], X.shape[1]))
        else:
            serialize_mdspan[double, matrix_extent[size_t], row_major](
                deref(handle_),
                <ostream&>oss,
                <const host_mdspan[double, matrix_extent[size_t],
                                   row_major]&>
                make_host_matrix_view[double, size_t, row_major](
                    <double *><uintptr_t>PyMemoryView_GET_BUFFER(
                        <PyObject *> X.data).buf,
                    X.shape[0], X.shape[1]))
    elif X.dtype == np.int32:
        if fortran_order:
            serialize_mdspan[int32_t, matrix_extent[size_t], col_major](
                deref(handle_),
                <ostream&>oss,
                <const host_mdspan[int32_t, matrix_extent[size_t],
                                   col_major]&>
                make_host_matrix_view[int32_t, size_t, col_major](
                    <int32_t *><uintptr_t>PyMemoryView_GET_BUFFER(
                        <PyObject *> X.data).buf,
                    X.shape[0], X.shape[1]))
        else:
            serialize_mdspan[int32_t, matrix_extent[size_t], row_major](
                deref(handle_),
                <ostream&>oss,
                <const host_mdspan[int32_t, matrix_extent[size_t],
                                   row_major]&>
                make_host_matrix_view[int32_t, size_t, row_major](
                    <int32_t *><uintptr_t>PyMemoryView_GET_BUFFER(
                        <PyObject *> X.data).buf,
                    X.shape[0], X.shape[1]))
    elif X.dtype == np.uint32:
        if fortran_order:
            serialize_mdspan[uint32_t, matrix_extent[size_t], col_major](
                deref(handle_),
                <ostream&>oss,
                <const host_mdspan[uint32_t, matrix_extent[size_t],
                                   col_major]&>
                make_host_matrix_view[uint32_t, size_t, col_major](
                    <uint32_t *><uintptr_t>PyMemoryView_GET_BUFFER(
                        <PyObject *> X.data).buf,
                    X.shape[0], X.shape[1]))
        else:
            serialize_mdspan[uint32_t, matrix_extent[size_t], row_major](
                deref(handle_),
                <ostream&>oss,
                <const host_mdspan[uint32_t, matrix_extent[size_t],
                                   row_major]&>
                make_host_matrix_view[uint32_t, size_t, row_major](
                    <uint32_t *><uintptr_t>PyMemoryView_GET_BUFFER(
                        <PyObject *> X.data).buf,
                    X.shape[0], X.shape[1]))
    else:
        raise NotImplementedError()
    # The serialized mdspan uses the NumPy .npy format, so numpy.load can
    # read it straight back from an in-memory buffer.
    f = io.BytesIO(oss.str())
    X2 = np.load(f)
    assert np.all(X.shape == X2.shape)
    assert np.all(X == X2)
# The get_dmv_* helpers wrap a __cuda_array_interface__-style object ("cai")
# into a raft device_matrix_view of the matching element type.  A 1D input is
# treated as a single-column matrix unless check_shape requires a strict 2D
# shape; a dtype mismatch raises TypeError.
cdef device_matrix_view[float, int64_t, row_major] \
        get_dmv_float(cai, check_shape) except *:
    # float32 device matrix view
    if cai.dtype != np.float32:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_device_matrix_view[float, int64_t, row_major](
        <float*><uintptr_t>cai.data, shape[0], shape[1])


cdef device_matrix_view[uint8_t, int64_t, row_major] \
        get_dmv_uint8(cai, check_shape) except *:
    # uint8 device matrix view
    if cai.dtype != np.uint8:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_device_matrix_view[uint8_t, int64_t, row_major](
        <uint8_t*><uintptr_t>cai.data, shape[0], shape[1])


cdef device_matrix_view[int8_t, int64_t, row_major] \
        get_dmv_int8(cai, check_shape) except *:
    # int8 device matrix view
    if cai.dtype != np.int8:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_device_matrix_view[int8_t, int64_t, row_major](
        <int8_t*><uintptr_t>cai.data, shape[0], shape[1])


cdef device_matrix_view[int64_t, int64_t, row_major] \
        get_dmv_int64(cai, check_shape) except *:
    # int64 device matrix view
    if cai.dtype != np.int64:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_device_matrix_view[int64_t, int64_t, row_major](
        <int64_t*><uintptr_t>cai.data, shape[0], shape[1])
# Const variants: identical to the get_dmv_* helpers above but produce views
# over const element types (read-only device matrices).
cdef device_matrix_view[const_float, int64_t, row_major] \
        get_const_dmv_float(cai, check_shape) except *:
    # const float32 device matrix view
    if cai.dtype != np.float32:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_device_matrix_view[const_float, int64_t, row_major](
        <const float*><uintptr_t>cai.data, shape[0], shape[1])


cdef device_matrix_view[const_uint8_t, int64_t, row_major] \
        get_const_dmv_uint8(cai, check_shape) except *:
    # const uint8 device matrix view
    if cai.dtype != np.uint8:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_device_matrix_view[const_uint8_t, int64_t, row_major](
        <const uint8_t*><uintptr_t>cai.data, shape[0], shape[1])


cdef device_matrix_view[const_int8_t, int64_t, row_major] \
        get_const_dmv_int8(cai, check_shape) except *:
    # const int8 device matrix view
    if cai.dtype != np.int8:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_device_matrix_view[const_int8_t, int64_t, row_major](
        <const int8_t*><uintptr_t>cai.data, shape[0], shape[1])
# Wrap an int64 device matrix view in a C++ std::optional (used for optional
# output arguments in the C++ API).
cdef optional[device_matrix_view[int64_t, int64_t, row_major]] \
        make_optional_view_int64(device_matrix_view[int64_t, int64_t, row_major]& dmv) except *:  # noqa: E501
    return make_optional[device_matrix_view[int64_t, int64_t, row_major]](dmv)


# todo(dantegd): we can unify and simplify this functions a little bit
# defining extra functions as-is is the quickest way to get what we need for
# cagra.pyx
cdef device_matrix_view[uint32_t, int64_t, row_major] \
        get_dmv_uint32(cai, check_shape) except *:
    # uint32 device matrix view
    if cai.dtype != np.uint32:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_device_matrix_view[uint32_t, int64_t, row_major](
        <uint32_t*><uintptr_t>cai.data, shape[0], shape[1])
# Host-memory counterparts of the get_dmv_* helpers: wrap an
# __array_interface__-style object into a raft host_matrix_view.
cdef host_matrix_view[float, int64_t, row_major] \
        get_hmv_float(cai, check_shape) except *:
    # float32 host matrix view
    if cai.dtype != np.float32:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_host_matrix_view[float, int64_t, row_major](
        <float*><uintptr_t>cai.data, shape[0], shape[1])


cdef host_matrix_view[uint8_t, int64_t, row_major] \
        get_hmv_uint8(cai, check_shape) except *:
    # uint8 host matrix view
    if cai.dtype != np.uint8:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_host_matrix_view[uint8_t, int64_t, row_major](
        <uint8_t*><uintptr_t>cai.data, shape[0], shape[1])


cdef host_matrix_view[int8_t, int64_t, row_major] \
        get_hmv_int8(cai, check_shape) except *:
    # int8 host matrix view
    if cai.dtype != np.int8:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_host_matrix_view[int8_t, int64_t, row_major](
        <int8_t*><uintptr_t>cai.data, shape[0], shape[1])


cdef host_matrix_view[int64_t, int64_t, row_major] \
        get_hmv_int64(cai, check_shape) except *:
    # int64 host matrix view
    if cai.dtype != np.int64:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_host_matrix_view[int64_t, int64_t, row_major](
        <int64_t*><uintptr_t>cai.data, shape[0], shape[1])
cdef host_matrix_view[uint32_t, int64_t, row_major] \
        get_hmv_uint32(cai, check_shape) except *:
    # uint32 host matrix view.
    # Bug fix: the guard previously compared against np.int64 (copy-paste
    # from get_hmv_int64), which rejected valid uint32 input and let int64
    # arrays through to be reinterpreted as uint32.
    if cai.dtype != np.uint32:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_host_matrix_view[uint32_t, int64_t, row_major](
        <uint32_t*><uintptr_t>cai.data, shape[0], shape[1])
# Const host-view variants: read-only host_matrix_view wrappers.
cdef host_matrix_view[const_float, int64_t, row_major] \
        get_const_hmv_float(cai, check_shape) except *:
    # const float32 host matrix view
    if cai.dtype != np.float32:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_host_matrix_view[const_float, int64_t, row_major](
        <const float*><uintptr_t>cai.data, shape[0], shape[1])


cdef host_matrix_view[const_uint8_t, int64_t, row_major] \
        get_const_hmv_uint8(cai, check_shape) except *:
    # const uint8 host matrix view
    if cai.dtype != np.uint8:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_host_matrix_view[const_uint8_t, int64_t, row_major](
        <const uint8_t*><uintptr_t>cai.data, shape[0], shape[1])


cdef host_matrix_view[const_int8_t, int64_t, row_major] \
        get_const_hmv_int8(cai, check_shape) except *:
    # const int8 host matrix view
    if cai.dtype != np.int8:
        raise TypeError("dtype %s not supported" % cai.dtype)
    if check_shape and len(cai.shape) != 2:
        raise ValueError("Expected a 2D array, got %d D" % len(cai.shape))
    shape = (cai.shape[0], cai.shape[1] if len(cai.shape) == 2 else 1)
    return make_host_matrix_view[const_int8_t, int64_t, row_major](
        <const_int8_t*><uintptr_t>cai.data, shape[0], shape[1])
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Set the list of Cython files to build
set(cython_sources cuda.pyx handle.pyx mdspan.pyx interruptible.pyx)
# All extension modules in this directory link against the raft C++ target.
set(linked_libraries raft::raft)

# Build all of the Cython targets.
# MODULE_PREFIX common_ namespaces the generated targets so they do not
# collide with same-named modules in other pylibraft subpackages.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" ASSOCIATED_TARGETS raft MODULE_PREFIX common_
)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/handle.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import functools
from cuda.ccudart cimport cudaStream_t
from libc.stdint cimport uintptr_t
from rmm._lib.cuda_stream_view cimport cuda_stream_per_thread, cuda_stream_view
from .cuda cimport Stream
from .cuda import CudaRuntimeError
cdef class DeviceResources:
    """
    DeviceResources is a lightweight python wrapper around the corresponding
    C++ class of device_resources exposed by RAFT's C++ interface. Refer to
    the header file raft/core/device_resources.hpp for interface level
    details of this struct.

    Parameters
    ----------
    stream : Optional stream to use for ordering CUDA instructions
        Accepts pylibraft.common.Stream() or uintptr_t (cudaStream_t)

    Examples
    --------
    Basic usage:

    >>> from pylibraft.common import Stream, DeviceResources
    >>> stream = Stream()
    >>> handle = DeviceResources(stream)
    >>>
    >>> # call algos here
    >>>
    >>> # final sync of all work launched in the stream of this handle
    >>> # this is same as `raft.cuda.Stream.sync()` call, but safer in case
    >>> # the default stream inside the `device_resources` is being used
    >>> handle.sync()
    >>> del handle  # optional!

    Using a cuPy stream with RAFT device_resources:

    >>> import cupy
    >>> from pylibraft.common import Stream, DeviceResources
    >>>
    >>> cupy_stream = cupy.cuda.Stream()
    >>> handle = DeviceResources(stream=cupy_stream.ptr)

    Using a RAFT stream with CuPy ExternalStream:

    >>> import cupy
    >>> from pylibraft.common import Stream
    >>>
    >>> raft_stream = Stream()
    >>> cupy_stream = cupy.cuda.ExternalStream(raft_stream.get_ptr())
    """
    def __cinit__(self, stream=None, n_streams=0):
        # n_streams > 0 builds an internal stream pool for algorithms that
        # can fan work out over multiple streams.
        self.n_streams = n_streams
        if n_streams > 0:
            self.stream_pool.reset(new cuda_stream_pool(n_streams))
        cdef uintptr_t s
        cdef cuda_stream_view c_stream
        # We should either have a pylibraft.common.Stream or a uintptr_t
        # of a cudaStream_t
        # NOTE(review): __cinit__ allocates a handle_t while __setstate__
        # allocates a device_resources; handle_t derives from
        # device_resources in C++ — confirm the asymmetry is intentional.
        if stream is None:
            # this constructor will construct a "main" handle on
            # per-thread default stream, which is non-blocking
            self.c_obj.reset(new handle_t(cuda_stream_per_thread,
                                          self.stream_pool))
        else:
            # this constructor constructs a handle on user stream
            if isinstance(stream, Stream):
                # Stream is pylibraft Stream()
                s = stream.get_ptr()
                c_stream = cuda_stream_view(<cudaStream_t>s)
            elif isinstance(stream, int):
                # Stream is a pointer, cast to cudaStream_t
                s = stream
                c_stream = cuda_stream_view(<cudaStream_t>s)
            else:
                raise ValueError("stream should be common.Stream() or "
                                 "uintptr_t to cudaStream_t")
            self.c_obj.reset(new handle_t(c_stream,
                                          self.stream_pool))

    def sync(self):
        """
        Issues a sync on the stream set for this instance.
        """
        self.c_obj.get()[0].sync_stream()

    def getHandle(self):
        """
        Return the pointer to the underlying raft::device_resources
        instance as a size_t
        """
        return <size_t> self.c_obj.get()

    def __getstate__(self):
        # Only the pool size is picklable; the C++ resources are rebuilt
        # on the per-thread default stream in __setstate__.
        return self.n_streams

    def __setstate__(self, state):
        self.n_streams = state
        if self.n_streams > 0:
            self.stream_pool.reset(new cuda_stream_pool(self.n_streams))
        self.c_obj.reset(new device_resources(cuda_stream_per_thread,
                                              self.stream_pool))
cdef class Handle(DeviceResources):
    """
    Handle is a lightweight python wrapper around the corresponding
    C++ class of handle_t exposed by RAFT's C++ interface. Refer to
    the header file raft/core/handle.hpp for interface level
    details of this struct.

    Note: This API is officially deprecated in favor of DeviceResources
    and will be removed in a future release.

    Parameters
    ----------
    stream : Optional stream to use for ordering CUDA instructions
        Accepts pylibraft.common.Stream() or uintptr_t (cudaStream_t)

    Examples
    --------
    Basic usage:

    >>> from pylibraft.common import Stream, Handle
    >>> stream = Stream()
    >>> handle = Handle(stream)
    >>>
    >>> # call algos here
    >>>
    >>> # final sync of all work launched in the stream of this handle
    >>> # this is same as `raft.cuda.Stream.sync()` call, but safer in case
    >>> # the default stream inside the `handle_t` is being used
    >>> handle.sync()
    >>> del handle  # optional!

    Using a cuPy stream with RAFT device_resources:

    >>> import cupy
    >>> from pylibraft.common import Stream, Handle
    >>>
    >>> cupy_stream = cupy.cuda.Stream()
    >>> handle = Handle(stream=cupy_stream.ptr)

    Using a RAFT stream with CuPy ExternalStream:

    >>> import cupy
    >>> from pylibraft.common import Stream
    >>>
    >>> raft_stream = Stream()
    >>> cupy_stream = cupy.cuda.ExternalStream(raft_stream.get_ptr())
    """
    def __getstate__(self):
        # Only the pool size is picklable (same scheme as DeviceResources).
        return self.n_streams

    def __setstate__(self, state):
        # Rebuilds a handle_t (not device_resources) on the per-thread
        # default stream, matching this class's legacy type.
        self.n_streams = state
        if self.n_streams > 0:
            self.stream_pool.reset(new cuda_stream_pool(self.n_streams))
        self.c_obj.reset(new handle_t(cuda_stream_per_thread,
                                      self.stream_pool))
# Text substituted into a wrapped function's docstring wherever it contains
# a ``{handle_docstring}`` placeholder.
_HANDLE_PARAM_DOCSTRING = """
handle : Optional RAFT resource handle for reusing CUDA resources.
    If a handle isn't supplied, CUDA resources will be
    allocated inside this function and synchronized before the
    function exits. If a handle is supplied, you will need to
    explicitly synchronize yourself by calling `handle.sync()`
    before accessing the output.
""".strip()


def auto_sync_handle(f):
    """Decorator that supplies and synchronizes a RAFT handle on demand.

    If the caller passes ``handle=None`` (or omits it), a default
    ``DeviceResources`` is created for the call and synchronized before
    the result is returned.  A caller-supplied handle is passed through
    untouched — synchronization is then the caller's responsibility.

    The wrapped function's docstring must contain a ``{handle_docstring}``
    placeholder, which is filled in with the standard handle parameter
    description.
    """

    @functools.wraps(f)
    def wrapper(*args, handle=None, **kwargs):
        if handle is not None:
            # Caller owns the handle; do not sync on their behalf.
            return f(*args, handle=handle, **kwargs)
        owned = DeviceResources()
        result = f(*args, handle=owned, **kwargs)
        owned.sync()
        return result

    wrapper.__doc__ = wrapper.__doc__.format(
        handle_docstring=_HANDLE_PARAM_DOCSTRING
    )
    return wrapper
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/outputs.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import warnings
import pylibraft.config
def import_warn_(lib):
    """Emit a UserWarning that *lib* is missing and output stays as-is."""
    message = (
        "%s is not available and output cannot be converted."
        "Returning original output instead." % lib
    )
    warnings.warn(message)
def convert_to_torch(device_ndarray):
    """Convert a device_ndarray into a CUDA torch tensor.

    If PyTorch is not installed, a warning is emitted and the input is
    returned unchanged.
    """
    try:
        import torch
    except ImportError:
        import_warn_("PyTorch")
        return device_ndarray
    return torch.as_tensor(device_ndarray, device="cuda")
def convert_to_cupy(device_ndarray):
    """Convert a device_ndarray into a cupy.ndarray.

    If CuPy is not installed, a warning is emitted and the input is
    returned unchanged.
    """
    try:
        import cupy
    except ImportError:
        import_warn_("CuPy")
        return device_ndarray
    return cupy.asarray(device_ndarray)
def no_conversion(device_ndarray):
    """Identity conversion: hand back the output exactly as received."""
    return device_ndarray
def convert_to_cai_type(device_ndarray):
    """Convert *device_ndarray* per the globally configured output type.

    ``pylibraft.config.output_as_`` may be a callable (applied directly) or
    one of the strings "raft" (no conversion), "torch", or "cupy".  Any
    other value raises ValueError.
    """
    target = pylibraft.config.output_as_
    if callable(target):
        return target(device_ndarray)
    if target == "raft":
        return device_ndarray
    if target == "torch":
        return convert_to_torch(device_ndarray)
    if target == "cupy":
        return convert_to_cupy(device_ndarray)
    raise ValueError("No valid type conversion found for %s" % target)
def conv(ret):
    """Yield items from *ret*, converting any device_ndarray elements to
    the configured output type and passing everything else through."""
    for item in ret:
        if isinstance(item, pylibraft.common.device_ndarray):
            yield convert_to_cai_type(item)
        else:
            yield item
def auto_convert_output(f):
    """Decorator to automatically convert an output device_ndarray
    (or list or tuple of device_ndarray) into the configured
    `__cuda_array_interface__` compliant type.
    """

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        result = f(*args, **kwargs)
        # Single array, container of arrays, or anything else unchanged.
        if isinstance(result, pylibraft.common.device_ndarray):
            return convert_to_cai_type(result)
        if isinstance(result, tuple):
            return tuple(conv(result))
        if isinstance(result, list):
            return list(conv(result))
        return result

    return wrapper
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/cuda.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cuda.ccudart cimport (
cudaError_t,
cudaGetErrorName,
cudaGetErrorString,
cudaGetLastError,
cudaStream_t,
cudaStreamCreate,
cudaStreamDestroy,
cudaStreamSynchronize,
cudaSuccess,
)
from libc.stdint cimport uintptr_t
class CudaRuntimeError(RuntimeError):
    """RuntimeError carrying the name and description of the most recent
    CUDA error, optionally suffixed with a caller-provided message.
    """

    def __init__(self, extraMsg=None):
        # cudaGetLastError reports (and clears) the last error recorded on
        # this thread.
        cdef cudaError_t e = cudaGetLastError()
        cdef bytes errMsg = cudaGetErrorString(e)
        cdef bytes errName = cudaGetErrorName(e)
        msg = "Error! %s reason='%s'" % (errName.decode(), errMsg.decode())
        if extraMsg is not None:
            msg += " extraMsg='%s'" % extraMsg
        super(CudaRuntimeError, self).__init__(msg)
cdef class Stream:
    """
    Stream represents a thin-wrapper around cudaStream_t and its operations.

    Examples
    --------
    >>> from pylibraft.common.cuda import Stream
    >>> stream = Stream()
    >>> stream.sync()
    >>> del stream  # optional!
    """
    def __cinit__(self):
        # Owns a freshly created CUDA stream; destroyed in __dealloc__.
        cdef cudaStream_t stream
        cdef cudaError_t e = cudaStreamCreate(&stream)
        if e != cudaSuccess:
            raise CudaRuntimeError("Stream create")
        self.s = stream

    def __dealloc__(self):
        # NOTE(review): raising from __dealloc__ is discouraged (Cython
        # prints and swallows the exception) — confirm this is acceptable.
        self.sync()
        cdef cudaError_t e = cudaStreamDestroy(self.s)
        if e != cudaSuccess:
            raise CudaRuntimeError("Stream destroy")

    def sync(self):
        """
        Synchronize on the cudastream owned by this object. Note that this
        could raise exception due to issues with previous asynchronous
        launches
        """
        cdef cudaError_t e = cudaStreamSynchronize(self.s)
        if e != cudaSuccess:
            raise CudaRuntimeError("Stream sync")

    cdef cudaStream_t getStream(self):
        # C-level accessor for other Cython modules.
        return self.s

    def get_ptr(self):
        """
        Return the uintptr_t pointer of the underlying cudaStream_t handle
        """
        return <uintptr_t>self.s
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/handle.pxd | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from libcpp.memory cimport shared_ptr, unique_ptr
from rmm._lib.cuda_stream_pool cimport cuda_stream_pool
from rmm._lib.cuda_stream_view cimport cuda_stream_view
# Keeping `handle_t` around for backwards compatibility at the
# cython layer but users are encourage to switch to device_resources
cdef extern from "raft/core/handle.hpp" namespace "raft" nogil:
    # Legacy handle type; retained for backwards compatibility with code
    # that has not yet migrated to device_resources.
    cdef cppclass handle_t:
        handle_t() except +
        handle_t(cuda_stream_view stream_view) except +
        handle_t(cuda_stream_view stream_view,
                 shared_ptr[cuda_stream_pool] stream_pool) except +
        cuda_stream_view get_stream() except +
        void sync_stream() except +

cdef extern from "raft/core/device_resources.hpp" namespace "raft" nogil:
    # Primary RAFT resource container: main stream plus optional pool.
    cdef cppclass device_resources:
        device_resources() except +
        device_resources(cuda_stream_view stream_view) except +
        device_resources(cuda_stream_view stream_view,
                         shared_ptr[cuda_stream_pool] stream_pool) except +
        cuda_stream_view get_stream() except +
        void sync_stream() except +

cdef class DeviceResources:
    # Owning pointer to the C++ resources object.
    cdef unique_ptr[device_resources] c_obj
    # Shared stream pool (empty unless n_streams > 0 at construction).
    cdef shared_ptr[cuda_stream_pool] stream_pool
    cdef int n_streams
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/device_ndarray.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import rmm
class device_ndarray:
    """
    A very lightweight ``__cuda_array_interface__`` wrapper around a
    ``numpy.ndarray``: the host array supplies the metadata (shape, dtype,
    strides) while the element bytes live in an RMM device buffer.
    """

    def __init__(self, np_ndarray):
        """
        Wrap a numpy.ndarray, copying its contents into device memory.

        Parameters
        ----------
        np_ndarray : numpy.ndarray
            Host array whose bytes are copied to the device.

        Examples
        --------
        device_ndarray is __cuda_array_interface__ compliant, so it
        interoperates with other device-enabled libraries such as CuPy
        and PyTorch.

        Converting to a cupy.ndarray:

        .. code-block:: python

            import cupy as cp
            from pylibraft.common import device_ndarray

            raft_array = device_ndarray.empty((100, 50))
            cupy_array = cp.asarray(raft_array)

        Converting to a PyTorch tensor:

        .. code-block:: python

            import torch
            from pylibraft.common import device_ndarray

            raft_array = device_ndarray.empty((100, 50))
            torch_tensor = torch.as_tensor(raft_array, device='cuda')
        """
        self.ndarray_ = np_ndarray
        # Serialize host bytes in the layout the array already uses so the
        # device copy preserves element order.
        layout = "C" if self.c_contiguous else "F"
        host_bytes = self.ndarray_.tobytes(order=layout)
        self.device_buffer_ = rmm.DeviceBuffer.to_device(host_bytes)

    @classmethod
    def empty(cls, shape, dtype=np.float32, order="C"):
        """
        Return a new device_ndarray of given shape and type, without
        initializing entries.

        Parameters
        ----------
        shape : int or tuple of int
            Shape of the empty array, e.g., (2, 3) or 2.
        dtype : data-type, optional
            Desired output data-type for the array, e.g, numpy.int8.
            Default is numpy.float32.
        order : {'C', 'F'}, optional (default: 'C')
            Row-major (C-style) or column-major (Fortran-style) layout.
        """
        host = np.empty(shape, dtype=dtype, order=order)
        return cls(host)

    @property
    def c_contiguous(self):
        """True when this device_ndarray is laid out in row-major order."""
        # numpy reports strides as None for C-contiguous arrays; otherwise
        # compare the second-axis stride against the element size.
        cur_strides = self.strides
        if cur_strides is None:
            return True
        return cur_strides[1] == self.dtype.itemsize

    @property
    def f_contiguous(self):
        """True when this device_ndarray is laid out in column-major order."""
        return not self.c_contiguous

    @property
    def dtype(self):
        """numpy dtype of this device_ndarray."""
        return np.dtype(self.ndarray_.__array_interface__["typestr"])

    @property
    def shape(self):
        """Shape tuple of this device_ndarray."""
        return self.ndarray_.__array_interface__["shape"]

    @property
    def strides(self):
        """Strides of this device_ndarray (None for C-contiguous data)."""
        return self.ndarray_.__array_interface__.get("strides")

    @property
    def __cuda_array_interface__(self):
        """
        __cuda_array_interface__-compliant dict enabling zero-copy
        integration with other device-enabled libraries.
        """
        # Reuse the host array's metadata but point "data" at the device
        # buffer instead of host memory.
        device_cai = self.device_buffer_.__cuda_array_interface__
        cai = self.ndarray_.__array_interface__.copy()
        cai["data"] = (device_cai["data"][0], device_cai["data"][1])
        return cai

    def copy_to_host(self):
        """
        Return a new numpy.ndarray on host holding the current contents
        of this device_ndarray.
        """
        flat = np.frombuffer(
            self.device_buffer_.tobytes(),
            dtype=self.dtype,
            like=self.ndarray_,
        ).astype(self.dtype)
        # Re-apply the original shape/strides over the flat host copy.
        return np.lib.stride_tricks.as_strided(flat, self.shape, self.strides)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/interruptible.pxd | #
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from libcpp.memory cimport shared_ptr
from rmm._lib.cuda_stream_view cimport cuda_stream_view
cdef extern from "raft/core/interruptible.hpp" namespace "raft" nogil:
    cdef cppclass interruptible:
        # Requests cancellation of the task associated with this token
        # (used by the Ctrl+C handler in interruptible.pyx).
        void cancel()


cdef extern from "raft/core/interruptible.hpp" \
        namespace "raft::interruptible" nogil:
    # Interruptible stream synchronization (renamed to avoid clashing
    # with the Python-level `synchronize` wrapper).
    cdef void inter_synchronize \
        "raft::interruptible::synchronize"(cuda_stream_view stream) except+
    # Renamed because `yield` is a Python keyword.
    cdef void inter_yield "raft::interruptible::yield"() except+
    # Obtain the cancellation token for the current task.
    cdef shared_ptr[interruptible] get_token() except+
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/__init__.pxd | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/input_validation.py | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
def do_dtypes_match(*cais):
    """Return True when every given array shares the same dtype typestr."""
    first = cais[0].__cuda_array_interface__["typestr"]
    return all(
        arr.__cuda_array_interface__["typestr"] == first for arr in cais
    )
def do_rows_match(*cais):
    """Return True when every given array has the same number of rows."""
    first = cais[0].__cuda_array_interface__["shape"][0]
    return all(
        arr.__cuda_array_interface__["shape"][0] == first for arr in cais
    )
def do_cols_match(*cais):
    """Return True when every given array has the same number of columns."""
    first = cais[0].__cuda_array_interface__["shape"][1]
    return all(
        arr.__cuda_array_interface__["shape"][1] == first for arr in cais
    )
def do_shapes_match(*cais):
    """Return True when every given array has exactly the same shape."""
    first = cais[0].__cuda_array_interface__["shape"]
    return all(
        arr.__cuda_array_interface__["shape"] == first for arr in cais
    )
def is_c_contiguous(cai):
    """
    Checks whether an array is C contiguous.

    Parameters
    ----------
    cai : dict
        CUDA array interface (or array interface) dictionary, containing
        at least ``typestr`` and optionally ``strides``.

    Returns
    -------
    bool
        True when the described array is row-major contiguous.
    """
    # Per the array-interface spec, strides may be omitted or None when
    # the data is C-contiguous.
    strides = cai.get("strides")
    if strides is None:
        return True
    # Compare the innermost-axis stride against the item size. Using the
    # last axis (instead of hard-coding index 1) also handles 1-D arrays
    # with explicit strides, which previously raised IndexError.
    itemsize = np.dtype(cai["typestr"]).itemsize
    return strides[-1] == itemsize
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/cuda.pxd | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuda.ccudart cimport cudaStream_t
# Declaration of the Cython Stream wrapper class around a raw CUDA stream.
cdef class Stream:
    # The wrapped raw cudaStream_t handle.
    cdef cudaStream_t s
    # C-level accessor for the raw handle.
    cdef cudaStream_t getStream(self)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/ai_wrapper.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from pylibraft.common import input_validation
class ai_wrapper:
    """
    Simple wrapper around an array interface object that reduces the
    boilerplate of extracting common fields from the underlying
    dictionary.
    """

    def __init__(self, ai_arr):
        """
        Constructor accepts an array interface compliant array

        Parameters
        ----------
        ai_arr : array interface array
        """
        self.ai_ = ai_arr.__array_interface__
        # Overridden to True by cai_wrapper for device (CUDA) arrays.
        self.from_cai = False

    @property
    def dtype(self):
        """numpy dtype of the underlying array interface."""
        return np.dtype(self.ai_["typestr"])

    @property
    def shape(self):
        """Shape tuple of the underlying array interface."""
        return self.ai_["shape"]

    @property
    def c_contiguous(self):
        """Whether the underlying layout is row-major (C-ordered)."""
        return input_validation.is_c_contiguous(self.ai_)

    @property
    def f_contiguous(self):
        """Whether the underlying layout is column-major (F-ordered)."""
        return not input_validation.is_c_contiguous(self.ai_)

    @property
    def data(self):
        """Data pointer (as an int) of the underlying array interface."""
        return self.ai_["data"][0]

    def validate_shape_dtype(self, expected_dims=None, expected_dtype=None):
        """Checks to see if the shape, dtype, and strides match expectations"""
        dims_ok = expected_dims is None or len(self.shape) == expected_dims
        if not dims_ok:
            raise ValueError(
                f"unexpected shape {self.shape} - "
                f"expected {expected_dims} dimensions"
            )
        dtype_ok = expected_dtype is None or self.dtype == expected_dtype
        if not dtype_ok:
            raise ValueError(
                f"invalid dtype {self.dtype}: expected " f"{expected_dtype}"
            )
        if not self.c_contiguous:
            raise ValueError("input must be c-contiguous")
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/optional.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
# Code from Cython libcpp
from libcpp cimport bool
# Minimal declaration of std::optional for use from Cython (adapted from
# Cython's libcpp, per the header note above).
cdef extern from "<optional>" namespace "std" nogil:
    # Tag type used to construct an empty optional.
    cdef cppclass nullopt_t:
        nullopt_t()

    cdef nullopt_t nullopt

    cdef cppclass optional[T]:
        ctypedef T value_type
        optional()
        optional(nullopt_t)
        optional(optional&) except +
        optional(T&) except +
        bool has_value()
        T& value()
        T& value_or[U](U& default_value)
        void swap(optional&)
        void reset()
        T& emplace(...)
        T& operator*()
        optional& operator=(optional&)
        optional& operator=[U](U&)
        bool operator bool()
        bool operator!()
        bool operator==[U](optional&, U&)
        bool operator!=[U](optional&, U&)
        bool operator<[U](optional&, U&)
        bool operator>[U](optional&, U&)
        bool operator<=[U](optional&, U&)
        bool operator>=[U](optional&, U&)

    optional[T] make_optional[T](...) except +
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/__init__.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .ai_wrapper import ai_wrapper
from .cai_wrapper import cai_wrapper
from .cuda import Stream
from .device_ndarray import device_ndarray
from .handle import DeviceResources, Handle
from .outputs import auto_convert_output

# All names imported above are re-exported as the package's public API.
# Previously only a subset was listed here, hiding the rest from
# `from pylibraft.common import *` and from tools that respect __all__.
__all__ = [
    "DeviceResources",
    "Handle",
    "Stream",
    "ai_wrapper",
    "auto_convert_output",
    "cai_wrapper",
    "device_ndarray",
]
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/cai_wrapper.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from types import SimpleNamespace
from pylibraft.common.ai_wrapper import ai_wrapper
class cai_wrapper(ai_wrapper):
    """
    Simple wrapper around a CUDA array interface object that reuses all
    of ai_wrapper's accessors for extracting common information from the
    underlying dictionary.
    """

    def __init__(self, cai_arr):
        """
        Constructor accepts a CUDA array interface compliant array

        Parameters
        ----------
        cai_arr : CUDA array interface array
        """
        # ai_wrapper reads __array_interface__, so present the CUDA
        # interface dict under that attribute name via a shim object.
        shim = SimpleNamespace(
            __array_interface__=cai_arr.__cuda_array_interface__
        )
        super().__init__(shim)
        self.from_cai = True
def wrap_array(array):
    """
    Wrap ``array`` in a cai_wrapper when it exposes
    ``__cuda_array_interface__``; otherwise fall back to the host-side
    ai_wrapper.
    """
    if hasattr(array, "__cuda_array_interface__"):
        return cai_wrapper(array)
    return ai_wrapper(array)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/interruptible.pyx | #
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import contextlib
import signal
from cuda.ccudart cimport cudaStream_t
from cython.operator cimport dereference
from rmm._lib.cuda_stream_view cimport cuda_stream_view
from .cuda cimport Stream
@contextlib.contextmanager
def cuda_interruptible():
    '''
    Temporarily install a keyboard interrupt handler (Ctrl+C)
    that cancels the enclosed interruptible C++ thread.

    Use this on a long-running C++ function imported via cython:

    >>> with cuda_interruptible():
    >>>     my_long_running_function(...)

    It's also recommended to release the GIL during the call, to
    make sure the handler has a chance to run:

    >>> with cuda_interruptible():
    >>>     with nogil:
    >>>         my_long_running_function(...)
    '''
    # Cancellation token for the current task; the handler below calls
    # cancel() on it to interrupt the enclosed C++ work.
    cdef shared_ptr[interruptible] token = get_token()

    def newhr(*args, **kwargs):
        # SIGINT handler: release the GIL while cancelling so the C++
        # side can act on it.
        with nogil:
            dereference(token).cancel()
    try:
        oldhr = signal.signal(signal.SIGINT, newhr)
    except ValueError:
        # the signal creation would fail if this is not the main thread
        # That's fine! The feature is disabled.
        oldhr = None
    try:
        yield
    finally:
        # Restore the previous SIGINT handler, if we replaced it.
        if oldhr is not None:
            signal.signal(signal.SIGINT, oldhr)
def synchronize(stream: Stream):
    '''
    Same as cudaStreamSynchronize, but can be interrupted
    if called within a `with cuda_interruptible()` block.

    Parameters
    ----------
    stream : Stream
        The stream to synchronize on.
    '''
    cdef cuda_stream_view c_stream = cuda_stream_view(stream.getStream())
    # Release the GIL while waiting so the Ctrl+C handler installed by
    # cuda_interruptible() has a chance to run.
    with nogil:
        inter_synchronize(c_stream)
def cuda_yield():
    '''
    Check for an asynchronously received interrupted_exception.
    Raises the exception if a user pressed Ctrl+C within a
    `with cuda_interruptible()` block before.
    '''
    # Release the GIL while polling the C++-side cancellation state.
    with nogil:
        inter_yield()
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.